Merge "AudioResampler: Add configurable resampler design"
diff --git a/Android.bp b/Android.bp
index a3679b1..e4f12c8 100644
--- a/Android.bp
+++ b/Android.bp
@@ -2,5 +2,6 @@
"camera",
"drm/*",
"media/*",
+ "services/*",
"soundtrigger",
]
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index af977b8..907802c 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -59,7 +59,8 @@
mWrapper(wrapper),
mInError(false),
mError(ACAMERA_OK),
- mIdle(true) {
+ mIdle(true),
+ mCurrentSession(nullptr) {
mClosing = false;
// Setup looper thread to perfrom device callbacks to app
mCbLooper = new ALooper;
@@ -98,18 +99,30 @@
// Device close implementaiton
CameraDevice::~CameraDevice() {
- Mutex::Autolock _l(mDeviceLock);
- if (!isClosed()) {
- disconnectLocked();
- }
- if (mCbLooper != nullptr) {
- mCbLooper->unregisterHandler(mHandler->id());
- mCbLooper->stop();
+ sp<ACameraCaptureSession> session = mCurrentSession.promote();
+ {
+ Mutex::Autolock _l(mDeviceLock);
+ if (!isClosed()) {
+ disconnectLocked(session);
+ }
+ mCurrentSession = nullptr;
+ if (mCbLooper != nullptr) {
+ mCbLooper->unregisterHandler(mHandler->id());
+ mCbLooper->stop();
+ }
}
mCbLooper.clear();
mHandler.clear();
}
+void
+CameraDevice::postSessionMsgAndCleanup(sp<AMessage>& msg) {
+ msg->post();
+ msg.clear();
+ sp<AMessage> cleanupMsg = new AMessage(kWhatCleanUpSessions, mHandler);
+ cleanupMsg->post();
+}
+
// TODO: cached created request?
camera_status_t
CameraDevice::createCaptureRequest(
@@ -146,14 +159,15 @@
const ACaptureSessionOutputContainer* outputs,
const ACameraCaptureSession_stateCallbacks* callbacks,
/*out*/ACameraCaptureSession** session) {
+ sp<ACameraCaptureSession> currentSession = mCurrentSession.promote();
Mutex::Autolock _l(mDeviceLock);
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
return ret;
}
- if (mCurrentSession != nullptr) {
- mCurrentSession->closeByDevice();
+ if (currentSession != nullptr) {
+ currentSession->closeByDevice();
stopRepeatingLocked();
}
@@ -264,7 +278,7 @@
msg->setPointer(kContextKey, session->mUserSessionCallback.context);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
- msg->post();
+ postSessionMsgAndCleanup(msg);
}
mIdle = false;
mBusySession = session;
@@ -328,7 +342,7 @@
return;
}
- if (session != mCurrentSession) {
+ if (mCurrentSession != session) {
// Session has been replaced by other seesion or device is closed
return;
}
@@ -349,7 +363,7 @@
}
void
-CameraDevice::disconnectLocked() {
+CameraDevice::disconnectLocked(sp<ACameraCaptureSession>& session) {
if (mClosing.exchange(true)) {
// Already closing, just return
ALOGW("Camera device %s is already closing.", getId());
@@ -361,9 +375,8 @@
}
mRemote = nullptr;
- if (mCurrentSession != nullptr) {
- mCurrentSession->closeByDevice();
- mCurrentSession = nullptr;
+ if (session != nullptr) {
+ session->closeByDevice();
}
}
@@ -404,7 +417,7 @@
// This should never happen because creating a new session will close
// previous one and thus reject any API call from previous session.
// But still good to check here in case something unexpected happen.
- if (session != mCurrentSession) {
+ if (mCurrentSession != session) {
ALOGE("Camera %s session %p is not current active session!", getId(), session);
return ACAMERA_ERROR_INVALID_OPERATION;
}
@@ -415,12 +428,13 @@
}
mFlushing = true;
+
// Send onActive callback to guarantee there is always active->ready transition
sp<AMessage> msg = new AMessage(kWhatSessionStateCb, mHandler);
msg->setPointer(kContextKey, session->mUserSessionCallback.context);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onActive);
- msg->post();
+ postSessionMsgAndCleanup(msg);
// If device is already idling, send callback and exit early
if (mIdle) {
@@ -428,7 +442,7 @@
msg->setPointer(kContextKey, session->mUserSessionCallback.context);
msg->setObject(kSessionSpKey, session);
msg->setPointer(kCallbackFpKey, (void*) session->mUserSessionCallback.onReady);
- msg->post();
+ postSessionMsgAndCleanup(msg);
mFlushing = false;
return ACAMERA_OK;
}
@@ -568,7 +582,7 @@
msg->setObject(kSessionSpKey, mBusySession);
msg->setPointer(kCallbackFpKey, (void*) mBusySession->mUserSessionCallback.onReady);
mBusySession.clear();
- msg->post();
+ postSessionMsgAndCleanup(msg);
}
mIdle = true;
@@ -728,7 +742,7 @@
msg->setObject(kCaptureRequestKey, request);
msg->setPointer(kAnwKey, (void*) anw);
msg->setInt64(kFrameNumberKey, frameNumber);
- msg->post();
+ postSessionMsgAndCleanup(msg);
} else { // Handle other capture failures
// Fire capture failure callback if there is one registered
ACameraCaptureSession_captureCallback_failed onError = cbh.mCallbacks.onCaptureFailed;
@@ -746,7 +760,7 @@
msg->setPointer(kCallbackFpKey, (void*) onError);
msg->setObject(kCaptureRequestKey, request);
msg->setObject(kCaptureFailureKey, failure);
- msg->post();
+ postSessionMsgAndCleanup(msg);
// Update tracker
mFrameNumberTracker.updateTracker(frameNumber, /*isError*/true);
@@ -769,6 +783,9 @@
case kWhatCaptureBufferLost:
ALOGV("%s: Received msg %d", __FUNCTION__, msg->what());
break;
+ case kWhatCleanUpSessions:
+ mCachedSessions.clear();
+ return;
default:
ALOGE("%s:Error: unknown device callback %d", __FUNCTION__, msg->what());
return;
@@ -842,6 +859,7 @@
return;
}
sp<ACameraCaptureSession> session(static_cast<ACameraCaptureSession*>(obj.get()));
+ mCachedSessions.push(session);
sp<CaptureRequest> requestSp = nullptr;
switch (msg->what()) {
case kWhatCaptureStart:
@@ -1053,7 +1071,7 @@
msg->setObject(kSessionSpKey, cbh.mSession);
msg->setPointer(kCallbackFpKey, (void*) cbh.mCallbacks.onCaptureSequenceAborted);
msg->setInt32(kSequenceIdKey, sequenceId);
- msg->post();
+ postSessionMsgAndCleanup(msg);
} else {
// Use mSequenceLastFrameNumberMap to track
mSequenceLastFrameNumberMap.insert(std::make_pair(sequenceId, lastFrameNumber));
@@ -1110,7 +1128,7 @@
// before cbh goes out of scope and causing we call the session
// destructor while holding device lock
cbh.mSession.clear();
- msg->post();
+ postSessionMsgAndCleanup(msg);
}
// No need to track sequence complete if there is no callback registered
@@ -1137,6 +1155,7 @@
return ret; // device has been closed
}
+ sp<ACameraCaptureSession> session = dev->mCurrentSession.promote();
Mutex::Autolock _l(dev->mDeviceLock);
if (dev->mRemote == nullptr) {
return ret; // device has been closed
@@ -1145,10 +1164,10 @@
case ERROR_CAMERA_DISCONNECTED:
{
// Camera is disconnected, close the session and expect no more callbacks
- if (dev->mCurrentSession != nullptr) {
- dev->mCurrentSession->closeByDevice();
- dev->mCurrentSession = nullptr;
+ if (session != nullptr) {
+ session->closeByDevice();
}
+ dev->mCurrentSession = nullptr;
sp<AMessage> msg = new AMessage(kWhatOnDisconnected, dev->mHandler);
msg->setPointer(kContextKey, dev->mAppCallbacks.context);
msg->setPointer(kDeviceKey, (void*) dev->getWrapper());
@@ -1216,6 +1235,7 @@
dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
return ret;
}
+
sp<AMessage> msg = new AMessage(kWhatSessionStateCb, dev->mHandler);
msg->setPointer(kContextKey, dev->mBusySession->mUserSessionCallback.context);
msg->setObject(kSessionSpKey, dev->mBusySession);
@@ -1223,7 +1243,7 @@
// Make sure we clear the sp first so the session destructor can
// only happen on handler thread (where we don't hold device/session lock)
dev->mBusySession.clear();
- msg->post();
+ dev->postSessionMsgAndCleanup(msg);
}
dev->mIdle = true;
dev->mFlushing = false;
@@ -1265,7 +1285,7 @@
msg->setPointer(kCallbackFpKey, (void*) onStart);
msg->setObject(kCaptureRequestKey, request);
msg->setInt64(kTimeStampKey, timestamp);
- msg->post();
+ dev->postSessionMsgAndCleanup(msg);
}
return ret;
}
@@ -1328,7 +1348,7 @@
msg->setPointer(kCallbackFpKey, (void*) onResult);
msg->setObject(kCaptureRequestKey, request);
msg->setObject(kCaptureResultKey, result);
- msg->post();
+ dev->postSessionMsgAndCleanup(msg);
}
if (!isPartialResult) {
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index 78a7891..6ed3881 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -96,7 +96,7 @@
// device goes into fatal error state after this
void setCameraDeviceErrorLocked(camera_status_t error);
- void disconnectLocked(); // disconnect from camera service
+ void disconnectLocked(sp<ACameraCaptureSession>& session); // disconnect from camera service
camera_status_t stopRepeatingLocked();
@@ -138,6 +138,9 @@
camera_status_t configureStreamsLocked(const ACaptureSessionOutputContainer* outputs);
+ // Input message will be posted and cleared after this returns
+ void postSessionMsgAndCleanup(sp<AMessage>& msg);
+
static camera_status_t getIGBPfromAnw(
ANativeWindow* anw, sp<IGraphicBufferProducer>& out);
@@ -185,7 +188,9 @@
kWhatCaptureFail, // onCaptureFailed
kWhatCaptureSeqEnd, // onCaptureSequenceCompleted
kWhatCaptureSeqAbort, // onCaptureSequenceAborted
- kWhatCaptureBufferLost // onCaptureBufferLost
+ kWhatCaptureBufferLost,// onCaptureBufferLost
+ // Internal cleanup
+ kWhatCleanUpSessions // Cleanup cached sp<ACameraCaptureSession>
};
static const char* kContextKey;
static const char* kDeviceKey;
@@ -199,10 +204,16 @@
static const char* kSequenceIdKey;
static const char* kFrameNumberKey;
static const char* kAnwKey;
+
class CallbackHandler : public AHandler {
public:
- CallbackHandler() {}
void onMessageReceived(const sp<AMessage> &msg) override;
+
+ private:
+ // This handler will cache all capture session sp until kWhatCleanUpSessions
+ // is processed. This guarantees that the last session reference is always
+ // released on the callback thread, without holding the camera device lock
+ Vector<sp<ACameraCaptureSession>> mCachedSessions;
};
sp<CallbackHandler> mHandler;
@@ -210,7 +221,7 @@
* Capture session related members *
***********************************/
// The current active session
- ACameraCaptureSession* mCurrentSession = nullptr;
+ wp<ACameraCaptureSession> mCurrentSession;
bool mFlushing = false;
int mNextSessionId = 0;
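// --- Illustrative sketch, not part of the diff above: the locking pattern behind the
// camera change, rewritten with plain std:: types because the Android sp<>/wp<>/
// AMessage/ALooper classes are not reproduced here. The device keeps only a weak
// reference to the current session and hands any strong reference it takes to the
// callback worker, so the session destructor can never run while the device lock is
// held (mirroring wp<ACameraCaptureSession>, mCachedSessions and kWhatCleanUpSessions).
// All names below are hypothetical.
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

struct Session { /* user callbacks, output surfaces, ... */ };

class CallbackWorker {
public:
    CallbackWorker() : mThread([this] { run(); }) {}
    ~CallbackWorker() {
        post({});                        // empty callback acts as the stop message
        mThread.join();
    }
    // Post a callback together with the strong session reference that keeps it alive.
    void post(std::function<void()> cb, std::shared_ptr<Session> keepAlive = nullptr) {
        std::lock_guard<std::mutex> l(mLock);
        mQueue.push({std::move(cb), std::move(keepAlive)});
        mCond.notify_one();
    }
private:
    struct Msg { std::function<void()> cb; std::shared_ptr<Session> session; };
    void run() {
        for (;;) {
            Msg msg;
            {
                std::unique_lock<std::mutex> l(mLock);
                mCond.wait(l, [this] { return !mQueue.empty(); });
                msg = std::move(mQueue.front());
                mQueue.pop();
            }
            if (!msg.cb) return;         // stop message
            msg.cb();
            // msg.session goes out of scope here, on the worker thread: the cached
            // strong reference is released without any device lock held, which is
            // what clearing mCachedSessions on kWhatCleanUpSessions achieves.
        }
    }
    std::mutex mLock;
    std::condition_variable mCond;
    std::queue<Msg> mQueue;
    std::thread mThread;                 // declared last so it starts after the state it uses
};

class Device {
public:
    void onIdle() {
        // Promote the weak reference before taking the lock, like mCurrentSession.promote().
        std::shared_ptr<Session> session = mCurrentSession.lock();
        std::lock_guard<std::mutex> l(mDeviceLock);
        if (!session) return;
        // Hand the strong reference to the worker instead of dropping it here.
        mWorker.post([] { /* fire the onReady state callback to the app */ }, session);
    }
private:
    std::mutex mDeviceLock;
    std::weak_ptr<Session> mCurrentSession;   // was a raw ACameraCaptureSession* before
    CallbackWorker mWorker;
};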
diff --git a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp
index 01f8d65..f7106b2 100644
--- a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp
+++ b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.cpp
@@ -36,6 +36,11 @@
uint8_t previousEncryptedCounter[kBlockSize];
memset(previousEncryptedCounter, 0, kBlockSize);
+ if (key.size() != kBlockSize || (sizeof(Iv) / sizeof(uint8_t)) != kBlockSize) {
+ android_errorWriteLog(0x534e4554, "63982768");
+ return android::ERROR_DRM_DECRYPT;
+ }
+
size_t offset = 0;
AES_KEY opensslKey;
AES_set_encrypt_key(key.array(), kBlockBitCount, &opensslKey);
diff --git a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h
index b416266..edb8445 100644
--- a/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h
+++ b/drm/mediadrm/plugins/clearkey/AesCtrDecryptor.h
@@ -18,6 +18,7 @@
#define CLEARKEY_AES_CTR_DECRYPTOR_H_
#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaErrors.h>
#include <Utils.h>
#include <utils/Errors.h>
#include <utils/Vector.h>
diff --git a/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp b/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp
index 039e402..5db8290 100644
--- a/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp
+++ b/drm/mediadrm/plugins/clearkey/tests/AesCtrDecryptorUnittest.cpp
@@ -34,7 +34,7 @@
uint8_t* destination, const SubSample* subSamples,
size_t numSubSamples, size_t* bytesDecryptedOut) {
Vector<uint8_t> keyVector;
- keyVector.appendArray(key, kBlockSize);
+ keyVector.appendArray(key, sizeof(key) / sizeof(uint8_t));
AesCtrDecryptor decryptor;
return decryptor.decrypt(keyVector, iv, source, destination, subSamples,
@@ -57,6 +57,67 @@
}
};
+TEST_F(AesCtrDecryptorTest, DecryptsWithEmptyKey) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 1;
+
+ // Test vectors from NIST-800-38A
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t source[kTotalSize] = { 0 };
+ uint8_t destination[kTotalSize] = { 0 };
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 64}
+ };
+
+ size_t bytesDecrypted = 0;
+ Vector<uint8_t> keyVector;
+ keyVector.clear();
+
+ AesCtrDecryptor decryptor;
+ ASSERT_EQ(android::ERROR_DRM_DECRYPT, decryptor.decrypt(keyVector, iv,
+ &source[0], &destination[0],
+ &subSamples[0], kNumSubsamples, &bytesDecrypted));
+ ASSERT_EQ(0u, bytesDecrypted);
+}
+
+TEST_F(AesCtrDecryptorTest, DecryptsWithKeyTooLong) {
+ const size_t kTotalSize = 64;
+ const size_t kNumSubsamples = 1;
+
+ // Test vectors from NIST-800-38A
+ uint8_t key[kBlockSize * 2] = {
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c,
+ 0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
+ 0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
+ };
+
+ Iv iv = {
+ 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
+ 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
+ };
+
+ uint8_t source[kTotalSize] = { 0 };
+ uint8_t destination[kTotalSize] = { 0 };
+ SubSample subSamples[kNumSubsamples] = {
+ {0, 64}
+ };
+
+ size_t bytesDecrypted = 0;
+ Vector<uint8_t> keyVector;
+ keyVector.appendArray(key, sizeof(key) / sizeof(uint8_t));
+
+ AesCtrDecryptor decryptor;
+ ASSERT_EQ(android::ERROR_DRM_DECRYPT, decryptor.decrypt(keyVector, iv,
+ &source[0], &destination[0],
+ &subSamples[0], kNumSubsamples, &bytesDecrypted));
+ ASSERT_EQ(0u, bytesDecrypted);
+}
+
TEST_F(AesCtrDecryptorTest, DecryptsContiguousEncryptedBlock) {
const size_t kTotalSize = 64;
const size_t kNumSubsamples = 1;
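// --- Illustrative sketch, not part of the diff above: the key/IV size guard that the
// clearkey change adds before calling into OpenSSL, shown with plain std::vector
// buffers instead of the plugin's Vector<uint8_t>/Iv types. The only value carried
// over is the 16-byte AES block size (kBlockSize); the function and enum names here
// are hypothetical.
#include <openssl/aes.h>     // AES_BLOCK_SIZE (16), AES_set_encrypt_key
#include <cstdint>
#include <cstdio>
#include <vector>

enum class CtrStatus { Ok, BadKeyOrIv };   // BadKeyOrIv maps to ERROR_DRM_DECRYPT above

// Reject any key or IV whose length is not exactly one AES block before the key is
// handed to AES_set_encrypt_key(), which would otherwise read past a short buffer.
CtrStatus checkCtrInputs(const std::vector<uint8_t>& key, const std::vector<uint8_t>& iv) {
    if (key.size() != AES_BLOCK_SIZE || iv.size() != AES_BLOCK_SIZE) {
        return CtrStatus::BadKeyOrIv;
    }
    return CtrStatus::Ok;
}

int main() {
    std::vector<uint8_t> emptyKey;                           // the DecryptsWithEmptyKey case
    std::vector<uint8_t> longKey(2 * AES_BLOCK_SIZE, 0x2b);  // the DecryptsWithKeyTooLong case
    std::vector<uint8_t> iv(AES_BLOCK_SIZE, 0xf0);
    std::printf("empty key rejected: %d\n", checkCtrInputs(emptyKey, iv) == CtrStatus::BadKeyOrIv);
    std::printf("long key rejected:  %d\n", checkCtrInputs(longKey, iv) == CtrStatus::BadKeyOrIv);
    return 0;
}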
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 5a8e79d..e199f03 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -703,18 +703,22 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
- if (options && options->getSeekTo(&seekTimeUs, &mode)
- && !mExtractor->isLiveStreaming()) {
- clearPendingFrames();
+ if (options && options->getSeekTo(&seekTimeUs, &mode)) {
+ if (mode == ReadOptions::SEEK_FRAME_INDEX) {
+ return ERROR_UNSUPPORTED;
+ }
- // The audio we want is located by using the Cues to seek the video
- // stream to find the target Cluster then iterating to finalize for
- // audio.
- int64_t actualFrameTimeUs;
- mBlockIter.seek(seekTimeUs, mIsAudio, &actualFrameTimeUs);
+ if (!mExtractor->isLiveStreaming()) {
+ clearPendingFrames();
- if (mode == ReadOptions::SEEK_CLOSEST) {
- targetSampleTimeUs = actualFrameTimeUs;
+ // The audio we want is located by using the Cues to seek the video
+ // stream to find the target Cluster then iterating to finalize for
+ // audio.
+ int64_t actualFrameTimeUs;
+ mBlockIter.seek(seekTimeUs, mIsAudio, &actualFrameTimeUs);
+ if (mode == ReadOptions::SEEK_CLOSEST) {
+ targetSampleTimeUs = actualFrameTimeUs;
+ }
}
}
diff --git a/media/extractors/mp4/ItemTable.cpp b/media/extractors/mp4/ItemTable.cpp
index ed560e1..9a6cb64 100644
--- a/media/extractors/mp4/ItemTable.cpp
+++ b/media/extractors/mp4/ItemTable.cpp
@@ -40,8 +40,9 @@
friend struct ItemReference;
friend struct ItemProperty;
- ImageItem() : ImageItem(0) {}
- ImageItem(uint32_t _type) : type(_type),
+ ImageItem() : ImageItem(0, 0, false) {}
+ ImageItem(uint32_t _type, uint32_t _id, bool _hidden) :
+ type(_type), itemId(_id), hidden(_hidden),
rows(0), columns(0), width(0), height(0), rotation(0),
offset(0), size(0), nextTileIndex(0) {}
@@ -61,6 +62,8 @@
}
uint32_t type;
+ uint32_t itemId;
+ bool hidden;
int32_t rows;
int32_t columns;
int32_t width;
@@ -496,7 +499,25 @@
ALOGW("dimgRefs if not clean!");
}
derivedImage.dimgRefs.appendVector(mRefs);
+
+ for (size_t i = 0; i < mRefs.size(); i++) {
+ itemIndex = itemIdToItemMap.indexOfKey(mRefs[i]);
+
+ // ignore non-image items
+ if (itemIndex < 0) {
+ continue;
+ }
+ ImageItem &sourceImage = itemIdToItemMap.editValueAt(itemIndex);
+
+ // mark the source image of the derivation as hidden
+ sourceImage.hidden = true;
+ }
} else if (type() == FOURCC('t', 'h', 'm', 'b')) {
+ // mark the thumbnail image as hidden; it can be retrieved if the client
+ // requests a thumbnail explicitly, but won't be exposed as a displayable.
+ ImageItem &thumbImage = itemIdToItemMap.editValueAt(itemIndex);
+ thumbImage.hidden = true;
+
for (size_t i = 0; i < mRefs.size(); i++) {
itemIndex = itemIdToItemMap.indexOfKey(mRefs[i]);
@@ -511,6 +532,10 @@
}
masterImage.thumbnails.push_back(mItemId);
}
+ } else if (type() == FOURCC('a', 'u', 'x', 'l')) {
+ // mark auxiliary image as hidden
+ ImageItem &auxImage = itemIdToItemMap.editValueAt(itemIndex);
+ auxImage.hidden = true;
} else {
ALOGW("ignoring unsupported ref type 0x%x", type());
}
@@ -942,6 +967,7 @@
struct ItemInfo {
uint32_t itemId;
uint32_t itemType;
+ bool hidden;
};
struct InfeBox : public FullBox {
@@ -1012,6 +1038,9 @@
itemInfo->itemId = item_id;
itemInfo->itemType = item_type;
+ // According to HEIF spec, (flags & 1) indicates the image is hidden
+ // and not supposed to be displayed.
+ itemInfo->hidden = (flags() & 1);
char itemTypeString[5];
MakeFourCCString(item_type, itemTypeString);
@@ -1295,7 +1324,7 @@
return ERROR_MALFORMED;
}
- ImageItem image(info.itemType);
+ ImageItem image(info.itemType, info.itemId, info.hidden);
ALOGV("adding %s: itemId %d", image.isGrid() ? "grid" : "image", info.itemId);
@@ -1327,6 +1356,29 @@
mItemReferences[i]->apply(mItemIdToItemMap);
}
+ bool foundPrimary = false;
+ for (size_t i = 0; i < mItemIdToItemMap.size(); i++) {
+ // add all non-hidden images; also add the primary even if it's marked
+ // hidden, in case the primary is set to a thumbnail
+ bool isPrimary = (mItemIdToItemMap[i].itemId == mPrimaryItemId);
+ if (!mItemIdToItemMap[i].hidden || isPrimary) {
+ mDisplayables.push_back(i);
+ }
+ foundPrimary |= isPrimary;
+ }
+
+ ALOGV("found %zu displayables", mDisplayables.size());
+
+ // fail if no displayables are found
+ if (mDisplayables.empty()) {
+ return ERROR_MALFORMED;
+ }
+
+ // if the primary item id is invalid, set primary to the first displayable
+ if (!foundPrimary) {
+ mPrimaryItemId = mItemIdToItemMap[mDisplayables[0]].itemId;
+ }
+
mImageItemsValid = true;
return OK;
}
@@ -1348,29 +1400,36 @@
ALOGV("attach property %d to item id %d)",
propertyIndex, association.itemId);
- mItemProperties[propertyIndex]->attachTo(
- mItemIdToItemMap.editValueAt(itemIndex));
+ mItemProperties[propertyIndex]->attachTo(mItemIdToItemMap.editValueAt(itemIndex));
}
-sp<MetaData> ItemTable::getImageMeta() {
+uint32_t ItemTable::countImages() const {
+ return mImageItemsValid ? mDisplayables.size() : 0;
+}
+
+sp<MetaData> ItemTable::getImageMeta(const uint32_t imageIndex) {
if (!mImageItemsValid) {
return NULL;
}
- ssize_t itemIndex = mItemIdToItemMap.indexOfKey(mPrimaryItemId);
- if (itemIndex < 0) {
- ALOGE("Primary item id %d not found!", mPrimaryItemId);
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %u", __FUNCTION__, imageIndex);
return NULL;
}
-
- ALOGV("primary item index %zu", itemIndex);
+ const uint32_t itemIndex = mDisplayables[imageIndex];
+ ALOGV("image[%u]: item index %u", imageIndex, itemIndex);
const ImageItem *image = &mItemIdToItemMap[itemIndex];
sp<MetaData> meta = new MetaData;
- meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
+ meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
- ALOGV("setting image size %dx%d", image->width, image->height);
+ if (image->itemId == mPrimaryItemId) {
+ meta->setInt32(kKeyIsPrimaryImage, 1);
+ }
+
+ ALOGV("image[%u]: size %dx%d", imageIndex, image->width, image->height);
+
meta->setInt32(kKeyWidth, image->width);
meta->setInt32(kKeyHeight, image->height);
if (image->rotation != 0) {
@@ -1394,8 +1453,8 @@
meta->setInt32(kKeyThumbnailHeight, thumbnail.height);
meta->setData(kKeyThumbnailHVCC, kTypeHVCC,
thumbnail.hvcc->data(), thumbnail.hvcc->size());
- ALOGV("thumbnail meta: %dx%d, item index %zd",
- thumbnail.width, thumbnail.height, thumbItemIndex);
+ ALOGV("image[%u]: thumbnail: size %dx%d, item index %zd",
+ imageIndex, thumbnail.width, thumbnail.height, thumbItemIndex);
} else {
ALOGW("%s: Referenced thumbnail does not exist!", __FUNCTION__);
}
@@ -1406,23 +1465,18 @@
if (tileItemIndex < 0) {
return NULL;
}
- // when there are tiles, (kKeyWidth, kKeyHeight) is the full tiled area,
- // and (kKeyDisplayWidth, kKeyDisplayHeight) may be smaller than that.
- meta->setInt32(kKeyDisplayWidth, image->width);
- meta->setInt32(kKeyDisplayHeight, image->height);
- int32_t gridRows = image->rows, gridCols = image->columns;
+ meta->setInt32(kKeyGridRows, image->rows);
+ meta->setInt32(kKeyGridCols, image->columns);
// point image to the first tile for grid size and HVCC
image = &mItemIdToItemMap.editValueAt(tileItemIndex);
- meta->setInt32(kKeyWidth, image->width * gridCols);
- meta->setInt32(kKeyHeight, image->height * gridRows);
meta->setInt32(kKeyGridWidth, image->width);
meta->setInt32(kKeyGridHeight, image->height);
meta->setInt32(kKeyMaxInputSize, image->width * image->height * 1.5);
}
if (image->hvcc == NULL) {
- ALOGE("%s: hvcc is missing for item index %zd!", __FUNCTION__, itemIndex);
+ ALOGE("%s: hvcc is missing for image[%u]!", __FUNCTION__, imageIndex);
return NULL;
}
meta->setData(kKeyHVCC, kTypeHVCC, image->hvcc->data(), image->hvcc->size());
@@ -1433,48 +1487,46 @@
return meta;
}
-uint32_t ItemTable::countImages() const {
- return mImageItemsValid ? mItemIdToItemMap.size() : 0;
-}
-
-status_t ItemTable::findPrimaryImage(uint32_t *itemIndex) {
+status_t ItemTable::findImageItem(const uint32_t imageIndex, uint32_t *itemIndex) {
if (!mImageItemsValid) {
return INVALID_OPERATION;
}
- ssize_t index = mItemIdToItemMap.indexOfKey(mPrimaryItemId);
- if (index < 0) {
- return ERROR_MALFORMED;
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %d", __FUNCTION__, imageIndex);
+ return BAD_VALUE;
}
- *itemIndex = index;
+ *itemIndex = mDisplayables[imageIndex];
+
+ ALOGV("image[%u]: item index %u", imageIndex, *itemIndex);
return OK;
}
-status_t ItemTable::findThumbnail(uint32_t *itemIndex) {
+status_t ItemTable::findThumbnailItem(const uint32_t imageIndex, uint32_t *itemIndex) {
if (!mImageItemsValid) {
return INVALID_OPERATION;
}
- ssize_t primaryItemIndex = mItemIdToItemMap.indexOfKey(mPrimaryItemId);
- if (primaryItemIndex < 0) {
- ALOGE("%s: Primary item id %d not found!", __FUNCTION__, mPrimaryItemId);
- return ERROR_MALFORMED;
+ if (imageIndex >= mDisplayables.size()) {
+ ALOGE("%s: invalid image index %d", __FUNCTION__, imageIndex);
+ return BAD_VALUE;
}
- const ImageItem &primaryImage = mItemIdToItemMap[primaryItemIndex];
- if (primaryImage.thumbnails.empty()) {
- ALOGW("%s: Using primary in place of thumbnail.", __FUNCTION__);
- *itemIndex = primaryItemIndex;
+ uint32_t masterItemIndex = mDisplayables[imageIndex];
+
+ const ImageItem &masterImage = mItemIdToItemMap[masterItemIndex];
+ if (masterImage.thumbnails.empty()) {
+ *itemIndex = masterItemIndex;
return OK;
}
- ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(
- primaryImage.thumbnails[0]);
+ ssize_t thumbItemIndex = mItemIdToItemMap.indexOfKey(masterImage.thumbnails[0]);
if (thumbItemIndex < 0) {
- ALOGE("%s: Thumbnail item id %d not found!",
- __FUNCTION__, primaryImage.thumbnails[0]);
- return ERROR_MALFORMED;
+ ALOGW("%s: Thumbnail item id %d not found, use master instead",
+ __FUNCTION__, masterImage.thumbnails[0]);
+ *itemIndex = masterItemIndex;
+ return OK;
}
*itemIndex = thumbItemIndex;
diff --git a/media/extractors/mp4/ItemTable.h b/media/extractors/mp4/ItemTable.h
index 6591271..3d2e2ae 100644
--- a/media/extractors/mp4/ItemTable.h
+++ b/media/extractors/mp4/ItemTable.h
@@ -49,12 +49,12 @@
status_t parse(uint32_t type, off64_t offset, size_t size);
bool isValid() { return mImageItemsValid; }
- sp<MetaData> getImageMeta();
uint32_t countImages() const;
- status_t findPrimaryImage(uint32_t *imageIndex);
- status_t findThumbnail(uint32_t *thumbnailIndex);
+ sp<MetaData> getImageMeta(const uint32_t imageIndex);
+ status_t findImageItem(const uint32_t imageIndex, uint32_t *itemIndex);
+ status_t findThumbnailItem(const uint32_t imageIndex, uint32_t *itemIndex);
status_t getImageOffsetAndSize(
- uint32_t *imageIndex, off64_t *offset, size_t *size);
+ uint32_t *itemIndex, off64_t *offset, size_t *size);
protected:
~ItemTable();
@@ -78,6 +78,7 @@
bool mImageItemsValid;
uint32_t mCurrentItemIndex;
KeyedVector<uint32_t, ImageItem> mItemIdToItemMap;
+ Vector<uint32_t> mDisplayables;
status_t parseIlocBox(off64_t offset, size_t size);
status_t parseIinfBox(off64_t offset, size_t size);
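// --- Illustrative sketch, not part of the diff above: the displayables bookkeeping
// the ItemTable change introduces, using std::map/std::vector instead of
// KeyedVector/Vector and storing item ids rather than map indices. The hidden flag
// ((flags & 1) from the 'infe' box, plus the forced-hidden 'thmb'/'auxl'/'dimg'
// references) and the primary-item fallback follow the patch; the type names here
// are hypothetical.
#include <cstdint>
#include <map>
#include <vector>

struct ImageItemSketch {
    uint32_t itemId = 0;
    bool hidden = false;     // thumbnails, auxiliary images and grid tiles end up hidden
};

struct ItemTableSketch {
    std::map<uint32_t, ImageItemSketch> itemIdToItem;
    std::vector<uint32_t> displayables;   // item ids exposed as image "tracks"
    uint32_t primaryItemId = 0;

    bool buildDisplayables() {
        bool foundPrimary = false;
        for (const auto& entry : itemIdToItem) {
            const bool isPrimary = (entry.first == primaryItemId);
            // Expose every non-hidden item; keep the primary even if it is marked
            // hidden (e.g. when the primary item points at a thumbnail).
            if (!entry.second.hidden || isPrimary) {
                displayables.push_back(entry.first);
            }
            foundPrimary |= isPrimary;
        }
        if (displayables.empty()) {
            return false;                 // ERROR_MALFORMED in the extractor
        }
        if (!foundPrimary) {
            // Invalid primary item id: fall back to the first displayable.
            primaryItemId = displayables.front();
        }
        return true;
    }
};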
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index ede7e84..6671956 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -138,7 +138,7 @@
uint8_t *mSrcBuffer;
- bool mIsHEIF;
+ bool mIsHeif;
sp<ItemTable> mItemTable;
size_t parseNALSize(const uint8_t *data) const;
@@ -338,7 +338,7 @@
return false;
}
-MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source)
+MPEG4Extractor::MPEG4Extractor(const sp<DataSource> &source, const char *mime)
: mMoofOffset(0),
mMoofFound(false),
mMdatFound(false),
@@ -346,12 +346,15 @@
mInitCheck(NO_INIT),
mHeaderTimescale(0),
mIsQT(false),
- mIsHEIF(false),
+ mIsHeif(false),
+ mIsHeifSequence(false),
+ mPreferHeif(mime != NULL && !strcasecmp(mime, MEDIA_MIMETYPE_CONTAINER_HEIF)),
mFirstTrack(NULL),
mLastTrack(NULL),
mFileMetaData(new MetaData),
mFirstSINF(NULL),
mIsDrm(false) {
+ ALOGV("mime=%s, mPreferHeif=%d", mime, mPreferHeif);
}
MPEG4Extractor::~MPEG4Extractor() {
@@ -560,8 +563,9 @@
status_t err;
bool sawMoovOrSidx = false;
- while (!((sawMoovOrSidx && (mMdatFound || mMoofFound)) ||
- (mIsHEIF && (mItemTable != NULL) && mItemTable->isValid()))) {
+ while (!((!mIsHeif && sawMoovOrSidx && (mMdatFound || mMoofFound)) ||
+ (mIsHeif && (mPreferHeif || !mIsHeifSequence)
+ && (mItemTable != NULL) && mItemTable->isValid()))) {
off64_t orig_offset = offset;
err = parseChunk(&offset, 0);
@@ -578,12 +582,47 @@
}
}
+ if (mIsHeif) {
+ uint32_t imageCount = mItemTable->countImages();
+ if (imageCount == 0) {
+ ALOGE("found no image in heif!");
+ } else {
+ for (uint32_t imageIndex = 0; imageIndex < imageCount; imageIndex++) {
+ sp<MetaData> meta = mItemTable->getImageMeta(imageIndex);
+ if (meta == NULL) {
+ ALOGE("heif image %u has no meta!", imageIndex);
+ continue;
+ }
+
+ ALOGV("adding HEIF image track %u", imageIndex);
+ Track *track = new Track;
+ track->next = NULL;
+ if (mLastTrack != NULL) {
+ mLastTrack->next = track;
+ } else {
+ mFirstTrack = track;
+ }
+ mLastTrack = track;
+
+ track->meta = meta;
+ track->meta->setInt32(kKeyTrackID, imageIndex);
+ track->includes_expensive_metadata = false;
+ track->skipTrack = false;
+ track->timescale = 0;
+ }
+ }
+ }
+
if (mInitCheck == OK) {
if (findTrackByMimePrefix("video/") != NULL) {
mFileMetaData->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
} else if (findTrackByMimePrefix("audio/") != NULL) {
mFileMetaData->setCString(kKeyMIMEType, "audio/mp4");
+ } else if (findTrackByMimePrefix(
+ MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC) != NULL) {
+ mFileMetaData->setCString(
+ kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_HEIF);
} else {
mFileMetaData->setCString(kKeyMIMEType, "application/octet-stream");
}
@@ -614,28 +653,6 @@
free(buf);
}
- if (mIsHEIF) {
- sp<MetaData> meta = mItemTable->getImageMeta();
- if (meta == NULL) {
- return ERROR_MALFORMED;
- }
-
- Track *track = mLastTrack;
- if (track != NULL) {
- ALOGW("track is set before metadata is fully processed");
- } else {
- track = new Track;
- track->next = NULL;
- mFirstTrack = mLastTrack = track;
- }
-
- track->meta = meta;
- track->meta->setInt32(kKeyTrackID, 0);
- track->includes_expensive_metadata = false;
- track->skipTrack = false;
- track->timescale = 0;
- }
-
return mInitCheck;
}
@@ -1037,6 +1054,7 @@
}
isTrack = true;
+ ALOGV("adding new track");
Track *track = new Track;
track->next = NULL;
if (mLastTrack) {
@@ -1084,6 +1102,7 @@
}
if (mLastTrack->skipTrack) {
+ ALOGV("skipping this track...");
Track *cur = mFirstTrack;
if (cur == mLastTrack) {
@@ -1260,6 +1279,25 @@
break;
}
+ case FOURCC('t', 'r', 'e', 'f'):
+ {
+ *offset += chunk_size;
+
+ if (mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ // Skip thumbnail track for now since we don't have an
+ // API to retrieve it yet.
+ // The thumbnail track can't be accessed by negative index or time,
+ // because each timed sample has its own corresponding thumbnail
+ // in the thumbnail track. We'll need a dedicated API to retrieve
+ // the thumbnail at a given time instead.
+ mLastTrack->skipTrack = true;
+
+ break;
+ }
+
case FOURCC('p', 's', 's', 'h'):
{
*offset += chunk_size;
@@ -1758,6 +1796,8 @@
mLastTrack->meta->setInt32(kKeyFrameRate, frameRate);
}
}
+ ALOGV("setting frame count %zu", nSamples);
+ mLastTrack->meta->setInt32(kKeyFrameCount, nSamples);
}
}
@@ -2089,7 +2129,7 @@
case FOURCC('i', 'r', 'e', 'f'):
case FOURCC('i', 'p', 'r', 'o'):
{
- if (mIsHEIF) {
+ if (mIsHeif) {
if (mItemTable == NULL) {
mItemTable = new ItemTable(mDataSource);
}
@@ -2469,10 +2509,17 @@
if (brandSet.count(FOURCC('q', 't', ' ', ' ')) > 0) {
mIsQT = true;
- } else if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
- && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
- mIsHEIF = true;
- ALOGV("identified HEIF image");
+ } else {
+ if (brandSet.count(FOURCC('m', 'i', 'f', '1')) > 0
+ && brandSet.count(FOURCC('h', 'e', 'i', 'c')) > 0) {
+ mIsHeif = true;
+ ALOGV("identified HEIF image");
+ }
+ if (brandSet.count(FOURCC('m', 's', 'f', '1')) > 0
+ && brandSet.count(FOURCC('h', 'e', 'v', 'c')) > 0) {
+ mIsHeifSequence = true;
+ ALOGV("identified HEIF image sequence");
+ }
}
*offset = stop_offset;
@@ -3391,6 +3438,7 @@
return NULL;
}
+ sp<ItemTable> itemTable;
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
uint32_t type;
const void *data;
@@ -3404,7 +3452,8 @@
if (size < 7 || ptr[0] != 1) { // configurationVersion == 1
return NULL;
}
- } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)) {
+ } else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
uint32_t type;
const void *data;
size_t size;
@@ -3417,11 +3466,14 @@
if (size < 22 || ptr[0] != 1) { // configurationVersion == 1
return NULL;
}
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
+ itemTable = mItemTable;
+ }
}
sp<MPEG4Source> source = new MPEG4Source(this,
track->meta, mDataSource, track->timescale, track->sampleTable,
- mSidxEntries, trex, mMoofOffset, mItemTable);
+ mSidxEntries, trex, mMoofOffset, itemTable);
if (source->init() != OK) {
return NULL;
}
@@ -3849,7 +3901,7 @@
mBuffer(NULL),
mWantsNALFragments(false),
mSrcBuffer(NULL),
- mIsHEIF(itemTable != NULL),
+ mIsHeif(itemTable != NULL),
mItemTable(itemTable) {
memset(&mTrackFragmentHeaderInfo, 0, sizeof(mTrackFragmentHeaderInfo));
@@ -3871,7 +3923,8 @@
CHECK(success);
mIsAVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
- mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ mIsHEVC = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC) ||
+ !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
if (mIsAVC) {
uint32_t type;
@@ -4625,15 +4678,19 @@
int64_t seekTimeUs;
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- if (mIsHEIF) {
+ if (mIsHeif) {
CHECK(mSampleTable == NULL);
CHECK(mItemTable != NULL);
+ int32_t imageIndex;
+ if (!mFormat->findInt32(kKeyTrackID, &imageIndex)) {
+ return ERROR_MALFORMED;
+ }
status_t err;
if (seekTimeUs >= 0) {
- err = mItemTable->findPrimaryImage(&mCurrentSampleIndex);
+ err = mItemTable->findImageItem(imageIndex, &mCurrentSampleIndex);
} else {
- err = mItemTable->findThumbnail(&mCurrentSampleIndex);
+ err = mItemTable->findThumbnailItem(imageIndex, &mCurrentSampleIndex);
}
if (err != OK) {
return err;
@@ -4651,6 +4708,9 @@
case ReadOptions::SEEK_CLOSEST:
findFlags = SampleTable::kFlagClosest;
break;
+ case ReadOptions::SEEK_FRAME_INDEX:
+ findFlags = SampleTable::kFlagFrameIndex;
+ break;
default:
CHECK(!"Should not be here.");
break;
@@ -4661,7 +4721,8 @@
seekTimeUs, 1000000, mTimescale,
&sampleIndex, findFlags);
- if (mode == ReadOptions::SEEK_CLOSEST) {
+ if (mode == ReadOptions::SEEK_CLOSEST
+ || mode == ReadOptions::SEEK_FRAME_INDEX) {
// We found the closest sample already, now we want the sync
// sample preceding it (or the sample itself of course), even
// if the subsequent sync sample is closer.
@@ -4693,7 +4754,8 @@
return err;
}
- if (mode == ReadOptions::SEEK_CLOSEST) {
+ if (mode == ReadOptions::SEEK_CLOSEST
+ || mode == ReadOptions::SEEK_FRAME_INDEX) {
targetSampleTimeUs = (sampleTime * 1000000ll) / mTimescale;
}
@@ -4729,7 +4791,7 @@
newBuffer = true;
status_t err;
- if (!mIsHEIF) {
+ if (!mIsHeif) {
err = mSampleTable->getMetaDataForSample(
mCurrentSampleIndex, &offset, &size, &cts, &isSyncSample, &stts);
} else {
@@ -5316,7 +5378,8 @@
|| !memcmp(header, "ftypisom", 8) || !memcmp(header, "ftypM4V ", 8)
|| !memcmp(header, "ftypM4A ", 8) || !memcmp(header, "ftypf4v ", 8)
|| !memcmp(header, "ftypkddi", 8) || !memcmp(header, "ftypM4VP", 8)
- || !memcmp(header, "ftypmif1", 8) || !memcmp(header, "ftypheic", 8)) {
+ || !memcmp(header, "ftypmif1", 8) || !memcmp(header, "ftypheic", 8)
+ || !memcmp(header, "ftypmsf1", 8) || !memcmp(header, "ftyphevc", 8)) {
*mimeType = MEDIA_MIMETYPE_CONTAINER_MPEG4;
*confidence = 0.4;
@@ -5347,6 +5410,8 @@
FOURCC('3', 'g', '2', 'b'),
FOURCC('m', 'i', 'f', '1'), // HEIF image
FOURCC('h', 'e', 'i', 'c'), // HEIF image
+ FOURCC('m', 's', 'f', '1'), // HEIF image sequence
+ FOURCC('h', 'e', 'v', 'c'), // HEIF image sequence
};
for (size_t i = 0;
diff --git a/media/extractors/mp4/MPEG4Extractor.h b/media/extractors/mp4/MPEG4Extractor.h
index c634796..d4f17e3 100644
--- a/media/extractors/mp4/MPEG4Extractor.h
+++ b/media/extractors/mp4/MPEG4Extractor.h
@@ -52,7 +52,7 @@
class MPEG4Extractor : public MediaExtractor {
public:
// Extractor assumes ownership of "source".
- explicit MPEG4Extractor(const sp<DataSource> &source);
+ explicit MPEG4Extractor(const sp<DataSource> &source, const char *mime = NULL);
virtual size_t countTracks();
virtual sp<MediaSource> getTrack(size_t index);
@@ -103,7 +103,9 @@
status_t mInitCheck;
uint32_t mHeaderTimescale;
bool mIsQT;
- bool mIsHEIF;
+ bool mIsHeif;
+ bool mIsHeifSequence;
+ bool mPreferHeif;
Track *mFirstTrack, *mLastTrack;
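// --- Illustrative sketch, not part of the diff above: the 'ftyp' brand handling added
// to MPEG4Extractor. 'mif1' + 'heic' marks a HEIF still image, 'msf1' + 'hevc' marks a
// HEIF image sequence, and image items are only parsed to completion when the caller
// asked for HEIF (mPreferHeif) or the file is not also a sequence. The FOURCC packing
// and brand names come from the patch; the helper names are hypothetical.
#include <cstdint>
#include <set>

constexpr uint32_t fourcc(char a, char b, char c, char d) {
    return (uint32_t(uint8_t(a)) << 24) | (uint32_t(uint8_t(b)) << 16) |
           (uint32_t(uint8_t(c)) << 8)  |  uint32_t(uint8_t(d));
}

struct HeifBrands {
    bool isHeif = false;           // still image(s):  'mif1' + 'heic'
    bool isHeifSequence = false;   // image sequence:  'msf1' + 'hevc'
};

HeifBrands classifyBrands(const std::set<uint32_t>& brands) {
    HeifBrands out;
    if (brands.count(fourcc('m', 'i', 'f', '1')) && brands.count(fourcc('h', 'e', 'i', 'c'))) {
        out.isHeif = true;
    }
    if (brands.count(fourcc('m', 's', 'f', '1')) && brands.count(fourcc('h', 'e', 'v', 'c'))) {
        out.isHeifSequence = true;
    }
    return out;
}

// Mirrors the readMetaData() stop condition: keep parsing boxes until the item
// table is valid, but only when still images are wanted at all.
bool wantImageItems(const HeifBrands& b, bool preferHeif) {
    return b.isHeif && (preferHeif || !b.isHeifSequence);
}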
diff --git a/media/extractors/mp4/SampleTable.cpp b/media/extractors/mp4/SampleTable.cpp
index fe25e95..378d63a 100644
--- a/media/extractors/mp4/SampleTable.cpp
+++ b/media/extractors/mp4/SampleTable.cpp
@@ -724,6 +724,14 @@
return ERROR_OUT_OF_RANGE;
}
+ if (flags == kFlagFrameIndex) {
+ if (req_time >= mNumSampleSizes) {
+ return ERROR_OUT_OF_RANGE;
+ }
+ *sample_index = mSampleTimeEntries[req_time].mSampleIndex;
+ return OK;
+ }
+
uint32_t left = 0;
uint32_t right_plus_one = mNumSampleSizes;
while (left < right_plus_one) {
diff --git a/media/extractors/mp4/SampleTable.h b/media/extractors/mp4/SampleTable.h
index eb1a674..466e26b 100644
--- a/media/extractors/mp4/SampleTable.h
+++ b/media/extractors/mp4/SampleTable.h
@@ -72,7 +72,8 @@
enum {
kFlagBefore,
kFlagAfter,
- kFlagClosest
+ kFlagClosest,
+ kFlagFrameIndex,
};
status_t findSampleAtTime(
uint64_t req_time, uint64_t scale_num, uint64_t scale_den,
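// --- Illustrative sketch, not part of the diff above: the kFlagFrameIndex branch added
// to SampleTable::findSampleAtTime(). For a SEEK_FRAME_INDEX request the incoming
// "time" is reinterpreted as a zero-based index into the time-ordered sample entries
// rather than being binary-searched as a timestamp. The entry layout and names here
// are simplified/hypothetical; only the bounds check and index lookup mirror the patch.
#include <cstdint>
#include <vector>

struct SampleTimeEntry { uint32_t sampleIndex; uint64_t compositionTime; };

enum class FindFlag { Before, After, Closest, FrameIndex };

bool findSample(const std::vector<SampleTimeEntry>& entries,
                uint64_t reqTimeOrIndex, FindFlag flag, uint32_t* outSampleIndex) {
    if (flag == FindFlag::FrameIndex) {
        if (reqTimeOrIndex >= entries.size()) {
            return false;                              // ERROR_OUT_OF_RANGE
        }
        *outSampleIndex = entries[reqTimeOrIndex].sampleIndex;
        return true;
    }
    // Time-based lookup (binary search over compositionTime) elided; it is
    // unchanged by the patch.
    return false;
}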
diff --git a/media/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
index abe2054..4f61e16 100644
--- a/media/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -512,6 +512,8 @@
--index;
}
break;
+ default:
+ return ERROR_UNSUPPORTED;
}
if (!shouldSeekBeyond || mOffset <= mSeekSyncPoints->valueAt(index)) {
int64_t actualSeekTimeUs = mSeekSyncPoints->keyAt(index);
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index 6e60f24..f00f7a8 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -28,3 +28,10 @@
first_version: "26",
unversioned_until: "current",
}
+
+cc_library_headers {
+ name: "libaaudio_headers",
+ export_include_dirs: ["include"],
+}
+
+subdirs = ["*"]
diff --git a/media/libaaudio/Android.mk b/media/libaaudio/Android.mk
deleted file mode 100644
index 5053e7d..0000000
--- a/media/libaaudio/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/Android.bp b/media/libaaudio/examples/Android.bp
new file mode 100644
index 0000000..f2e00a7
--- /dev/null
+++ b/media/libaaudio/examples/Android.bp
@@ -0,0 +1,4 @@
+cc_library_headers {
+ name: "libaaudio_example_utils",
+ export_include_dirs: ["."],
+}
diff --git a/media/libaaudio/examples/Android.mk b/media/libaaudio/examples/Android.mk
deleted file mode 100644
index 5053e7d..0000000
--- a/media/libaaudio/examples/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
deleted file mode 100644
index 5053e7d..0000000
--- a/media/libaaudio/examples/input_monitor/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/loopback/Android.mk b/media/libaaudio/examples/loopback/Android.mk
deleted file mode 100644
index 5053e7d..0000000
--- a/media/libaaudio/examples/loopback/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/utils/AAudioArgsParser.h b/media/libaaudio/examples/utils/AAudioArgsParser.h
index ada37e2..142b295 100644
--- a/media/libaaudio/examples/utils/AAudioArgsParser.h
+++ b/media/libaaudio/examples/utils/AAudioArgsParser.h
@@ -17,6 +17,8 @@
#ifndef AAUDIO_EXAMPLE_ARGS_PARSER_H
#define AAUDIO_EXAMPLE_ARGS_PARSER_H
+#define MAX_CHANNELS 8
+
#include <cctype>
#include <unistd.h>
#include <stdio.h>
@@ -39,6 +41,10 @@
}
void setChannelCount(int32_t channelCount) {
+ if (channelCount > MAX_CHANNELS) {
+ printf("Sorry, MAX of %d channels!\n", MAX_CHANNELS);
+ channelCount = MAX_CHANNELS;
+ }
mChannelCount = channelCount;
}
diff --git a/media/libaaudio/examples/utils/AAudioSimplePlayer.h b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
index 606c4ba..1061e42 100644
--- a/media/libaaudio/examples/utils/AAudioSimplePlayer.h
+++ b/media/libaaudio/examples/utils/AAudioSimplePlayer.h
@@ -19,11 +19,10 @@
#ifndef AAUDIO_SIMPLE_PLAYER_H
#define AAUDIO_SIMPLE_PLAYER_H
-#include <unistd.h>
#include <sched.h>
+#include <unistd.h>
#include <aaudio/AAudio.h>
-#include <atomic>
#include "AAudioArgsParser.h"
#include "SineGenerator.h"
@@ -36,7 +35,7 @@
// How long to sleep in a callback to cause an intentional glitch. For testing.
#define FORCED_UNDERRUN_SLEEP_MICROS (10 * 1000)
-#define MAX_TIMESTAMPS 16
+#define MAX_TIMESTAMPS 16
typedef struct Timestamp {
int64_t position;
@@ -70,13 +69,6 @@
}
// TODO Extract a common base class for record and playback.
- /**
- * Also known as "sample rate"
- * Only call this after open() has been called.
- */
- int32_t getFramesPerSecond() const {
- return getSampleRate(); // alias
- }
/**
* Only call this after open() has been called.
@@ -172,6 +164,7 @@
result = AAudioStreamBuilder_openStream(builder, &mStream);
AAudioStreamBuilder_delete(builder);
+
return result;
}
@@ -212,13 +205,35 @@
aaudio_result_t result = AAudioStream_requestStop(mStream);
if (result != AAUDIO_OK) {
printf("ERROR - AAudioStream_requestStop() returned %d %s\n",
- result, AAudio_convertResultToText(result));
+ result, AAudio_convertResultToText(result));
}
int32_t xRunCount = AAudioStream_getXRunCount(mStream);
printf("AAudioStream_getXRunCount %d\n", xRunCount);
return result;
}
+ // Pause the stream. AAudio will stop calling your callback function.
+ aaudio_result_t pause() {
+ aaudio_result_t result = AAudioStream_requestPause(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestPause() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+ // Flush the stream. AAudio will stop calling your callback function.
+ aaudio_result_t flush() {
+ aaudio_result_t result = AAudioStream_requestFlush(mStream);
+ if (result != AAUDIO_OK) {
+ printf("ERROR - AAudioStream_requestFlush() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
AAudioStream *getStream() const {
return mStream;
}
@@ -232,23 +247,49 @@
typedef struct SineThreadedData_s {
- SineGenerator sineOsc1;
- SineGenerator sineOsc2;
- Timestamp timestamps[MAX_TIMESTAMPS];
- int64_t framesTotal = 0;
- int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
- int32_t minNumFrames = INT32_MAX;
- int32_t maxNumFrames = 0;
- int32_t timestampCount = 0; // in timestamps
+ SineGenerator sineOscillators[MAX_CHANNELS];
+ Timestamp timestamps[MAX_TIMESTAMPS];
+ int64_t framesTotal = 0;
+ int64_t nextFrameToGlitch = FORCED_UNDERRUN_PERIOD_FRAMES;
+ int32_t minNumFrames = INT32_MAX;
+ int32_t maxNumFrames = 0;
+ int32_t timestampCount = 0; // in timestamps
+ int32_t sampleRate = 48000;
+ int32_t prefixToneFrames = 0;
+ bool sweepSetup = false;
- int scheduler = 0;
- bool schedulerChecked = false;
- bool forceUnderruns = false;
+ int scheduler = 0;
+ bool schedulerChecked = false;
+ bool forceUnderruns = false;
AAudioSimplePlayer simplePlayer;
int32_t callbackCount = 0;
WakeUp waker{AAUDIO_OK};
+ /**
+ * Set sampleRate first.
+ */
+ void setupSineBlip() {
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ double centerFrequency = 880.0 * (i + 2);
+ sineOscillators[i].setup(centerFrequency, sampleRate);
+ sineOscillators[i].setSweep(centerFrequency, centerFrequency, 0.0);
+ }
+ }
+
+ void setupSineSweeps() {
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ double centerFrequency = 220.0 * (i + 2);
+ sineOscillators[i].setup(centerFrequency, sampleRate);
+ double minFrequency = centerFrequency * 2.0 / 3.0;
+ // Change range slightly so they will go out of phase.
+ double maxFrequency = centerFrequency * 3.0 / 2.0;
+ double sweepSeconds = 5.0 + i;
+ sineOscillators[i].setSweep(minFrequency, maxFrequency, sweepSeconds);
+ }
+ sweepSetup = true;
+ }
+
} SineThreadedData_t;
// Callback function that fills the audio output buffer.
@@ -265,9 +306,11 @@
return AAUDIO_CALLBACK_RESULT_STOP;
}
SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
- sineData->callbackCount++;
- sineData->framesTotal += numFrames;
+ // Play an initial high tone so we can tell whether the beginning was truncated.
+ if (!sineData->sweepSetup && sineData->framesTotal >= sineData->prefixToneFrames) {
+ sineData->setupSineSweeps();
+ }
if (sineData->forceUnderruns) {
if (sineData->framesTotal > sineData->nextFrameToGlitch) {
@@ -301,33 +344,32 @@
}
int32_t samplesPerFrame = AAudioStream_getChannelCount(stream);
- // This code only plays on the first one or two channels.
- // TODO Support arbitrary number of channels.
+
+
+ int numActiveOscilators = (samplesPerFrame > MAX_CHANNELS) ? MAX_CHANNELS : samplesPerFrame;
switch (AAudioStream_getFormat(stream)) {
case AAUDIO_FORMAT_PCM_I16: {
int16_t *audioBuffer = (int16_t *) audioData;
- // Render sine waves as shorts to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ sineData->sineOscillators[i].render(&audioBuffer[i], samplesPerFrame,
+ numFrames);
}
}
- break;
+ break;
case AAUDIO_FORMAT_PCM_FLOAT: {
float *audioBuffer = (float *) audioData;
- // Render sine waves as floats to first channel.
- sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
- // Render sine waves to second channel if there is one.
- if (samplesPerFrame > 1) {
- sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ sineData->sineOscillators[i].render(&audioBuffer[i], samplesPerFrame,
+ numFrames);
}
}
- break;
+ break;
default:
return AAUDIO_CALLBACK_RESULT_STOP;
}
+ sineData->callbackCount++;
+ sineData->framesTotal += numFrames;
return AAUDIO_CALLBACK_RESULT_CONTINUE;
}
diff --git a/media/libaaudio/examples/utils/SineGenerator.h b/media/libaaudio/examples/utils/SineGenerator.h
index a755582..9e6d46d 100644
--- a/media/libaaudio/examples/utils/SineGenerator.h
+++ b/media/libaaudio/examples/utils/SineGenerator.h
@@ -31,20 +31,20 @@
}
void setSweep(double frequencyLow, double frequencyHigh, double seconds) {
- mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
- mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
-
- double numFrames = seconds * mFrameRate;
- mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
- mDownScaler = 1.0 / mUpScaler;
- mGoingUp = true;
- mSweeping = true;
+ mSweeping = seconds > 0.0;
+ if (mSweeping) {
+ mPhaseIncrementLow = frequencyLow * M_PI * 2 / mFrameRate;
+ mPhaseIncrementHigh = frequencyHigh * M_PI * 2 / mFrameRate;
+ double numFrames = seconds * mFrameRate;
+ mUpScaler = pow((frequencyHigh / frequencyLow), (1.0 / numFrames));
+ mDownScaler = 1.0 / mUpScaler;
+ }
}
void render(int16_t *buffer, int32_t channelStride, int32_t numFrames) {
int sampleIndex = 0;
for (int i = 0; i < numFrames; i++) {
- buffer[sampleIndex] = (int16_t) (32767 * sin(mPhase) * mAmplitude);
+ buffer[sampleIndex] = (int16_t) (INT16_MAX * sin(mPhase) * mAmplitude);
sampleIndex += channelStride;
advancePhase();
}
@@ -61,6 +61,7 @@
void setAmplitude(double amplitude) {
mAmplitude = amplitude;
}
+
double getAmplitude() const {
return mAmplitude;
}
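// --- Illustrative sketch, not part of the diff above: the multi-channel rendering
// pattern the AAudio example changes switch to. One oscillator drives each channel of
// an interleaved buffer, writing at a stride of samplesPerFrame, with the active count
// clamped to MAX_CHANNELS (8). The bare phase-accumulator oscillator below stands in
// for SineGenerator; its names are hypothetical.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

constexpr int kMaxChannels = 8;                 // mirrors MAX_CHANNELS in AAudioArgsParser.h
constexpr double kTwoPi = 2.0 * 3.14159265358979323846;

struct Osc {
    double phase = 0.0;
    double phaseIncrement = 0.0;                // kTwoPi * frequency / sampleRate

    void render(int16_t* out, int32_t channelStride, int32_t numFrames) {
        for (int32_t i = 0; i < numFrames; ++i) {
            out[i * channelStride] = static_cast<int16_t>(INT16_MAX * 0.2 * std::sin(phase));
            phase += phaseIncrement;
            if (phase > kTwoPi) phase -= kTwoPi;
        }
    }
};

// Fill one callback's worth of interleaved PCM_I16 frames; oscs must hold at least
// min(samplesPerFrame, kMaxChannels) oscillators.
void renderFrames(int16_t* audioBuffer, int32_t samplesPerFrame,
                  int32_t numFrames, std::vector<Osc>& oscs) {
    const int active = std::min<int>(samplesPerFrame, kMaxChannels);
    for (int ch = 0; ch < active; ++ch) {
        // &audioBuffer[ch] starts at channel ch; the stride skips over the other channels.
        oscs[ch].render(&audioBuffer[ch], samplesPerFrame, numFrames);
    }
}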
diff --git a/media/libaaudio/examples/write_sine/Android.mk b/media/libaaudio/examples/write_sine/Android.mk
deleted file mode 100644
index 5053e7d..0000000
--- a/media/libaaudio/examples/write_sine/Android.mk
+++ /dev/null
@@ -1 +0,0 @@
-include $(call all-subdir-makefiles)
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 677fb6c..65d98d1 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -48,6 +48,7 @@
int32_t framesToPlay = 0;
int32_t framesLeft = 0;
int32_t xRunCount = 0;
+ int numActiveOscilators = 0;
float *floatData = nullptr;
int16_t *shortData = nullptr;
@@ -77,8 +78,8 @@
actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
- myData.sineOsc1.setup(440.0, actualSampleRate);
- myData.sineOsc2.setup(660.0, actualSampleRate);
+ myData.sampleRate = actualSampleRate;
+ myData.setupSineSweeps();
// Some DMA might use very short bursts of 16 frames. We don't need to write such small
// buffers. But it helps to use a multiple of the burst size for predictable scheduling.
@@ -117,19 +118,18 @@
// Play for a while.
framesToPlay = actualSampleRate * argParser.getDurationSeconds();
framesLeft = framesToPlay;
+ numActiveOscilators = (actualChannelCount > MAX_CHANNELS) ? MAX_CHANNELS : actualChannelCount;
while (framesLeft > 0) {
-
+ // Render as FLOAT or PCM
if (actualDataFormat == AAUDIO_FORMAT_PCM_FLOAT) {
- // Render sine waves to left and right channels.
- myData.sineOsc1.render(&floatData[0], actualChannelCount, framesPerWrite);
- if (actualChannelCount > 1) {
- myData.sineOsc2.render(&floatData[1], actualChannelCount, framesPerWrite);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ myData.sineOscillators[i].render(&floatData[i], actualChannelCount,
+ framesPerWrite);
}
} else if (actualDataFormat == AAUDIO_FORMAT_PCM_I16) {
- // Render sine waves to left and right channels.
- myData.sineOsc1.render(&shortData[0], actualChannelCount, framesPerWrite);
- if (actualChannelCount > 1) {
- myData.sineOsc2.render(&shortData[1], actualChannelCount, framesPerWrite);
+ for (int i = 0; i < numActiveOscilators; ++i) {
+ myData.sineOscillators[i].render(&shortData[i], actualChannelCount,
+ framesPerWrite);
}
}
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
index 4f9cde6..c2dd7af 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -28,7 +28,6 @@
#include <aaudio/AAudio.h>
#include "AAudioExampleUtils.h"
#include "AAudioSimplePlayer.h"
-#include "../../utils/AAudioSimplePlayer.h"
/**
* Open stream, play some sine waves, then close the stream.
@@ -36,37 +35,39 @@
* @param argParser
* @return AAUDIO_OK or negative error code
*/
-static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser)
+static aaudio_result_t testOpenPlayClose(AAudioArgsParser &argParser,
+ int32_t loopCount,
+ int32_t prefixToneMsec)
{
SineThreadedData_t myData;
AAudioSimplePlayer &player = myData.simplePlayer;
aaudio_result_t result = AAUDIO_OK;
bool disconnected = false;
+ bool bailOut = false;
int64_t startedAtNanos;
printf("----------------------- run complete test --------------------------\n");
myData.schedulerChecked = false;
myData.callbackCount = 0;
+ // TODO add a command line option for the forceUnderruns
myData.forceUnderruns = false; // set true to test AAudioStream_getXRunCount()
result = player.open(argParser,
SimplePlayerDataCallbackProc, SimplePlayerErrorCallbackProc, &myData);
if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ fprintf(stderr, "ERROR - player.open() returned %s\n",
+ AAudio_convertResultToText(result));
goto error;
}
argParser.compareWithStream(player.getStream());
- // Setup sine wave generators.
- {
- int32_t actualSampleRate = player.getSampleRate();
- myData.sineOsc1.setup(440.0, actualSampleRate);
- myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
- myData.sineOsc1.setAmplitude(0.2);
- myData.sineOsc2.setup(660.0, actualSampleRate);
- myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
- myData.sineOsc2.setAmplitude(0.2);
+ myData.sampleRate = player.getSampleRate();
+ myData.prefixToneFrames = prefixToneMsec * myData.sampleRate / 1000;
+ if (myData.prefixToneFrames > 0) {
+ myData.setupSineBlip();
+ } else {
+ myData.setupSineSweeps();
}
#if 0
@@ -78,42 +79,93 @@
}
#endif
- result = player.start();
- if (result != AAUDIO_OK) {
- fprintf(stderr, "ERROR - player.start() returned %d\n", result);
- goto error;
- }
+ for (int loopIndex = 0; loopIndex < loopCount; loopIndex++) {
+ // Only play data on every other loop so we can hear if there is stale data.
+ double amplitude;
+ int32_t durationSeconds;
+ if ((loopIndex & 1) == 0) {
+ printf("--------------- SINE ------\n");
+ amplitude = 0.2;
+ durationSeconds = argParser.getDurationSeconds();
+ } else {
+ printf("--------------- QUIET -----\n");
+ amplitude = 0.0;
+ durationSeconds = 2; // just wait briefly when quiet
+ }
+ for (int i = 0; i < MAX_CHANNELS; ++i) {
+ myData.sineOscillators[i].setAmplitude(amplitude);
+ }
- // Play a sine wave in the background.
- printf("Sleep for %d seconds while audio plays in a callback thread.\n",
- argParser.getDurationSeconds());
- startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
- for (int second = 0; second < argParser.getDurationSeconds(); second++)
- {
- // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
- long ret = myData.waker.wait(AAUDIO_OK, NANOS_PER_SECOND);
- int64_t millis = (getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
- result = myData.waker.get();
- printf("wait() returns %ld, aaudio_result = %d, at %6d millis"
- ", second = %d, framesWritten = %8d, underruns = %d\n",
- ret, result, (int) millis,
- second,
- (int) AAudioStream_getFramesWritten(player.getStream()),
- (int) AAudioStream_getXRunCount(player.getStream()));
+ result = player.start();
if (result != AAUDIO_OK) {
- if (result == AAUDIO_ERROR_DISCONNECTED) {
- disconnected = true;
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ // Play a sine wave in the background.
+ printf("Sleep for %d seconds while audio plays in a callback thread. %d of %d\n",
+ argParser.getDurationSeconds(), (loopIndex + 1), loopCount);
+ startedAtNanos = getNanoseconds(CLOCK_MONOTONIC);
+ for (int second = 0; second < durationSeconds; second++) {
+ // Sleep a while. Wake up early if there is an error, for example a DISCONNECT.
+ long ret = myData.waker.wait(AAUDIO_OK, NANOS_PER_SECOND);
+ int64_t millis =
+ (getNanoseconds(CLOCK_MONOTONIC) - startedAtNanos) / NANOS_PER_MILLISECOND;
+ result = myData.waker.get();
+ printf("wait() returns %ld, aaudio_result = %d, at %6d millis"
+ ", second = %3d, framesWritten = %8d, underruns = %d\n",
+ ret, result, (int) millis,
+ second,
+ (int) AAudioStream_getFramesWritten(player.getStream()),
+ (int) AAudioStream_getXRunCount(player.getStream()));
+ if (result != AAUDIO_OK) {
+ disconnected = (result == AAUDIO_ERROR_DISCONNECTED);
+ bailOut = true;
+ break;
}
+ }
+ printf("AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+
+ // Alternate between using stop or pause for each sine/quiet pair.
+ // Repeat this pattern: {sine-stop-quiet-stop-sine-pause-quiet-pause}
+ if ((loopIndex & 2) == 0) {
+ printf("STOP, callback # = %d\n", myData.callbackCount);
+ result = player.stop();
+ } else {
+ printf("PAUSE/FLUSH, callback # = %d\n", myData.callbackCount);
+ result = player.pause();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ result = player.flush();
+ }
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ if (bailOut) {
break;
}
- }
- printf("AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
- printf("call stop() callback # = %d\n", myData.callbackCount);
- result = player.stop();
- if (result != AAUDIO_OK) {
- goto error;
+ {
+ aaudio_stream_state_t state = AAudioStream_getState(player.getStream());
+ aaudio_stream_state_t finalState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ int64_t timeoutNanos = 2000 * NANOS_PER_MILLISECOND;
+ result = AAudioStream_waitForStateChange(player.getStream(), state,
+ &finalState, timeoutNanos);
+ printf("waitForStateChange returns %s, state = %s\n",
+ AAudio_convertResultToText(result),
+ AAudio_convertStreamStateToText(finalState));
+ int64_t written = AAudioStream_getFramesWritten(player.getStream());
+ int64_t read = AAudioStream_getFramesRead(player.getStream());
+ printf(" framesWritten = %lld, framesRead = %lld, diff = %d\n",
+ (long long) written,
+ (long long) read,
+ (int) (written - read));
+ }
+
}
+
printf("call close()\n");
result = player.close();
if (result != AAUDIO_OK) {
@@ -147,23 +199,54 @@
return disconnected ? AAUDIO_ERROR_DISCONNECTED : result;
}
+static void usage() {
+ AAudioArgsParser::usage();
+ printf(" -l{count} loopCount start/stop, every other one is silent\n");
+ printf(" -t{msec} play a high pitched tone at the beginning\n");
+}
+
int main(int argc, const char **argv)
{
AAudioArgsParser argParser;
aaudio_result_t result;
+ int32_t loopCount = 1;
+ int32_t prefixToneMsec = 0;
// Make printf print immediately so that debug info is not stuck
// in a buffer if we hang or crash.
setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
- printf("%s - Play a sine sweep using an AAudio callback V0.1.2\n", argv[0]);
+ printf("%s - Play a sine sweep using an AAudio callback V0.1.3\n", argv[0]);
- if (argParser.parseArgs(argc, argv)) {
- return EXIT_FAILURE;
+ for (int i = 1; i < argc; i++) {
+ const char *arg = argv[i];
+ if (argParser.parseArg(arg)) {
+ // Handle options that are not handled by the ArgParser
+ if (arg[0] == '-') {
+ char option = arg[1];
+ switch (option) {
+ case 'l':
+ loopCount = atoi(&arg[2]);
+ break;
+ case 't':
+ prefixToneMsec = atoi(&arg[2]);
+ break;
+ default:
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ } else {
+ usage();
+ exit(EXIT_FAILURE);
+ break;
+ }
+ }
}
// Keep looping until we can complete the test without disconnecting.
- while((result = testOpenPlayClose(argParser)) == AAUDIO_ERROR_DISCONNECTED);
+ while((result = testOpenPlayClose(argParser, loopCount, prefixToneMsec))
+ == AAUDIO_ERROR_DISCONNECTED);
return (result) ? EXIT_FAILURE : EXIT_SUCCESS;
}
diff --git a/media/libaaudio/src/Android.bp b/media/libaaudio/src/Android.bp
new file mode 100644
index 0000000..788833b
--- /dev/null
+++ b/media/libaaudio/src/Android.bp
@@ -0,0 +1,66 @@
+cc_library {
+ name: "libaaudio",
+
+ local_include_dirs: [
+ "binding",
+ "client",
+ "core",
+ "fifo",
+ "legacy",
+ "utility",
+ ],
+ export_include_dirs: ["."],
+ header_libs: ["libaaudio_headers"],
+ export_header_lib_headers: ["libaaudio_headers"],
+
+ srcs: [
+ "core/AudioStream.cpp",
+ "core/AudioStreamBuilder.cpp",
+ "core/AAudioAudio.cpp",
+ "core/AAudioStreamParameters.cpp",
+ "legacy/AudioStreamLegacy.cpp",
+ "legacy/AudioStreamRecord.cpp",
+ "legacy/AudioStreamTrack.cpp",
+ "utility/AAudioUtilities.cpp",
+ "utility/FixedBlockAdapter.cpp",
+ "utility/FixedBlockReader.cpp",
+ "utility/FixedBlockWriter.cpp",
+ "utility/LinearRamp.cpp",
+ "fifo/FifoBuffer.cpp",
+ "fifo/FifoControllerBase.cpp",
+ "client/AudioEndpoint.cpp",
+ "client/AudioStreamInternal.cpp",
+ "client/AudioStreamInternalCapture.cpp",
+ "client/AudioStreamInternalPlay.cpp",
+ "client/IsochronousClockModel.cpp",
+ "binding/AudioEndpointParcelable.cpp",
+ "binding/AAudioBinderClient.cpp",
+ "binding/AAudioStreamRequest.cpp",
+ "binding/AAudioStreamConfiguration.cpp",
+ "binding/IAAudioClient.cpp",
+ "binding/IAAudioService.cpp",
+ "binding/RingBufferParcelable.cpp",
+ "binding/SharedMemoryParcelable.cpp",
+ "binding/SharedRegionParcelable.cpp",
+ ],
+
+ cflags: [
+ "-Wno-unused-parameter",
+ "-Wall",
+ "-Werror",
+
+ // By default, all symbols are hidden.
+ // "-fvisibility=hidden",
+ // AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
+ "-DAAUDIO_API=__attribute__((visibility(\"default\")))",
+ ],
+
+ shared_libs: [
+ "libaudioclient",
+ "liblog",
+ "libcutils",
+ "libutils",
+ "libbinder",
+ "libaudiomanager",
+ ],
+}
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
deleted file mode 100644
index f7a5f9b..0000000
--- a/media/libaaudio/src/Android.mk
+++ /dev/null
@@ -1,132 +0,0 @@
-LOCAL_PATH:= $(call my-dir)
-
-# ======================= STATIC LIBRARY ==========================
-# This is being built because it make AAudio testing very easy with a complete executable.
-# TODO Remove this target later, when not needed.
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := libaaudio
-LOCAL_MODULE_TAGS := optional
-
-LIBAAUDIO_DIR := $(TOP)/frameworks/av/media/libaaudio
-LIBAAUDIO_SRC_DIR := $(LIBAAUDIO_DIR)/src
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/native/include \
- system/core/base/include \
- frameworks/native/media/libaaudio/include/include \
- frameworks/av/media/libaaudio/include \
- frameworks/native/include \
- frameworks/av/media/libaudioclient/include \
- $(LOCAL_PATH) \
- $(LOCAL_PATH)/binding \
- $(LOCAL_PATH)/client \
- $(LOCAL_PATH)/core \
- $(LOCAL_PATH)/fifo \
- $(LOCAL_PATH)/legacy \
- $(LOCAL_PATH)/utility
-
-LOCAL_AIDL_INCLUDES := frameworks/av/media/libaudioclient/aidl
-
-# If you add a file here then also add it below in the SHARED target
-LOCAL_SRC_FILES = \
- core/AudioStream.cpp \
- core/AudioStreamBuilder.cpp \
- core/AAudioAudio.cpp \
- core/AAudioStreamParameters.cpp \
- legacy/AudioStreamLegacy.cpp \
- legacy/AudioStreamRecord.cpp \
- legacy/AudioStreamTrack.cpp \
- utility/AAudioUtilities.cpp \
- utility/FixedBlockAdapter.cpp \
- utility/FixedBlockReader.cpp \
- utility/FixedBlockWriter.cpp \
- utility/LinearRamp.cpp \
- fifo/FifoBuffer.cpp \
- fifo/FifoControllerBase.cpp \
- client/AudioEndpoint.cpp \
- client/AudioStreamInternal.cpp \
- client/AudioStreamInternalCapture.cpp \
- client/AudioStreamInternalPlay.cpp \
- client/IsochronousClockModel.cpp \
- binding/AudioEndpointParcelable.cpp \
- binding/AAudioBinderClient.cpp \
- binding/AAudioStreamRequest.cpp \
- binding/AAudioStreamConfiguration.cpp \
- binding/IAAudioClient.cpp \
- binding/IAAudioService.cpp \
- binding/RingBufferParcelable.cpp \
- binding/SharedMemoryParcelable.cpp \
- binding/SharedRegionParcelable.cpp \
- ../../libaudioclient/aidl/android/media/IAudioRecord.aidl \
- ../../libaudioclient/aidl/android/media/IPlayer.aidl
-
-LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
-
-# By default, all symbols are hidden.
-# LOCAL_CFLAGS += -fvisibility=hidden
-# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
-
-include $(BUILD_STATIC_LIBRARY)
-
-# ======================= SHARED LIBRARY ==========================
-include $(CLEAR_VARS)
-
-LOCAL_MODULE := libaaudio
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/native/include \
- system/core/base/include \
- frameworks/native/media/libaaudio/include/include \
- frameworks/av/media/libaaudio/include \
- $(LOCAL_PATH) \
- $(LOCAL_PATH)/binding \
- $(LOCAL_PATH)/client \
- $(LOCAL_PATH)/core \
- $(LOCAL_PATH)/fifo \
- $(LOCAL_PATH)/legacy \
- $(LOCAL_PATH)/utility
-
-LOCAL_SRC_FILES = core/AudioStream.cpp \
- core/AudioStreamBuilder.cpp \
- core/AAudioAudio.cpp \
- core/AAudioStreamParameters.cpp \
- legacy/AudioStreamLegacy.cpp \
- legacy/AudioStreamRecord.cpp \
- legacy/AudioStreamTrack.cpp \
- utility/AAudioUtilities.cpp \
- utility/FixedBlockAdapter.cpp \
- utility/FixedBlockReader.cpp \
- utility/FixedBlockWriter.cpp \
- utility/LinearRamp.cpp \
- fifo/FifoBuffer.cpp \
- fifo/FifoControllerBase.cpp \
- client/AudioEndpoint.cpp \
- client/AudioStreamInternal.cpp \
- client/AudioStreamInternalCapture.cpp \
- client/AudioStreamInternalPlay.cpp \
- client/IsochronousClockModel.cpp \
- binding/AudioEndpointParcelable.cpp \
- binding/AAudioBinderClient.cpp \
- binding/AAudioStreamRequest.cpp \
- binding/AAudioStreamConfiguration.cpp \
- binding/IAAudioClient.cpp \
- binding/IAAudioService.cpp \
- binding/RingBufferParcelable.cpp \
- binding/SharedMemoryParcelable.cpp \
- binding/SharedRegionParcelable.cpp
-
-LOCAL_CFLAGS += -Wno-unused-parameter -Wall -Werror
-
-# By default, all symbols are hidden.
-# LOCAL_CFLAGS += -fvisibility=hidden
-# AAUDIO_API is used to explicitly export a function or a variable as a visible symbol.
-LOCAL_CFLAGS += -DAAUDIO_API='__attribute__((visibility("default")))'
-
-LOCAL_SHARED_LIBRARIES := libaudioclient liblog libcutils libutils libbinder libaudiomanager
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaaudio/src/utility/MonotonicCounter.h b/media/libaaudio/src/utility/MonotonicCounter.h
index 13c92a2..5833eab 100644
--- a/media/libaaudio/src/utility/MonotonicCounter.h
+++ b/media/libaaudio/src/utility/MonotonicCounter.h
@@ -89,6 +89,18 @@
mCounter32 = 0;
}
+ /**
+ * Round 64-bit counter up to a multiple of the period.
+ *
+ * @param period might be, for example, a buffer capacity
+ */
+ void roundUp64(int32_t period) {
+ if (period > 0) {
+ int64_t numPeriods = (mCounter64 + period - 1) / period;
+ mCounter64 = numPeriods * period;
+ }
+ }
+
private:
int64_t mCounter64 = 0;
int32_t mCounter32 = 0;
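
For illustration, a standalone sketch of the rounding arithmetic roundUp64() uses above (the same ceiling-division idiom, outside the class):

    #include <cassert>
    #include <cstdint>

    // Round counter up to the next multiple of period (ceiling division, then scale back).
    static int64_t roundUpToMultiple(int64_t counter, int32_t period) {
        if (period <= 0) return counter;                // mirrors the guard in roundUp64()
        int64_t numPeriods = (counter + period - 1) / period;
        return numPeriods * period;
    }

    int main() {
        assert(roundUpToMultiple(0,    256) == 0);      // already aligned
        assert(roundUpToMultiple(1,    256) == 256);    // always rounds up, never down
        assert(roundUpToMultiple(256,  256) == 256);
        assert(roundUpToMultiple(1000, 256) == 1024);   // e.g. period = buffer capacity
        return 0;
    }
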
diff --git a/media/libaaudio/tests/Android.bp b/media/libaaudio/tests/Android.bp
new file mode 100644
index 0000000..099f416
--- /dev/null
+++ b/media/libaaudio/tests/Android.bp
@@ -0,0 +1,84 @@
+cc_test {
+ name: "test_aaudio_marshalling",
+ srcs: ["test_marshalling.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_block_adapter",
+ srcs: ["test_block_adapter.cpp"],
+ shared_libs: ["libaaudio"],
+}
+
+cc_test {
+ name: "test_timestamps",
+ srcs: ["test_timestamps.cpp"],
+ header_libs: ["libaaudio_example_utils"],
+ shared_libs: ["libaaudio"],
+}
+
+cc_test {
+ name: "test_linear_ramp",
+ srcs: ["test_linear_ramp.cpp"],
+ shared_libs: ["libaaudio"],
+}
+
+cc_test {
+ name: "test_open_params",
+ srcs: ["test_open_params.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_no_close",
+ srcs: ["test_no_close.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_aaudio_recovery",
+ srcs: ["test_recovery.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_n_streams",
+ srcs: ["test_n_streams.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
+
+cc_test {
+ name: "test_bad_disconnect",
+ srcs: ["test_bad_disconnect.cpp"],
+ shared_libs: [
+ "libaaudio",
+ "libbinder",
+ "libcutils",
+ "libutils",
+ ],
+}
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
deleted file mode 100644
index 4120f7f..0000000
--- a/media/libaaudio/tests/Android.mk
+++ /dev/null
@@ -1,92 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_marshalling.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_aaudio_marshalling
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_block_adapter.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := test_block_adapter
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/examples
-LOCAL_SRC_FILES:= test_timestamps.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := test_timestamps
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_linear_ramp.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio
-LOCAL_MODULE := test_linear_ramp
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_open_params.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_open_params
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_no_close.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_no_close
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_recovery.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_aaudio_recovery
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_n_streams.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_n_streams
-include $(BUILD_NATIVE_TEST)
-
-include $(CLEAR_VARS)
-LOCAL_C_INCLUDES := \
- $(call include-path-for, audio-utils) \
- frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src
-LOCAL_SRC_FILES:= test_bad_disconnect.cpp
-LOCAL_SHARED_LIBRARIES := libaaudio libbinder libcutils libutils
-LOCAL_MODULE := test_bad_disconnect
-include $(BUILD_NATIVE_TEST)
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6206be0..a5d35f9 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1393,14 +1393,14 @@
bool useCaseAllowed = sharedBuffer || transferAllowed;
if (!useCaseAllowed) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, not shared buffer and transfer = %s",
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
convertTransferToText(mTransfer));
}
// sample rates must also match
bool sampleRateAllowed = mSampleRate == mAfSampleRate;
if (!sampleRateAllowed) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, sample rate %u Hz but HAL needs %u Hz",
mSampleRate, mAfSampleRate);
}
@@ -1578,6 +1578,15 @@
// or at least triple-buffering if there is sample rate conversion
const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
maxNotificationFrames = frameCount / nBuffering;
+ // If client requested a fast track but this was denied, then use the smaller maximum.
+ // FMS_20 is the minimum task wakeup period in ms for which CFS operates reliably.
+#define FMS_20 20 // FIXME share a common declaration with the same symbol in Threads.cpp
+ if (mOrigFlags & AUDIO_OUTPUT_FLAG_FAST) {
+ size_t maxNotificationFramesFastDenied = FMS_20 * mSampleRate / 1000;
+ if (maxNotificationFrames > maxNotificationFramesFastDenied) {
+ maxNotificationFrames = maxNotificationFramesFastDenied;
+ }
+ }
}
if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
if (mNotificationFramesAct == 0) {
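
For illustration, the cap introduced above worked through in a small hedged helper (the numbers are examples, not taken from the patch): a denied fast-track request limits the notification period to 20 ms of frames, so at 48 kHz the cap is 20 * 48000 / 1000 = 960 frames even when double-buffering a large frameCount would otherwise allow far more.

    #include <cstddef>
    #include <cstdint>

    // Cap the notification period when a fast track was requested but denied.
    static size_t capNotificationFrames(size_t maxNotificationFrames,
                                        uint32_t sampleRate, bool fastTrackRequested) {
        const uint32_t kFastDeniedPeriodMs = 20;   // FMS_20: minimum reliable CFS wakeup period
        if (fastTrackRequested) {
            const size_t cap = kFastDeniedPeriodMs * sampleRate / 1000;
            if (maxNotificationFrames > cap) {
                maxNotificationFrames = cap;       // e.g. 960 frames at 48 kHz
            }
        }
        return maxNotificationFrames;
    }
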
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
new file mode 100644
index 0000000..700de8e
--- /dev/null
+++ b/media/libaudiohal/Android.bp
@@ -0,0 +1,47 @@
+cc_library_shared {
+ name: "libaudiohal",
+
+ srcs: [
+ "DeviceHalLocal.cpp",
+ "DevicesFactoryHalHybrid.cpp",
+ "DevicesFactoryHalLocal.cpp",
+ "StreamHalLocal.cpp",
+
+ "ConversionHelperHidl.cpp",
+ "HalDeathHandlerHidl.cpp",
+ "DeviceHalHidl.cpp",
+ "DevicesFactoryHalHidl.cpp",
+ "EffectBufferHalHidl.cpp",
+ "EffectHalHidl.cpp",
+ "EffectsFactoryHalHidl.cpp",
+ "StreamHalHidl.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ export_include_dirs: ["include"],
+
+ shared_libs: [
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libhardware",
+ "libbase",
+ "libfmq",
+ "libhwbinder",
+ "libhidlbase",
+ "libhidlmemory",
+ "libhidltransport",
+ "android.hardware.audio@2.0",
+ "android.hardware.audio.common@2.0",
+ "android.hardware.audio.common@2.0-util",
+ "android.hardware.audio.effect@2.0",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libmedia_helper",
+ "libmediautils",
+ ],
+}
diff --git a/media/libaudiohal/Android.mk b/media/libaudiohal/Android.mk
deleted file mode 100644
index 827908e..0000000
--- a/media/libaudiohal/Android.mk
+++ /dev/null
@@ -1,71 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SHARED_LIBRARIES := \
- libaudioutils \
- libcutils \
- liblog \
- libutils \
- libhardware
-
-LOCAL_SRC_FILES := \
- DeviceHalLocal.cpp \
- DevicesFactoryHalHybrid.cpp \
- DevicesFactoryHalLocal.cpp \
- StreamHalLocal.cpp
-
-LOCAL_CFLAGS := -Wall -Werror
-
-ifeq ($(USE_LEGACY_LOCAL_AUDIO_HAL), true)
-
-# Use audiohal directly w/o hwbinder middleware.
-# This is for performance comparison and debugging only.
-
-LOCAL_SRC_FILES += \
- EffectBufferHalLocal.cpp \
- EffectsFactoryHalLocal.cpp \
- EffectHalLocal.cpp
-
-LOCAL_SHARED_LIBRARIES += \
- libeffects
-
-LOCAL_CFLAGS += -DUSE_LEGACY_LOCAL_AUDIO_HAL
-
-else # if !USE_LEGACY_LOCAL_AUDIO_HAL
-
-LOCAL_SRC_FILES += \
- ConversionHelperHidl.cpp \
- HalDeathHandlerHidl.cpp \
- DeviceHalHidl.cpp \
- DevicesFactoryHalHidl.cpp \
- EffectBufferHalHidl.cpp \
- EffectHalHidl.cpp \
- EffectsFactoryHalHidl.cpp \
- StreamHalHidl.cpp
-
-LOCAL_SHARED_LIBRARIES += \
- libbase \
- libfmq \
- libhwbinder \
- libhidlbase \
- libhidlmemory \
- libhidltransport \
- android.hardware.audio@2.0 \
- android.hardware.audio.common@2.0 \
- android.hardware.audio.common@2.0-util \
- android.hardware.audio.effect@2.0 \
- android.hidl.allocator@1.0 \
- android.hidl.memory@1.0 \
- libmedia_helper \
- libmediautils
-
-endif # USE_LEGACY_LOCAL_AUDIO_HAL
-
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_MODULE := libaudiohal
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libaudiohal/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/DevicesFactoryHalHybrid.cpp
index 454b03b..8dc1434 100644
--- a/media/libaudiohal/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/DevicesFactoryHalHybrid.cpp
@@ -19,9 +19,7 @@
#include "DevicesFactoryHalHybrid.h"
#include "DevicesFactoryHalLocal.h"
-#ifndef USE_LEGACY_LOCAL_AUDIO_HAL
#include "DevicesFactoryHalHidl.h"
-#endif
namespace android {
@@ -32,13 +30,7 @@
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
: mLocalFactory(new DevicesFactoryHalLocal()),
- mHidlFactory(
-#ifdef USE_LEGACY_LOCAL_AUDIO_HAL
- nullptr
-#else
- new DevicesFactoryHalHidl()
-#endif
- ) {
+ mHidlFactory(new DevicesFactoryHalHidl()) {
}
DevicesFactoryHalHybrid::~DevicesFactoryHalHybrid() {
diff --git a/media/libaudiohal/EffectBufferHalLocal.cpp b/media/libaudiohal/EffectBufferHalLocal.cpp
deleted file mode 100644
index 7951c8e..0000000
--- a/media/libaudiohal/EffectBufferHalLocal.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectBufferHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <utils/Log.h>
-
-#include "EffectBufferHalLocal.h"
-
-namespace android {
-
-// static
-status_t EffectBufferHalInterface::allocate(
- size_t size, sp<EffectBufferHalInterface>* buffer) {
- *buffer = new EffectBufferHalLocal(size);
- return OK;
-}
-
-// static
-status_t EffectBufferHalInterface::mirror(
- void* external, size_t size, sp<EffectBufferHalInterface>* buffer) {
- *buffer = new EffectBufferHalLocal(external, size);
- return OK;
-}
-
-EffectBufferHalLocal::EffectBufferHalLocal(size_t size)
- : mOwnBuffer(new uint8_t[size]),
- mBufferSize(size), mFrameCountChanged(false),
- mAudioBuffer{0, {mOwnBuffer.get()}} {
-}
-
-EffectBufferHalLocal::EffectBufferHalLocal(void* external, size_t size)
- : mOwnBuffer(nullptr),
- mBufferSize(size), mFrameCountChanged(false),
- mAudioBuffer{0, {external}} {
-}
-
-EffectBufferHalLocal::~EffectBufferHalLocal() {
-}
-
-audio_buffer_t* EffectBufferHalLocal::audioBuffer() {
- return &mAudioBuffer;
-}
-
-void* EffectBufferHalLocal::externalData() const {
- return mAudioBuffer.raw;
-}
-
-void EffectBufferHalLocal::setFrameCount(size_t frameCount) {
- mAudioBuffer.frameCount = frameCount;
- mFrameCountChanged = true;
-}
-
-void EffectBufferHalLocal::setExternalData(void* external) {
- ALOGE_IF(mOwnBuffer != nullptr, "Attempt to set external data for allocated buffer");
- mAudioBuffer.raw = external;
-}
-
-bool EffectBufferHalLocal::checkFrameCountChange() {
- bool result = mFrameCountChanged;
- mFrameCountChanged = false;
- return result;
-}
-
-void EffectBufferHalLocal::update() {
-}
-
-void EffectBufferHalLocal::commit() {
-}
-
-void EffectBufferHalLocal::update(size_t) {
-}
-
-void EffectBufferHalLocal::commit(size_t) {
-}
-
-} // namespace android
diff --git a/media/libaudiohal/EffectBufferHalLocal.h b/media/libaudiohal/EffectBufferHalLocal.h
deleted file mode 100644
index d2b624b..0000000
--- a/media/libaudiohal/EffectBufferHalLocal.h
+++ /dev/null
@@ -1,61 +0,0 @@
-/*
- * Copyright (C) 2017 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
-
-#include <memory>
-
-#include <media/audiohal/EffectBufferHalInterface.h>
-#include <system/audio_effect.h>
-
-namespace android {
-
-class EffectBufferHalLocal : public EffectBufferHalInterface
-{
- public:
- virtual audio_buffer_t* audioBuffer();
- virtual void* externalData() const;
-
- virtual void setExternalData(void* external);
- virtual void setFrameCount(size_t frameCount);
- virtual bool checkFrameCountChange();
-
- virtual void update();
- virtual void commit();
- virtual void update(size_t size);
- virtual void commit(size_t size);
-
- private:
- friend class EffectBufferHalInterface;
-
- std::unique_ptr<uint8_t[]> mOwnBuffer;
- const size_t mBufferSize;
- bool mFrameCountChanged;
- audio_buffer_t mAudioBuffer;
-
- // Can not be constructed directly by clients.
- explicit EffectBufferHalLocal(size_t size);
- EffectBufferHalLocal(void* external, size_t size);
-
- virtual ~EffectBufferHalLocal();
-
- status_t init();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_BUFFER_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectHalLocal.cpp b/media/libaudiohal/EffectHalLocal.cpp
deleted file mode 100644
index dd465c3..0000000
--- a/media/libaudiohal/EffectHalLocal.cpp
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "EffectHalLocal"
-//#define LOG_NDEBUG 0
-
-#include <media/EffectsFactoryApi.h>
-#include <utils/Log.h>
-
-#include "EffectHalLocal.h"
-
-namespace android {
-
-EffectHalLocal::EffectHalLocal(effect_handle_t handle)
- : mHandle(handle) {
-}
-
-EffectHalLocal::~EffectHalLocal() {
- int status = EffectRelease(mHandle);
- ALOGW_IF(status, "Error releasing effect %p: %s", mHandle, strerror(-status));
- mHandle = 0;
-}
-
-status_t EffectHalLocal::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- mInBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalLocal::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- mOutBuffer = buffer;
- return OK;
-}
-
-status_t EffectHalLocal::process() {
- if (mInBuffer == nullptr || mOutBuffer == nullptr) {
- ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
- ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
- return NO_INIT;
- }
- return (*mHandle)->process(mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
-}
-
-status_t EffectHalLocal::processReverse() {
- if ((*mHandle)->process_reverse != NULL) {
- if (mInBuffer == nullptr || mOutBuffer == nullptr) {
- ALOGE_IF(mInBuffer == nullptr, "Input buffer not set");
- ALOGE_IF(mOutBuffer == nullptr, "Output buffer not set");
- return NO_INIT;
- }
- return (*mHandle)->process_reverse(
- mHandle, mInBuffer->audioBuffer(), mOutBuffer->audioBuffer());
- } else {
- return INVALID_OPERATION;
- }
-}
-
-status_t EffectHalLocal::command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData) {
- return (*mHandle)->command(mHandle, cmdCode, cmdSize, pCmdData, replySize, pReplyData);
-}
-
-status_t EffectHalLocal::getDescriptor(effect_descriptor_t *pDescriptor) {
- return (*mHandle)->get_descriptor(mHandle, pDescriptor);
-}
-
-status_t EffectHalLocal::close() {
- return OK;
-}
-
-} // namespace android
diff --git a/media/libaudiohal/EffectHalLocal.h b/media/libaudiohal/EffectHalLocal.h
deleted file mode 100644
index 693fb50..0000000
--- a/media/libaudiohal/EffectHalLocal.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
-
-#include <hardware/audio_effect.h>
-#include <media/audiohal/EffectHalInterface.h>
-
-namespace android {
-
-class EffectHalLocal : public EffectHalInterface
-{
- public:
- // Set the input buffer.
- virtual status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Set the output buffer.
- virtual status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer);
-
- // Effect process function.
- virtual status_t process();
-
- // Process reverse stream function. This function is used to pass
- // a reference stream to the effect engine.
- virtual status_t processReverse();
-
- // Send a command and receive a response to/from effect engine.
- virtual status_t command(uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
- uint32_t *replySize, void *pReplyData);
-
- // Returns the effect descriptor.
- virtual status_t getDescriptor(effect_descriptor_t *pDescriptor);
-
- // Free resources on the remote side.
- virtual status_t close();
-
- // Whether it's a local implementation.
- virtual bool isLocal() const { return true; }
-
- effect_handle_t handle() const { return mHandle; }
-
- private:
- effect_handle_t mHandle;
- sp<EffectBufferHalInterface> mInBuffer;
- sp<EffectBufferHalInterface> mOutBuffer;
-
- friend class EffectsFactoryHalLocal;
-
- // Can not be constructed directly by clients.
- explicit EffectHalLocal(effect_handle_t handle);
-
- // The destructor automatically releases the effect.
- virtual ~EffectHalLocal();
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECT_HAL_LOCAL_H
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.cpp b/media/libaudiohal/EffectsFactoryHalLocal.cpp
deleted file mode 100644
index bbdef5d..0000000
--- a/media/libaudiohal/EffectsFactoryHalLocal.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/EffectsFactoryApi.h>
-
-#include "EffectHalLocal.h"
-#include "EffectsFactoryHalLocal.h"
-
-namespace android {
-
-// static
-sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
- return new EffectsFactoryHalLocal();
-}
-
-// static
-bool EffectsFactoryHalInterface::isNullUuid(const effect_uuid_t *pEffectUuid) {
- return EffectIsNullUuid(pEffectUuid);
-}
-
-status_t EffectsFactoryHalLocal::queryNumberEffects(uint32_t *pNumEffects) {
- return EffectQueryNumberEffects(pNumEffects);
-}
-
-status_t EffectsFactoryHalLocal::getDescriptor(
- uint32_t index, effect_descriptor_t *pDescriptor) {
- return EffectQueryEffect(index, pDescriptor);
-}
-
-status_t EffectsFactoryHalLocal::getDescriptor(
- const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- return EffectGetDescriptor(pEffectUuid, pDescriptor);
-}
-
-status_t EffectsFactoryHalLocal::createEffect(
- const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect) {
- effect_handle_t handle;
- int result = EffectCreate(pEffectUuid, sessionId, ioId, &handle);
- if (result == 0) {
- *effect = new EffectHalLocal(handle);
- }
- return result;
-}
-
-status_t EffectsFactoryHalLocal::dumpEffects(int fd) {
- return EffectDumpEffects(fd);
-}
-
-} // namespace android
diff --git a/media/libaudiohal/EffectsFactoryHalLocal.h b/media/libaudiohal/EffectsFactoryHalLocal.h
deleted file mode 100644
index d5b81be..0000000
--- a/media/libaudiohal/EffectsFactoryHalLocal.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2016 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
-
-#include <media/audiohal/EffectsFactoryHalInterface.h>
-
-namespace android {
-
-class EffectsFactoryHalLocal : public EffectsFactoryHalInterface
-{
- public:
- // Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
-
- // Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
-
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
-
- // Creates an effect engine of the specified type.
- // To release the effect engine, it is necessary to release references
- // to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId,
- sp<EffectHalInterface> *effect);
-
- virtual status_t dumpEffects(int fd);
-
- private:
- friend class EffectsFactoryHalInterface;
-
- // Can not be constructed directly by clients.
- EffectsFactoryHalLocal() {}
-
- virtual ~EffectsFactoryHalLocal() {}
-};
-
-} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/StreamHalLocal.cpp b/media/libaudiohal/StreamHalLocal.cpp
index dc17f5c..8d61e24 100644
--- a/media/libaudiohal/StreamHalLocal.cpp
+++ b/media/libaudiohal/StreamHalLocal.cpp
@@ -21,7 +21,6 @@
#include <utils/Log.h>
#include "DeviceHalLocal.h"
-#include "EffectHalLocal.h"
#include "StreamHalLocal.h"
namespace android {
@@ -86,16 +85,14 @@
return OK;
}
-status_t StreamHalLocal::addEffect(sp<EffectHalInterface> effect) {
- LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be added for a local stream");
- return mStream->add_audio_effect(mStream,
- static_cast<EffectHalLocal*>(effect.get())->handle());
+status_t StreamHalLocal::addEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
}
-status_t StreamHalLocal::removeEffect(sp<EffectHalInterface> effect) {
- LOG_ALWAYS_FATAL_IF(!effect->isLocal(), "Only local effects can be removed for a local stream");
- return mStream->remove_audio_effect(mStream,
- static_cast<EffectHalLocal*>(effect.get())->handle());
+status_t StreamHalLocal::removeEffect(sp<EffectHalInterface>) {
+ LOG_ALWAYS_FATAL("Local streams can not have effects");
+ return INVALID_OPERATION;
}
status_t StreamHalLocal::standby() {
diff --git a/media/libeffects/config/include/media/EffectsConfig.h b/media/libeffects/config/include/media/EffectsConfig.h
index 811730c..55b946f 100644
--- a/media/libeffects/config/include/media/EffectsConfig.h
+++ b/media/libeffects/config/include/media/EffectsConfig.h
@@ -32,8 +32,13 @@
namespace android {
namespace effectsConfig {
-/** Default path of effect configuration file. */
-constexpr char DEFAULT_PATH[] = "/vendor/etc/audio_effects.xml";
+/** Default name of the effect configuration file, looked up under DEFAULT_LOCATIONS. */
+constexpr const char* DEFAULT_NAME = "audio_effects.xml";
+
+/** Default directories searched for the effect configuration file.
+ * The /vendor partition is the recommended location; the others are deprecated.
+ */
+constexpr const char* DEFAULT_LOCATIONS[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
/** Directories where the effect libraries will be searched for. */
constexpr const char* LD_EFFECT_LIBRARY_PATH[] =
@@ -91,13 +96,16 @@
/** Parsed config, nullptr if the xml lib could not load the file */
std::unique_ptr<Config> parsedConfig;
size_t nbSkippedElement; //< Number of skipped invalid library, effect or processing chain
+ const char* configPath; //< Path to the loaded configuration
};
/** Parses the provided effect configuration.
* Parsing does not stop at the first invalid element, but continues to the next.
+ * @param[in] path of the configuration file to load
+ * if nullptr, look for DEFAULT_NAME in DEFAULT_LOCATIONS.
* @see ParsingResult::nbSkippedElement
*/
-ParsingResult parse(const char* path = DEFAULT_PATH);
+ParsingResult parse(const char* path = nullptr);
} // namespace effectsConfig
} // namespace android
diff --git a/media/libeffects/config/src/EffectsConfig.cpp b/media/libeffects/config/src/EffectsConfig.cpp
index 97462f8..18c406d 100644
--- a/media/libeffects/config/src/EffectsConfig.cpp
+++ b/media/libeffects/config/src/EffectsConfig.cpp
@@ -20,6 +20,7 @@
#include <cstdint>
#include <functional>
#include <string>
+#include <unistd.h>
#include <tinyxml2.h>
#include <log/log.h>
@@ -85,7 +86,7 @@
constexpr std::enable_if<false, Enum> STREAM_NAME_MAP;
/** All output stream types which support effects.
- * This need to be kept in sink with the xsd streamOutputType.
+ * This needs to be kept in sync with the xsd streamOutputType.
*/
template <>
constexpr std::pair<audio_stream_type_t, const char*> STREAM_NAME_MAP<audio_stream_type_t>[] = {
@@ -102,7 +103,7 @@
};
/** All input stream types which support effects.
- * This need to be kept in sink with the xsd streamOutputType.
+ * This needs to be kept in sync with the xsd streamOutputType.
*/
template <>
constexpr std::pair<audio_source_t, const char*> STREAM_NAME_MAP<audio_source_t>[] = {
@@ -142,7 +143,7 @@
}
/** Find an element in a collection by its name.
- * @return nullptr if not found, the ellements address if found.
+ * @return nullptr if not found, the element's address if found.
*/
template <class T>
T* findByName(const char* name, std::vector<T>& collection) {
@@ -249,15 +250,14 @@
return true;
}
-}; // namespace
-
-ParsingResult parse(const char* path) {
+/** Internal version of the public parse(const char* path) with precondition `path != nullptr`. */
+ParsingResult parseWithPath(const char* path) {
XMLDocument doc;
doc.LoadFile(path);
if (doc.Error()) {
ALOGE("Failed to parse %s: Tinyxml2 error (%d): %s %s", path,
doc.ErrorID(), doc.GetErrorStr1(), doc.GetErrorStr2());
- return {nullptr, 0};
+ return {nullptr, 0, path};
}
auto config = std::make_unique<Config>();
@@ -295,7 +295,29 @@
}
}
}
- return {std::move(config), nbSkippedElements};
+ return {std::move(config), nbSkippedElements, path};
+}
+
+}; // namespace
+
+ParsingResult parse(const char* path) {
+ if (path != nullptr) {
+ return parseWithPath(path);
+ }
+
+ for (std::string location : DEFAULT_LOCATIONS) {
+ std::string defaultPath = location + '/' + DEFAULT_NAME;
+ if (access(defaultPath.c_str(), R_OK) != 0) {
+ continue;
+ }
+ auto result = parseWithPath(defaultPath.c_str());
+ if (result.parsedConfig != nullptr) {
+ return result;
+ }
+ }
+
+ ALOGE("Could not parse effect configuration in any of the default locations.");
+ return {nullptr, 0, nullptr};
}
} // namespace effectsConfig
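
For illustration, a hedged usage sketch of the new default-location lookup (the function name is made up; the include path and LOG_TAG are assumptions):

    #define LOG_TAG "EffectsConfigExample"
    #include <log/log.h>
    #include <media/EffectsConfig.h>

    // Passing nullptr searches /odm/etc, /vendor/etc, /system/etc for audio_effects.xml.
    void loadDefaultEffectsConfig() {
        auto result = android::effectsConfig::parse(nullptr);
        if (result.parsedConfig == nullptr) {
            ALOGE("no usable audio_effects.xml found in the default locations");
            return;
        }
        ALOGI("parsed configuration, %zu invalid elements skipped", result.nbSkippedElement);
    }
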
diff --git a/media/libeffects/factory/EffectsXmlConfigLoader.cpp b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
index 438b787..7a7d431 100644
--- a/media/libeffects/factory/EffectsXmlConfigLoader.cpp
+++ b/media/libeffects/factory/EffectsXmlConfigLoader.cpp
@@ -327,7 +327,7 @@
&gSkippedEffects, &gSubEffectList);
ALOGE_IF(result.nbSkippedElement != 0, "%zu errors during loading of configuration: %s",
- result.nbSkippedElement, path ?: effectsConfig::DEFAULT_PATH);
+ result.nbSkippedElement, result.configPath ?: "No config file found");
return result.nbSkippedElement;
}
diff --git a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
index ee9406d..3d8e982 100644
--- a/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/EffectReverb.cpp
@@ -1889,6 +1889,10 @@
if (param != REVERB_PARAM_PRESET) {
return -EINVAL;
}
+ if (vsize < (int)sizeof(uint16_t)) {
+ android_errorWriteLog(0x534e4554, "67647856");
+ return -EINVAL;
+ }
uint16_t preset = *(uint16_t *)pValue;
ALOGV("set REVERB_PARAM_PRESET, preset %d", preset);
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 303f667..a63a2df 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -25,8 +25,8 @@
#include <drm/drm_framework_common.h>
#include <media/IDataSource.h>
#include <media/mediametadataretriever.h>
-#include <media/stagefright/foundation/ADebug.h>
#include <media/MediaSource.h>
+#include <media/stagefright/foundation/ADebug.h>
#include <private/media/VideoFrame.h>
#include <utils/Log.h>
#include <utils/RefBase.h>
@@ -270,7 +270,9 @@
// it's not, default to HAL_PIXEL_FORMAT_RGB_565.
mOutputColor(HAL_PIXEL_FORMAT_RGB_565),
mCurScanline(0),
- mFrameDecoded(false) {
+ mFrameDecoded(false),
+ mHasImage(false),
+ mHasVideo(false) {
}
HeifDecoderImpl::~HeifDecoderImpl() {
@@ -278,6 +280,8 @@
bool HeifDecoderImpl::init(HeifStream* stream, HeifFrameInfo* frameInfo) {
mFrameDecoded = false;
+ mFrameMemory.clear();
+
sp<HeifDataSource> dataSource = new HeifDataSource(stream);
if (!dataSource->init()) {
return false;
@@ -285,7 +289,7 @@
mDataSource = dataSource;
mRetriever = new MediaMetadataRetriever();
- status_t err = mRetriever->setDataSource(mDataSource, "video/mp4");
+ status_t err = mRetriever->setDataSource(mDataSource, "image/heif");
if (err != OK) {
ALOGE("failed to set data source!");
@@ -295,15 +299,21 @@
}
ALOGV("successfully set data source.");
+ const char* hasImage = mRetriever->extractMetadata(METADATA_KEY_HAS_IMAGE);
const char* hasVideo = mRetriever->extractMetadata(METADATA_KEY_HAS_VIDEO);
- if (!hasVideo || strcasecmp(hasVideo, "yes")) {
- ALOGE("no video: %s", hasVideo ? hasVideo : "null");
- return false;
+
+ mHasImage = hasImage && !strcasecmp(hasImage, "yes");
+ mHasVideo = hasVideo && !strcasecmp(hasVideo, "yes");
+ if (mHasImage) {
+ // image index < 0 to retrieve primary image
+ mFrameMemory = mRetriever->getImageAtIndex(
+ -1, mOutputColor, true /*metaOnly*/);
+ } else if (mHasVideo) {
+ mFrameMemory = mRetriever->getFrameAtTime(0,
+ MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
+ mOutputColor, true /*metaOnly*/);
}
- mFrameMemory = mRetriever->getFrameAtTime(0,
- MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
- mOutputColor, true /*metaOnly*/);
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
@@ -368,8 +378,14 @@
return true;
}
- mFrameMemory = mRetriever->getFrameAtTime(0,
- MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
+ if (mHasImage) {
+ // image index < 0 to retrieve primary image
+ mFrameMemory = mRetriever->getImageAtIndex(-1, mOutputColor);
+ } else if (mHasVideo) {
+ mFrameMemory = mRetriever->getFrameAtTime(0,
+ MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC, mOutputColor);
+ }
+
if (mFrameMemory == nullptr || mFrameMemory->pointer() == nullptr) {
ALOGE("getFrameAtTime: videoFrame is a nullptr");
return false;
diff --git a/media/libheif/HeifDecoderImpl.h b/media/libheif/HeifDecoderImpl.h
index c2e4ff3..406c2c1 100644
--- a/media/libheif/HeifDecoderImpl.h
+++ b/media/libheif/HeifDecoderImpl.h
@@ -55,6 +55,8 @@
android_pixel_format_t mOutputColor;
size_t mCurScanline;
bool mFrameDecoded;
+ bool mHasImage;
+ bool mHasVideo;
};
} // namespace android
diff --git a/media/libmedia/IMediaMetadataRetriever.cpp b/media/libmedia/IMediaMetadataRetriever.cpp
index 5ea2e8b..f725c97 100644
--- a/media/libmedia/IMediaMetadataRetriever.cpp
+++ b/media/libmedia/IMediaMetadataRetriever.cpp
@@ -68,6 +68,8 @@
SET_DATA_SOURCE_FD,
SET_DATA_SOURCE_CALLBACK,
GET_FRAME_AT_TIME,
+ GET_IMAGE_AT_INDEX,
+ GET_FRAME_AT_INDEX,
EXTRACT_ALBUM_ART,
EXTRACT_METADATA,
};
@@ -164,6 +166,55 @@
return interface_cast<IMemory>(reply.readStrongBinder());
}
+ sp<IMemory> getImageAtIndex(int index, int colorFormat, bool metaOnly)
+ {
+ ALOGV("getImageAtIndex: index %d, colorFormat(%d) metaOnly(%d)",
+ index, colorFormat, metaOnly);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(index);
+ data.writeInt32(colorFormat);
+ data.writeInt32(metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_IMAGE_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return NULL;
+ }
+ return interface_cast<IMemory>(reply.readStrongBinder());
+ }
+
+ status_t getFrameAtIndex(std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly)
+ {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaMetadataRetriever::getInterfaceDescriptor());
+ data.writeInt32(frameIndex);
+ data.writeInt32(numFrames);
+ data.writeInt32(colorFormat);
+ data.writeInt32(metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ sendSchedPolicy(data);
+#endif
+ remote()->transact(GET_FRAME_AT_INDEX, data, &reply);
+ status_t ret = reply.readInt32();
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ int retNumFrames = reply.readInt32();
+ if (retNumFrames < numFrames) {
+ numFrames = retNumFrames;
+ }
+ for (int i = 0; i < numFrames; i++) {
+ frames->push_back(interface_cast<IMemory>(reply.readStrongBinder()));
+ }
+ return OK;
+ }
+
sp<IMemory> extractAlbumArt()
{
Parcel data, reply;
@@ -300,6 +351,54 @@
#endif
return NO_ERROR;
} break;
+ case GET_IMAGE_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int index = data.readInt32();
+ int colorFormat = data.readInt32();
+ bool metaOnly = (data.readInt32() != 0);
+ ALOGV("getImageAtIndex: index(%d), colorFormat(%d), metaOnly(%d)",
+ index, colorFormat, metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ sp<IMemory> bitmap = getImageAtIndex(index, colorFormat, metaOnly);
+ if (bitmap != 0) { // Don't send NULL across the binder interface
+ reply->writeInt32(NO_ERROR);
+ reply->writeStrongBinder(IInterface::asBinder(bitmap));
+ } else {
+ reply->writeInt32(UNKNOWN_ERROR);
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
+ case GET_FRAME_AT_INDEX: {
+ CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
+ int frameIndex = data.readInt32();
+ int numFrames = data.readInt32();
+ int colorFormat = data.readInt32();
+ bool metaOnly = (data.readInt32() != 0);
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ setSchedPolicy(data);
+#endif
+ std::vector<sp<IMemory> > frames;
+ status_t err = getFrameAtIndex(
+ &frames, frameIndex, numFrames, colorFormat, metaOnly);
+ reply->writeInt32(err);
+ if (OK == err) {
+ reply->writeInt32(frames.size());
+ for (size_t i = 0; i < frames.size(); i++) {
+ reply->writeStrongBinder(IInterface::asBinder(frames[i]));
+ }
+ }
+#ifndef DISABLE_GROUP_SCHEDULE_HACK
+ restoreSchedPolicy();
+#endif
+ return NO_ERROR;
+ } break;
case EXTRACT_ALBUM_ART: {
CHECK_INTERFACE(IMediaMetadataRetriever, data, reply);
#ifndef DISABLE_GROUP_SCHEDULE_HACK
diff --git a/media/libmedia/include/media/IMediaMetadataRetriever.h b/media/libmedia/include/media/IMediaMetadataRetriever.h
index ea95161..5491535 100644
--- a/media/libmedia/include/media/IMediaMetadataRetriever.h
+++ b/media/libmedia/include/media/IMediaMetadataRetriever.h
@@ -44,6 +44,11 @@
const sp<IDataSource>& dataSource, const char *mime) = 0;
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly) = 0;
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) = 0;
virtual sp<IMemory> extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 1e36ab7..7aea90c 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -55,7 +55,6 @@
// Returns the format of the data output by this media source.
virtual sp<MetaData> getFormat() = 0;
-
// Returns a new buffer of data. Call blocks until a
// buffer is available, an error is encountered or the end of the stream
// is reached.
diff --git a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
index 257002d..fc9e53c 100644
--- a/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
+++ b/media/libmedia/include/media/MediaMetadataRetrieverInterface.h
@@ -22,6 +22,7 @@
#include <media/mediametadataretriever.h>
#include <media/mediascanner.h>
#include <private/media/VideoFrame.h>
+#include <media/stagefright/MediaErrors.h>
namespace android {
@@ -41,9 +42,14 @@
const KeyedVector<String8, String8> *headers = NULL) = 0;
virtual status_t setDataSource(int fd, int64_t offset, int64_t length) = 0;
- virtual status_t setDataSource(const sp<DataSource>& source, const char *mime) = 0;
+ virtual status_t setDataSource(const sp<DataSource>& source, const char *mime) = 0;
virtual VideoFrame* getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) = 0;
+ virtual VideoFrame* getImageAtIndex(
+ int index, int colorFormat, bool metaOnly) = 0;
+ virtual status_t getFrameAtIndex(
+ std::vector<VideoFrame*>* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly);
virtual MediaAlbumArt* extractAlbumArt() = 0;
virtual const char* extractMetadata(int keyCode) = 0;
};
@@ -58,6 +64,13 @@
virtual VideoFrame* getFrameAtTime(
int64_t /*timeUs*/, int /*option*/, int /*colorFormat*/, bool /*metaOnly*/)
{ return NULL; }
+ virtual VideoFrame* getImageAtIndex(
+ int /*index*/, int /*colorFormat*/, bool /*metaOnly*/)
+ { return NULL; }
+ virtual status_t getFrameAtIndex(
+ std::vector<VideoFrame*>* /*frames*/,
+ int /*frameIndex*/, int /*numFrames*/, int /*colorFormat*/, bool /*metaOnly*/)
+ { return ERROR_UNSUPPORTED; }
virtual MediaAlbumArt* extractAlbumArt() { return NULL; }
virtual const char* extractMetadata(int /*keyCode*/) { return NULL; }
};
diff --git a/media/libmedia/include/media/mediametadataretriever.h b/media/libmedia/include/media/mediametadataretriever.h
index 65c266b..3511253 100644
--- a/media/libmedia/include/media/mediametadataretriever.h
+++ b/media/libmedia/include/media/mediametadataretriever.h
@@ -59,6 +59,13 @@
METADATA_KEY_LOCATION = 23,
METADATA_KEY_VIDEO_ROTATION = 24,
METADATA_KEY_CAPTURE_FRAMERATE = 25,
+ METADATA_KEY_HAS_IMAGE = 26,
+ METADATA_KEY_IMAGE_COUNT = 27,
+ METADATA_KEY_IMAGE_PRIMARY = 28,
+ METADATA_KEY_IMAGE_WIDTH = 29,
+ METADATA_KEY_IMAGE_HEIGHT = 30,
+ METADATA_KEY_IMAGE_ROTATION = 31,
+ METADATA_KEY_VIDEO_FRAME_COUNT = 32,
// Add more here...
};
@@ -80,6 +87,11 @@
const sp<IDataSource>& dataSource, const char *mime = NULL);
sp<IMemory> getFrameAtTime(int64_t timeUs, int option,
int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+ sp<IMemory> getImageAtIndex(int index,
+ int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
+ status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames, int frameIndex, int numFrames = 1,
+ int colorFormat = HAL_PIXEL_FORMAT_RGB_565, bool metaOnly = false);
sp<IMemory> extractAlbumArt();
const char* extractMetadata(int keyCode);
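
For illustration, a hedged client-side sketch of the new calls (mirroring how HeifDecoderImpl uses them; the helper name is made up and the defaults come from the header above):

    #include <strings.h>   // strcasecmp
    #include <media/mediametadataretriever.h>

    using namespace android;

    // Returns the decoded primary image, or nullptr if the source has no image track.
    sp<IMemory> fetchPrimaryImage(const sp<IDataSource> &source) {
        sp<MediaMetadataRetriever> retriever = new MediaMetadataRetriever();
        if (retriever->setDataSource(source, "image/heif") != OK) {
            return nullptr;
        }
        const char *hasImage = retriever->extractMetadata(METADATA_KEY_HAS_IMAGE);
        if (hasImage == nullptr || strcasecmp(hasImage, "yes") != 0) {
            return nullptr;    // a caller could fall back to getFrameAtTime() for video
        }
        // index < 0 selects the primary image; colorFormat and metaOnly use the defaults above.
        return retriever->getImageAtIndex(-1 /* index */);
    }
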
diff --git a/media/libmedia/mediametadataretriever.cpp b/media/libmedia/mediametadataretriever.cpp
index 7d27d57..6a4204b 100644
--- a/media/libmedia/mediametadataretriever.cpp
+++ b/media/libmedia/mediametadataretriever.cpp
@@ -154,6 +154,32 @@
return mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
}
+sp<IMemory> MediaMetadataRetriever::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly) {
+ ALOGV("getImageAtIndex: index(%d) colorFormat(%d) metaOnly(%d)",
+ index, colorFormat, metaOnly);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return NULL;
+ }
+ return mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+}
+
+status_t MediaMetadataRetriever::getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d) metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Mutex::Autolock _l(mLock);
+ if (mRetriever == 0) {
+ ALOGE("retriever is not initialized");
+ return INVALID_OPERATION;
+ }
+ return mRetriever->getFrameAtIndex(
+ frames, frameIndex, numFrames, colorFormat, metaOnly);
+}
+
const char* MediaMetadataRetriever::extractMetadata(int keyCode)
{
ALOGV("extractMetadata(%d)", keyCode);
diff --git a/media/libmediaextractor/include/media/MediaSource.h b/media/libmediaextractor/include/media/MediaSource.h
index 749a4df..504653b 100644
--- a/media/libmediaextractor/include/media/MediaSource.h
+++ b/media/libmediaextractor/include/media/MediaSource.h
@@ -60,6 +60,7 @@
SEEK_NEXT_SYNC,
SEEK_CLOSEST_SYNC,
SEEK_CLOSEST,
+ SEEK_FRAME_INDEX,
};
ReadOptions();
diff --git a/media/libmediametrics/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
index f968c09..f7df2b4 100644
--- a/media/libmediametrics/MediaAnalyticsItem.cpp
+++ b/media/libmediametrics/MediaAnalyticsItem.cpp
@@ -279,8 +279,10 @@
prop = &mProps[i];
} else {
if (i == mPropSize) {
- growProps();
- // XXX: verify success
+ if (growProps() == false) {
+ ALOGE("failed allocation for new props");
+ return NULL;
+ }
}
i = mPropCount++;
prop = &mProps[i];
@@ -312,41 +314,54 @@
// set the values
void MediaAnalyticsItem::setInt32(MediaAnalyticsItem::Attr name, int32_t value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeInt32;
- prop->u.int32Value = value;
+ if (prop != NULL) {
+ prop->mType = kTypeInt32;
+ prop->u.int32Value = value;
+ }
}
void MediaAnalyticsItem::setInt64(MediaAnalyticsItem::Attr name, int64_t value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeInt64;
- prop->u.int64Value = value;
+ if (prop != NULL) {
+ prop->mType = kTypeInt64;
+ prop->u.int64Value = value;
+ }
}
void MediaAnalyticsItem::setDouble(MediaAnalyticsItem::Attr name, double value) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeDouble;
- prop->u.doubleValue = value;
+ if (prop != NULL) {
+ prop->mType = kTypeDouble;
+ prop->u.doubleValue = value;
+ }
}
void MediaAnalyticsItem::setCString(MediaAnalyticsItem::Attr name, const char *value) {
Prop *prop = allocateProp(name);
// any old value will be gone
- prop->mType = kTypeCString;
- prop->u.CStringValue = strdup(value);
+ if (prop != NULL) {
+ prop->mType = kTypeCString;
+ prop->u.CStringValue = strdup(value);
+ }
}
void MediaAnalyticsItem::setRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
Prop *prop = allocateProp(name);
- prop->mType = kTypeRate;
- prop->u.rate.count = count;
- prop->u.rate.duration = duration;
+ if (prop != NULL) {
+ prop->mType = kTypeRate;
+ prop->u.rate.count = count;
+ prop->u.rate.duration = duration;
+ }
}
// find/add/set fused into a single operation
void MediaAnalyticsItem::addInt32(MediaAnalyticsItem::Attr name, int32_t value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeInt32:
prop->u.int32Value += value;
@@ -361,6 +376,9 @@
void MediaAnalyticsItem::addInt64(MediaAnalyticsItem::Attr name, int64_t value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeInt64:
prop->u.int64Value += value;
@@ -375,6 +393,9 @@
void MediaAnalyticsItem::addRate(MediaAnalyticsItem::Attr name, int64_t count, int64_t duration) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeRate:
prop->u.rate.count += count;
@@ -391,6 +412,9 @@
void MediaAnalyticsItem::addDouble(MediaAnalyticsItem::Attr name, double value) {
Prop *prop = allocateProp(name);
+ if (prop == NULL) {
+ return;
+ }
switch (prop->mType) {
case kTypeDouble:
prop->u.doubleValue += value;
@@ -585,7 +609,7 @@
}
}
-void MediaAnalyticsItem::growProps(int increment)
+bool MediaAnalyticsItem::growProps(int increment)
{
if (increment <= 0) {
increment = kGrowProps;
@@ -599,6 +623,10 @@
}
mProps = ni;
mPropSize = nsize;
+ return true;
+ } else {
+ ALOGW("MediaAnalyticsItem::growProps fails");
+ return false;
}
}
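
For illustration, the checked-growth pattern this change enforces, as a hedged generic sketch (not the MediaAnalyticsItem code itself):

    #include <cstdlib>

    // Grow a realloc-managed array of trivially copyable T and report failure
    // instead of assuming success.
    template <typename T>
    static bool growArray(T **items, int *capacity, int increment = 10 /* like kGrowProps */) {
        const int newCapacity = *capacity + (increment > 0 ? increment : 10);
        T *grown = (T *) realloc(*items, sizeof(T) * newCapacity);
        if (grown == nullptr) {
            return false;          // caller must bail out, e.g. allocateProp() returns NULL
        }
        *items = grown;
        *capacity = newCapacity;
        return true;
    }
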
@@ -963,32 +991,26 @@
int nattr = incoming->mPropCount;
for (int i = 0 ; i < nattr; i++ ) {
Prop *iprop = &incoming->mProps[i];
- Prop *oprop = findProp(iprop->mName);
const char *p = iprop->mName;
size_t len = strlen(p);
- char semantic = p[len-1];
+
+ // ignore a zero-length name
+ if (len == 0) {
+ continue;
+ }
+
+ Prop *oprop = findProp(iprop->mName);
if (oprop == NULL) {
// no oprop, so we insert the new one
oprop = allocateProp(p);
- copyProp(oprop, iprop);
- } else {
- // merge iprop into oprop
- switch (semantic) {
- case '<': // first aka keep old)
- /* nop */
- break;
-
- default: // default is 'last'
- case '>': // last (aka keep new)
- copyProp(oprop, iprop);
- break;
-
- case '+': /* sum */
- // XXX validate numeric types, sum in place
- break;
-
+ if (oprop != NULL) {
+ copyProp(oprop, iprop);
+ } else {
+ ALOGW("dropped property '%s'", iprop->mName);
}
+ } else {
+ copyProp(oprop, iprop);
}
}
diff --git a/media/libmediametrics/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
index dd7452f..5f9b916 100644
--- a/media/libmediametrics/include/MediaAnalyticsItem.h
+++ b/media/libmediametrics/include/MediaAnalyticsItem.h
@@ -243,7 +243,7 @@
enum {
kGrowProps = 10
};
- void growProps(int increment = kGrowProps);
+ bool growProps(int increment = kGrowProps);
size_t findPropIndex(const char *name, size_t len);
Prop *findProp(const char *name);
Prop *allocateProp(const char *name);
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index 246c746..beceed3 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -582,17 +582,14 @@
MediaPlayerService::Client::~Client()
{
ALOGV("Client(%d) destructor pid = %d", mConnId, mPid);
- {
- Mutex::Autolock l(mLock);
- mAudioOutput.clear();
- }
+ mAudioOutput.clear();
wp<Client> client(this);
disconnect();
mService->removeClient(client);
if (mAudioAttributes != NULL) {
free(mAudioAttributes);
}
- clearDeathNotifiers();
+ clearDeathNotifiers_l();
}
void MediaPlayerService::Client::disconnect()
@@ -620,7 +617,10 @@
p->reset();
}
- disconnectNativeWindow();
+ {
+ Mutex::Autolock l(mLock);
+ disconnectNativeWindow_l();
+ }
IPCThreadState::self()->flushCommands();
}
@@ -697,7 +697,7 @@
}
}
-void MediaPlayerService::Client::clearDeathNotifiers() {
+void MediaPlayerService::Client::clearDeathNotifiers_l() {
if (mExtractorDeathListener != nullptr) {
mExtractorDeathListener->unlinkToDeath();
mExtractorDeathListener = nullptr;
@@ -712,7 +712,6 @@
player_type playerType)
{
ALOGV("player type = %d", playerType);
- clearDeathNotifiers();
// create the right type of player
sp<MediaPlayerBase> p = createPlayer(playerType);
@@ -726,9 +725,11 @@
ALOGE("extractor service not available");
return NULL;
}
- mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
- binder->linkToDeath(mExtractorDeathListener);
+ sp<ServiceDeathNotifier> extractorDeathListener =
+ new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
+ binder->linkToDeath(extractorDeathListener);
+ sp<ServiceDeathNotifier> codecDeathListener;
if (property_get_bool("persist.media.treble_omx", true)) {
// Treble IOmx
sp<IOmx> omx = IOmx::getService();
@@ -736,8 +737,8 @@
ALOGE("Treble IOmx not available");
return NULL;
}
- mCodecDeathListener = new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
- omx->linkToDeath(mCodecDeathListener, 0);
+ codecDeathListener = new ServiceDeathNotifier(omx, p, MEDIACODEC_PROCESS_DEATH);
+ omx->linkToDeath(codecDeathListener, 0);
} else {
// Legacy IOMX
binder = sm->getService(String16("media.codec"));
@@ -745,12 +746,17 @@
ALOGE("codec service not available");
return NULL;
}
- mCodecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
- binder->linkToDeath(mCodecDeathListener);
+ codecDeathListener = new ServiceDeathNotifier(binder, p, MEDIACODEC_PROCESS_DEATH);
+ binder->linkToDeath(codecDeathListener);
}
+ Mutex::Autolock lock(mLock);
+
+ clearDeathNotifiers_l();
+ mExtractorDeathListener = extractorDeathListener;
+ mCodecDeathListener = codecDeathListener;
+
if (!p->hardwareOutput()) {
- Mutex::Autolock l(mLock);
mAudioOutput = new AudioOutput(mAudioSessionId, IPCThreadState::self()->getCallingUid(),
mPid, mAudioAttributes);
static_cast<MediaPlayerInterface*>(p.get())->setAudioSink(mAudioOutput);
@@ -759,29 +765,29 @@
return p;
}
-void MediaPlayerService::Client::setDataSource_post(
+status_t MediaPlayerService::Client::setDataSource_post(
const sp<MediaPlayerBase>& p,
status_t status)
{
ALOGV(" setDataSource");
- mStatus = status;
- if (mStatus != OK) {
- ALOGE(" error: %d", mStatus);
- return;
+ if (status != OK) {
+ ALOGE(" error: %d", status);
+ return status;
}
// Set the re-transmission endpoint if one was chosen.
if (mRetransmitEndpointValid) {
- mStatus = p->setRetransmitEndpoint(&mRetransmitEndpoint);
- if (mStatus != NO_ERROR) {
- ALOGE("setRetransmitEndpoint error: %d", mStatus);
+ status = p->setRetransmitEndpoint(&mRetransmitEndpoint);
+ if (status != NO_ERROR) {
+ ALOGE("setRetransmitEndpoint error: %d", status);
}
}
- if (mStatus == OK) {
- Mutex::Autolock l(mLock);
+ if (status == OK) {
+ Mutex::Autolock lock(mLock);
mPlayer = p;
}
+ return status;
}
status_t MediaPlayerService::Client::setDataSource(
@@ -812,9 +818,9 @@
ALOGE("Couldn't open fd for %s", url);
return UNKNOWN_ERROR;
}
- setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
+ status_t status = setDataSource(fd, 0, 0x7fffffffffLL); // this sets mStatus
close(fd);
- return mStatus;
+ return mStatus = status;
} else {
player_type playerType = MediaPlayerFactory::getPlayerType(this, url);
sp<MediaPlayerBase> p = setDataSource_pre(playerType);
@@ -822,8 +828,9 @@
return NO_INIT;
}
- setDataSource_post(p, p->setDataSource(httpService, url, headers));
- return mStatus;
+ return mStatus =
+ setDataSource_post(
+ p, p->setDataSource(httpService, url, headers));
}
}
@@ -863,8 +870,7 @@
}
// now set data source
- setDataSource_post(p, p->setDataSource(fd, offset, length));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(fd, offset, length));
}
status_t MediaPlayerService::Client::setDataSource(
@@ -877,8 +883,7 @@
}
// now set data source
- setDataSource_post(p, p->setDataSource(source));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(source));
}
status_t MediaPlayerService::Client::setDataSource(
@@ -890,11 +895,10 @@
return NO_INIT;
}
// now set data source
- setDataSource_post(p, p->setDataSource(dataSource));
- return mStatus;
+ return mStatus = setDataSource_post(p, p->setDataSource(dataSource));
}
-void MediaPlayerService::Client::disconnectNativeWindow() {
+void MediaPlayerService::Client::disconnectNativeWindow_l() {
if (mConnectedWindow != NULL) {
status_t err = nativeWindowDisconnect(
mConnectedWindow.get(), "disconnectNativeWindow");
@@ -931,7 +935,8 @@
// ANW, which may result in errors.
reset();
- disconnectNativeWindow();
+ Mutex::Autolock lock(mLock);
+ disconnectNativeWindow_l();
return err;
}
@@ -942,14 +947,22 @@
// on the disconnected ANW, which may result in errors.
status_t err = p->setVideoSurfaceTexture(bufferProducer);
- disconnectNativeWindow();
-
- mConnectedWindow = anw;
+ mLock.lock();
+ disconnectNativeWindow_l();
if (err == OK) {
+ mConnectedWindow = anw;
mConnectedWindowBinder = binder;
+ mLock.unlock();
} else {
- disconnectNativeWindow();
+ mLock.unlock();
+ status_t err = nativeWindowDisconnect(
+ anw.get(), "disconnectNativeWindow");
+
+ if (err != OK) {
+ ALOGW("nativeWindowDisconnect returned an error: %s (%d)",
+ strerror(-err), err);
+ }
}
return err;
@@ -1375,9 +1388,11 @@
if (p != 0) return INVALID_OPERATION;
if (NULL != endpoint) {
+ Mutex::Autolock lock(mLock);
mRetransmitEndpoint = *endpoint;
mRetransmitEndpointValid = true;
} else {
+ Mutex::Autolock lock(mLock);
mRetransmitEndpointValid = false;
}
@@ -1395,6 +1410,7 @@
if (p != NULL)
return p->getRetransmitEndpoint(endpoint);
+ Mutex::Autolock lock(mLock);
if (!mRetransmitEndpointValid)
return NO_INIT;
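
The MediaPlayerService changes follow one consistent rule: do the blocking binder work (getService(), linkToDeath()) with no lock held, then publish the resulting listeners under mLock through the _l helpers. A hedged sketch of that prepare-outside/publish-inside idiom, with std::mutex and placeholder types standing in for the framework classes:

    #include <memory>
    #include <mutex>

    // Illustrative only: Listener stands in for ServiceDeathNotifier, and the
    // slow setup stands in for getService()/linkToDeath() binder calls.
    struct Listener { /* ... */ };

    class Client {
    public:
        void setupListeners() {
            // 1) Potentially blocking work, performed with no lock held.
            auto extractor = std::make_shared<Listener>();
            auto codec     = std::make_shared<Listener>();

            // 2) Publish the results under the lock, replacing old state.
            std::lock_guard<std::mutex> lock(mLock);
            clearListeners_l();
            mExtractorListener = std::move(extractor);
            mCodecListener     = std::move(codec);
        }

    private:
        void clearListeners_l() {          // caller must hold mLock
            mExtractorListener.reset();
            mCodecListener.reset();
        }

        std::mutex mLock;
        std::shared_ptr<Listener> mExtractorListener, mCodecListener;
    };
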
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index f1d43a2..9038e97 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -361,7 +361,7 @@
sp<MediaPlayerBase> setDataSource_pre(player_type playerType);
- void setDataSource_post(const sp<MediaPlayerBase>& p,
+ status_t setDataSource_post(const sp<MediaPlayerBase>& p,
status_t status);
static void notify(void* cookie, int msg,
@@ -403,7 +403,7 @@
wp<MediaPlayerBase> mListener;
};
- void clearDeathNotifiers();
+ void clearDeathNotifiers_l();
friend class MediaPlayerService;
Client( const sp<MediaPlayerService>& service,
@@ -432,7 +432,7 @@
void addNewMetadataUpdate(media::Metadata::Type type);
// Disconnect from the currently connected ANativeWindow.
- void disconnectNativeWindow();
+ void disconnectNativeWindow_l();
status_t setAudioAttributes_l(const Parcel &request);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 6400481..9b9b3bb 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -339,7 +339,7 @@
wp<MediaRecorderClient> client(this);
mMediaPlayerService->removeMediaRecorderClient(client);
}
- clearDeathNotifiers();
+ clearDeathNotifiers_l();
return NO_ERROR;
}
@@ -411,7 +411,7 @@
}
}
-void MediaRecorderClient::clearDeathNotifiers() {
+void MediaRecorderClient::clearDeathNotifiers_l() {
if (mCameraDeathListener != nullptr) {
mCameraDeathListener->unlinkToDeath();
mCameraDeathListener = nullptr;
@@ -425,8 +425,8 @@
status_t MediaRecorderClient::setListener(const sp<IMediaRecorderClient>& listener)
{
ALOGV("setListener");
- clearDeathNotifiers();
Mutex::Autolock lock(mLock);
+ clearDeathNotifiers_l();
if (mRecorder == NULL) {
ALOGE("recorder is not initialized");
return NO_INIT;
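
The _l suffix adopted here is the usual Android convention for "caller already holds the lock": the public entry point acquires mLock and only then calls the _l helper. A tiny illustrative sketch (names are placeholders, not the MediaRecorderClient API):

    #include <mutex>

    class RecorderClient {
    public:
        void setListener() {
            std::lock_guard<std::mutex> lock(mLock);   // take the lock first
            clearDeathNotifiers_l();                   // then call _l helpers
        }

    private:
        // "_l" = must be called with mLock held; never re-lock mLock here,
        // or the non-recursive mutex would deadlock.
        void clearDeathNotifiers_l() {
            mHasListeners = false;
        }

        std::mutex mLock;
        bool mHasListeners = false;
    };
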
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 7868a91..711db2c 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -58,7 +58,7 @@
wp<IMediaRecorderClient> mListener;
};
- void clearDeathNotifiers();
+ void clearDeathNotifiers_l();
public:
virtual status_t setCamera(const sp<hardware::ICamera>& camera,
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.cpp b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
index 3aab9b0..16ed530 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.cpp
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.cpp
@@ -194,6 +194,25 @@
Mutex MetadataRetrieverClient::sLock;
+static sp<IMemory> getThumbnail(VideoFrame* frame) {
+ std::unique_ptr<VideoFrame> frameDeleter(frame);
+
+ size_t size = frame->getFlattenedSize();
+ sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
+ if (heap == NULL) {
+ ALOGE("failed to create MemoryDealer");
+ return NULL;
+ }
+ sp<IMemory> thumbnail = new MemoryBase(heap, 0, size);
+ if (thumbnail == NULL) {
+ ALOGE("not enough memory for VideoFrame size=%zu", size);
+ return NULL;
+ }
+ VideoFrame *frameCopy = static_cast<VideoFrame *>(thumbnail->pointer());
+ frameCopy->copyFlattened(*frame);
+ return thumbnail;
+}
+
sp<IMemory> MetadataRetrieverClient::getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly)
{
@@ -206,29 +225,55 @@
ALOGE("retriever is not initialized");
return NULL;
}
- VideoFrame *frame = mRetriever->getFrameAtTime(
- timeUs, option, colorFormat, metaOnly);
+ VideoFrame *frame = mRetriever->getFrameAtTime(timeUs, option, colorFormat, metaOnly);
if (frame == NULL) {
ALOGE("failed to capture a video frame");
return NULL;
}
- size_t size = frame->getFlattenedSize();
- sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
- if (heap == NULL) {
- ALOGE("failed to create MemoryDealer");
- delete frame;
+ return getThumbnail(frame);
+}
+
+sp<IMemory> MetadataRetrieverClient::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtTime: index(%d) colorFormat(%d), metaOnly(%d)",
+ index, colorFormat, metaOnly);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ mThumbnail.clear();
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
return NULL;
}
- mThumbnail = new MemoryBase(heap, 0, size);
- if (mThumbnail == NULL) {
- ALOGE("not enough memory for VideoFrame size=%zu", size);
- delete frame;
+ VideoFrame *frame = mRetriever->getImageAtIndex(index, colorFormat, metaOnly);
+ if (frame == NULL) {
+ ALOGE("failed to extract image");
return NULL;
}
- VideoFrame *frameCopy = static_cast<VideoFrame *>(mThumbnail->pointer());
- frameCopy->copyFlattened(*frame);
- delete frame; // Fix memory leakage
- return mThumbnail;
+ return getThumbnail(frame);
+}
+
+status_t MetadataRetrieverClient::getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex(%d), numFrames(%d), colorFormat(%d), metaOnly(%d)",
+ frameIndex, numFrames, colorFormat, metaOnly);
+ Mutex::Autolock lock(mLock);
+ Mutex::Autolock glock(sLock);
+ if (mRetriever == NULL) {
+ ALOGE("retriever is not initialized");
+ return INVALID_OPERATION;
+ }
+
+ std::vector<VideoFrame*> videoFrames;
+ status_t err = mRetriever->getFrameAtIndex(
+ &videoFrames, frameIndex, numFrames, colorFormat, metaOnly);
+ if (err != OK) {
+ return err;
+ }
+ for (size_t i = 0; i < videoFrames.size(); i++) {
+ frames->push_back(getThumbnail(videoFrames[i]));
+ }
+ return OK;
}
sp<IMemory> MetadataRetrieverClient::extractAlbumArt()
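
The new getThumbnail() helper centralizes both the flattening and the ownership rule: it adopts the raw VideoFrame* immediately, so every early return still frees it. A simplified sketch of that adopt-then-copy pattern, with std::vector standing in for the IMemory/MemoryHeapBase plumbing:

    #include <cstdint>
    #include <cstring>
    #include <memory>
    #include <vector>

    struct Frame {                       // stand-in for VideoFrame
        std::vector<uint8_t> data;
        size_t flattenedSize() const { return data.size(); }
    };

    // Takes ownership of a raw, heap-allocated frame (as the decoder returns it),
    // copies it into a caller-visible buffer, and frees the original on all paths.
    std::shared_ptr<std::vector<uint8_t>> makeThumbnail(Frame *rawFrame) {
        std::unique_ptr<Frame> frame(rawFrame);      // deleter runs on every return
        if (frame == nullptr || frame->flattenedSize() == 0) {
            return nullptr;                          // early return still frees frame
        }
        auto out = std::make_shared<std::vector<uint8_t>>(frame->flattenedSize());
        std::memcpy(out->data(), frame->data.data(), frame->flattenedSize());
        return out;
    }
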
diff --git a/media/libmediaplayerservice/MetadataRetrieverClient.h b/media/libmediaplayerservice/MetadataRetrieverClient.h
index c78cd4b..f71891a 100644
--- a/media/libmediaplayerservice/MetadataRetrieverClient.h
+++ b/media/libmediaplayerservice/MetadataRetrieverClient.h
@@ -52,6 +52,11 @@
virtual status_t setDataSource(const sp<IDataSource>& source, const char *mime);
virtual sp<IMemory> getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual sp<IMemory> getImageAtIndex(
+ int index, int colorFormat, bool metaOnly);
+ virtual status_t getFrameAtIndex(
+ std::vector<sp<IMemory> > *frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly);
virtual sp<IMemory> extractAlbumArt();
virtual const char* extractMetadata(int keyCode);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 4729d59..94e3395 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -49,9 +49,6 @@
static const int kHighWaterMarkMs = 5000; // 5secs
static const int kHighWaterMarkRebufferMs = 15000; // 15secs
-static const int kLowWaterMarkKB = 40;
-static const int kHighWaterMarkKB = 200;
-
NuPlayer::GenericSource::GenericSource(
const sp<AMessage> &notify,
bool uidValid,
@@ -62,6 +59,11 @@
mAudioLastDequeueTimeUs(0),
mVideoTimeUs(0),
mVideoLastDequeueTimeUs(0),
+ mPrevBufferPercentage(-1),
+ mPollBufferingGeneration(0),
+ mSentPauseOnBuffering(false),
+ mAudioDataGeneration(0),
+ mVideoDataGeneration(0),
mFetchSubtitleDataGeneration(0),
mFetchTimedTextDataGeneration(0),
mDurationUs(-1ll),
@@ -77,7 +79,7 @@
ALOGV("GenericSource");
CHECK(mediaClock != NULL);
- mBufferingMonitor = new BufferingMonitor(notify);
+ getDefaultBufferingSettings(&mBufferingSettings);
resetDataSource();
}
@@ -95,14 +97,7 @@
mOffset = 0;
mLength = 0;
mStarted = false;
- mStopRead = true;
-
- if (mBufferingMonitorLooper != NULL) {
- mBufferingMonitorLooper->unregisterHandler(mBufferingMonitor->id());
- mBufferingMonitorLooper->stop();
- mBufferingMonitorLooper = NULL;
- }
- mBufferingMonitor->stop();
+ mPreparing = false;
mIsDrmProtected = false;
mIsDrmReleased = false;
@@ -114,6 +109,7 @@
const sp<IMediaHTTPService> &httpService,
const char *url,
const KeyedVector<String8, String8> *headers) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource url: %s", url);
resetDataSource();
@@ -132,6 +128,7 @@
status_t NuPlayer::GenericSource::setDataSource(
int fd, int64_t offset, int64_t length) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource %d/%lld/%lld", fd, (long long)offset, (long long)length);
resetDataSource();
@@ -146,6 +143,7 @@
}
status_t NuPlayer::GenericSource::setDataSource(const sp<DataSource>& source) {
+ Mutex::Autolock _l(mLock);
ALOGV("setDataSource (source: %p)", source.get());
resetDataSource();
@@ -154,6 +152,7 @@
}
sp<MetaData> NuPlayer::GenericSource::getFileFormatMeta() const {
+ Mutex::Autolock _l(mLock);
return mFileMeta;
}
@@ -268,12 +267,36 @@
status_t NuPlayer::GenericSource::getDefaultBufferingSettings(
BufferingSettings* buffering /* nonnull */) {
- mBufferingMonitor->getDefaultBufferingSettings(buffering);
+ buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mRebufferingMode = BUFFERING_MODE_TIME_ONLY;
+ buffering->mInitialWatermarkMs = kHighWaterMarkMs;
+ buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
+ buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
+
+ ALOGV("getDefaultBufferingSettings{%s}", buffering->toString().string());
return OK;
}
status_t NuPlayer::GenericSource::setBufferingSettings(const BufferingSettings& buffering) {
- return mBufferingMonitor->setBufferingSettings(buffering);
+ ALOGV("setBufferingSettings{%s}", buffering.toString().string());
+
+ if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
+ || buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
+ || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
+ && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)) {
+ return BAD_VALUE;
+ }
+
+ Mutex::Autolock _l(mLock);
+ mBufferingSettings = buffering;
+ if (mBufferingSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
+ mBufferingSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
+ }
+ if (mBufferingSettings.mRebufferingMode == BUFFERING_MODE_NONE) {
+ mBufferingSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
+ mBufferingSettings.mRebufferingWatermarkHighMs = INT32_MAX;
+ }
+ return OK;
}
status_t NuPlayer::GenericSource::startSources() {
@@ -309,6 +332,7 @@
status_t NuPlayer::GenericSource::setBuffers(
bool audio, Vector<MediaBuffer *> &buffers) {
+ Mutex::Autolock _l(mLock);
if (mIsSecure && !audio && mVideoTrack.mSource != NULL) {
return mVideoTrack.mSource->setBuffers(buffers);
}
@@ -316,13 +340,10 @@
}
bool NuPlayer::GenericSource::isStreaming() const {
+ Mutex::Autolock _l(mLock);
return mIsStreaming;
}
-void NuPlayer::GenericSource::setOffloadAudio(bool offload) {
- mBufferingMonitor->setOffloadAudio(offload);
-}
-
NuPlayer::GenericSource::~GenericSource() {
ALOGV("~GenericSource");
if (mLooper != NULL) {
@@ -333,6 +354,7 @@
}
void NuPlayer::GenericSource::prepareAsync() {
+ Mutex::Autolock _l(mLock);
ALOGV("prepareAsync: (looper: %d)", (mLooper != NULL));
if (mLooper == NULL) {
@@ -434,7 +456,7 @@
}
if (mVideoTrack.mSource != NULL) {
- sp<MetaData> meta = doGetFormatMeta(false /* audio */);
+ sp<MetaData> meta = getFormatMeta_l(false /* audio */);
sp<AMessage> msg = new AMessage;
err = convertMetaDataToMessage(meta, &msg);
if(err != OK) {
@@ -468,47 +490,39 @@
}
if (mIsStreaming) {
- if (mBufferingMonitorLooper == NULL) {
- mBufferingMonitor->prepare(mCachedSource, mDurationUs, mBitrate,
- mIsStreaming);
-
- mBufferingMonitorLooper = new ALooper;
- mBufferingMonitorLooper->setName("GSBMonitor");
- mBufferingMonitorLooper->start();
- mBufferingMonitorLooper->registerHandler(mBufferingMonitor);
- }
-
- mBufferingMonitor->ensureCacheIsFetching();
- mBufferingMonitor->restartPollBuffering();
+ mCachedSource->resumeFetchingIfNecessary();
+ mPreparing = true;
+ schedulePollBuffering();
} else {
notifyPrepared();
}
+
+ if (mAudioTrack.mSource != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
+ }
+
+ if (mVideoTrack.mSource != NULL) {
+ postReadBuffer(MEDIA_TRACK_TYPE_VIDEO);
+ }
}
void NuPlayer::GenericSource::notifyPreparedAndCleanup(status_t err) {
if (err != OK) {
- {
- sp<DataSource> dataSource = mDataSource;
- sp<NuCachedSource2> cachedSource = mCachedSource;
- sp<DataSource> httpSource = mHttpSource;
- {
- Mutex::Autolock _l(mDisconnectLock);
- mDataSource.clear();
- mCachedSource.clear();
- mHttpSource.clear();
- }
- }
- mBitrate = -1;
+ mDataSource.clear();
+ mCachedSource.clear();
+ mHttpSource.clear();
- mBufferingMonitor->cancelPollBuffering();
+ mBitrate = -1;
+ mPrevBufferPercentage = -1;
+ ++mPollBufferingGeneration;
}
notifyPrepared(err);
}
void NuPlayer::GenericSource::start() {
+ Mutex::Autolock _l(mLock);
ALOGI("start");
- mStopRead = false;
if (mAudioTrack.mSource != NULL) {
postReadBuffer(MEDIA_TRACK_TYPE_AUDIO);
}
@@ -518,28 +532,27 @@
}
mStarted = true;
-
- (new AMessage(kWhatStart, this))->post();
}
void NuPlayer::GenericSource::stop() {
+ Mutex::Autolock _l(mLock);
mStarted = false;
}
void NuPlayer::GenericSource::pause() {
+ Mutex::Autolock _l(mLock);
mStarted = false;
}
void NuPlayer::GenericSource::resume() {
+ Mutex::Autolock _l(mLock);
mStarted = true;
-
- (new AMessage(kWhatResume, this))->post();
}
void NuPlayer::GenericSource::disconnect() {
sp<DataSource> dataSource, httpSource;
{
- Mutex::Autolock _l(mDisconnectLock);
+ Mutex::Autolock _l(mLock);
dataSource = mDataSource;
httpSource = mHttpSource;
}
@@ -558,7 +571,24 @@
return OK;
}
+void NuPlayer::GenericSource::sendCacheStats() {
+ int32_t kbps = 0;
+ status_t err = UNKNOWN_ERROR;
+
+ if (mCachedSource != NULL) {
+ err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
+ }
+
+ if (err == OK) {
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatCacheStats);
+ notify->setInt32("bandwidth", kbps);
+ notify->post();
+ }
+}
+
void NuPlayer::GenericSource::onMessageReceived(const sp<AMessage> &msg) {
+ Mutex::Autolock _l(mLock);
switch (msg->what()) {
case kWhatPrepareAsync:
{
@@ -627,6 +657,8 @@
track->mSource = source;
track->mSource->start();
track->mIndex = trackIndex;
+ ++mAudioDataGeneration;
+ ++mVideoDataGeneration;
int64_t timeUs, actualTimeUs;
const bool formatChange = true;
@@ -644,68 +676,19 @@
break;
}
- case kWhatStart:
- case kWhatResume:
- {
- mBufferingMonitor->restartPollBuffering();
- break;
- }
-
- case kWhatGetFormat:
- {
- onGetFormatMeta(msg);
- break;
- }
-
- case kWhatGetSelectedTrack:
- {
- onGetSelectedTrack(msg);
- break;
- }
-
- case kWhatGetTrackInfo:
- {
- onGetTrackInfo(msg);
- break;
- }
-
- case kWhatSelectTrack:
- {
- onSelectTrack(msg);
- break;
- }
-
- case kWhatSeek:
- {
- onSeek(msg);
- break;
- }
-
case kWhatReadBuffer:
{
onReadBuffer(msg);
break;
}
- case kWhatPrepareDrm:
+ case kWhatPollBuffering:
{
- status_t status = onPrepareDrm(msg);
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
- break;
- }
-
- case kWhatReleaseDrm:
- {
- status_t status = onReleaseDrm();
- sp<AMessage> response = new AMessage;
- response->setInt32("status", status);
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
+ int32_t generation;
+ CHECK(msg->findInt32("generation", &generation));
+ if (generation == mPollBufferingGeneration) {
+ onPollBuffering();
+ }
break;
}
@@ -820,34 +803,11 @@
}
sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) {
- sp<AMessage> msg = new AMessage(kWhatGetFormat, this);
- msg->setInt32("audio", audio);
-
- sp<AMessage> response;
- sp<RefBase> format;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findObject("format", &format));
- return static_cast<MetaData*>(format.get());
- } else {
- return NULL;
- }
+ Mutex::Autolock _l(mLock);
+ return getFormatMeta_l(audio);
}
-void NuPlayer::GenericSource::onGetFormatMeta(const sp<AMessage>& msg) const {
- int32_t audio;
- CHECK(msg->findInt32("audio", &audio));
-
- sp<AMessage> response = new AMessage;
- sp<MetaData> format = doGetFormatMeta(audio);
- response->setObject("format", format);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-sp<MetaData> NuPlayer::GenericSource::doGetFormatMeta(bool audio) const {
+sp<MetaData> NuPlayer::GenericSource::getFormatMeta_l(bool audio) {
sp<IMediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
if (source == NULL) {
@@ -859,6 +819,7 @@
status_t NuPlayer::GenericSource::dequeueAccessUnit(
bool audio, sp<ABuffer> *accessUnit) {
+ Mutex::Autolock _l(mLock);
// If has gone through stop/releaseDrm sequence, we no longer send down any buffer b/c
// the codec's crypto object has gone away (b/37960096).
// Note: This will be unnecessary when stop() changes behavior and releases codec (b/35248283).
@@ -884,10 +845,30 @@
status_t result = track->mPackets->dequeueAccessUnit(accessUnit);
- // start pulling in more buffers if we only have one (or no) buffer left
+ // start pulling in more buffers if cache is running low
// so that decoder has less chance of being starved
- if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
- postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ if (!mIsStreaming) {
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ } else {
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+ int64_t restartBufferingMarkUs =
+ mBufferingSettings.mRebufferingWatermarkHighMs * 1000ll / 2;
+ if (finalResult == OK) {
+ if (durationUs < restartBufferingMarkUs) {
+ postReadBuffer(audio? MEDIA_TRACK_TYPE_AUDIO : MEDIA_TRACK_TYPE_VIDEO);
+ }
+ if (track->mPackets->getAvailableBufferCount(&finalResult) < 2
+ && !mSentPauseOnBuffering && !mPreparing) {
+ mCachedSource->resumeFetchingIfNecessary();
+ sendCacheStats();
+ mSentPauseOnBuffering = true;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatPauseOnBufferingStart);
+ notify->post();
+ }
+ }
}
if (result != OK) {
@@ -907,7 +888,6 @@
CHECK((*accessUnit)->meta()->findInt64("timeUs", &timeUs));
if (audio) {
mAudioLastDequeueTimeUs = timeUs;
- mBufferingMonitor->updateDequeuedBufferTime(timeUs);
} else {
mVideoLastDequeueTimeUs = timeUs;
}
@@ -932,43 +912,18 @@
}
status_t NuPlayer::GenericSource::getDuration(int64_t *durationUs) {
+ Mutex::Autolock _l(mLock);
*durationUs = mDurationUs;
return OK;
}
size_t NuPlayer::GenericSource::getTrackCount() const {
+ Mutex::Autolock _l(mLock);
return mSources.size();
}
sp<AMessage> NuPlayer::GenericSource::getTrackInfo(size_t trackIndex) const {
- sp<AMessage> msg = new AMessage(kWhatGetTrackInfo, this);
- msg->setSize("trackIndex", trackIndex);
-
- sp<AMessage> response;
- sp<RefBase> format;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findObject("format", &format));
- return static_cast<AMessage*>(format.get());
- } else {
- return NULL;
- }
-}
-
-void NuPlayer::GenericSource::onGetTrackInfo(const sp<AMessage>& msg) const {
- size_t trackIndex;
- CHECK(msg->findSize("trackIndex", &trackIndex));
-
- sp<AMessage> response = new AMessage;
- sp<AMessage> format = doGetTrackInfo(trackIndex);
- response->setObject("format", format);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-sp<AMessage> NuPlayer::GenericSource::doGetTrackInfo(size_t trackIndex) const {
+ Mutex::Autolock _l(mLock);
size_t trackCount = mSources.size();
if (trackIndex >= trackCount) {
return NULL;
@@ -1018,35 +973,7 @@
}
ssize_t NuPlayer::GenericSource::getSelectedTrack(media_track_type type) const {
- sp<AMessage> msg = new AMessage(kWhatGetSelectedTrack, this);
- msg->setInt32("type", type);
-
- sp<AMessage> response;
- int32_t index;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("index", &index));
- return index;
- } else {
- return -1;
- }
-}
-
-void NuPlayer::GenericSource::onGetSelectedTrack(const sp<AMessage>& msg) const {
- int32_t tmpType;
- CHECK(msg->findInt32("type", &tmpType));
- media_track_type type = (media_track_type)tmpType;
-
- sp<AMessage> response = new AMessage;
- ssize_t index = doGetSelectedTrack(type);
- response->setInt32("index", index);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-ssize_t NuPlayer::GenericSource::doGetSelectedTrack(media_track_type type) const {
+ Mutex::Autolock _l(mLock);
const Track *track = NULL;
switch (type) {
case MEDIA_TRACK_TYPE_VIDEO:
@@ -1073,38 +1000,9 @@
}
status_t NuPlayer::GenericSource::selectTrack(size_t trackIndex, bool select, int64_t timeUs) {
+ Mutex::Autolock _l(mLock);
ALOGV("%s track: %zu", select ? "select" : "deselect", trackIndex);
- sp<AMessage> msg = new AMessage(kWhatSelectTrack, this);
- msg->setInt32("trackIndex", trackIndex);
- msg->setInt32("select", select);
- msg->setInt64("timeUs", timeUs);
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
-}
-
-void NuPlayer::GenericSource::onSelectTrack(const sp<AMessage>& msg) {
- int32_t trackIndex, select;
- int64_t timeUs;
- CHECK(msg->findInt32("trackIndex", &trackIndex));
- CHECK(msg->findInt32("select", &select));
- CHECK(msg->findInt64("timeUs", &timeUs));
-
- sp<AMessage> response = new AMessage;
- status_t err = doSelectTrack(trackIndex, select, timeUs);
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-status_t NuPlayer::GenericSource::doSelectTrack(size_t trackIndex, bool select, int64_t timeUs) {
if (trackIndex >= mSources.size()) {
return BAD_INDEX;
}
@@ -1196,46 +1094,11 @@
}
status_t NuPlayer::GenericSource::seekTo(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
- sp<AMessage> msg = new AMessage(kWhatSeek, this);
- msg->setInt64("seekTimeUs", seekTimeUs);
- msg->setInt32("mode", mode);
-
- sp<AMessage> response;
- status_t err = msg->postAndAwaitResponse(&response);
- if (err == OK && response != NULL) {
- CHECK(response->findInt32("err", &err));
- }
-
- return err;
-}
-
-void NuPlayer::GenericSource::onSeek(const sp<AMessage>& msg) {
- int64_t seekTimeUs;
- int32_t mode;
- CHECK(msg->findInt64("seekTimeUs", &seekTimeUs));
- CHECK(msg->findInt32("mode", &mode));
-
- sp<AMessage> response = new AMessage;
- status_t err = doSeek(seekTimeUs, (MediaPlayerSeekMode)mode);
- response->setInt32("err", err);
-
- sp<AReplyToken> replyID;
- CHECK(msg->senderAwaitsResponse(&replyID));
- response->postReply(replyID);
-}
-
-status_t NuPlayer::GenericSource::doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode) {
- mBufferingMonitor->updateDequeuedBufferTime(-1ll);
-
- // If the Widevine source is stopped, do not attempt to read any
- // more buffers.
- //
- // TODO: revisit after widevine is removed. May be able to
- // combine mStopRead with mStarted.
- if (mStopRead) {
- return INVALID_OPERATION;
- }
+ Mutex::Autolock _l(mLock);
+ ALOGV("seekTo: %lld, %d", (long long)seekTimeUs, mode);
if (mVideoTrack.mSource != NULL) {
+ ++mVideoDataGeneration;
+
int64_t actualTimeUs;
readBuffer(MEDIA_TRACK_TYPE_VIDEO, seekTimeUs, mode, &actualTimeUs);
@@ -1246,6 +1109,7 @@
}
if (mAudioTrack.mSource != NULL) {
+ ++mAudioDataGeneration;
readBuffer(MEDIA_TRACK_TYPE_AUDIO, seekTimeUs, MediaPlayerSeekMode::SEEK_CLOSEST);
mAudioLastDequeueTimeUs = seekTimeUs;
}
@@ -1260,12 +1124,8 @@
mFetchTimedTextDataGeneration++;
}
- // If currently buffering, post kWhatBufferingEnd first, so that
- // NuPlayer resumes. Otherwise, if cache hits high watermark
- // before new polling happens, no one will resume the playback.
- mBufferingMonitor->stopBufferingIfNecessary();
- mBufferingMonitor->restartPollBuffering();
-
+ ++mPollBufferingGeneration;
+ schedulePollBuffering();
return OK;
}
@@ -1368,9 +1228,29 @@
return ab;
}
-void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
- Mutex::Autolock _l(mReadBufferLock);
+int32_t NuPlayer::GenericSource::getDataGeneration(media_track_type type) const {
+ int32_t generation = -1;
+ switch (type) {
+ case MEDIA_TRACK_TYPE_VIDEO:
+ generation = mVideoDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_AUDIO:
+ generation = mAudioDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_TIMEDTEXT:
+ generation = mFetchTimedTextDataGeneration;
+ break;
+ case MEDIA_TRACK_TYPE_SUBTITLE:
+ generation = mFetchSubtitleDataGeneration;
+ break;
+ default:
+ break;
+ }
+ return generation;
+}
+
+void NuPlayer::GenericSource::postReadBuffer(media_track_type trackType) {
if ((mPendingReadBufferTypes & (1 << trackType)) == 0) {
mPendingReadBufferTypes |= (1 << trackType);
sp<AMessage> msg = new AMessage(kWhatReadBuffer, this);
@@ -1383,25 +1263,13 @@
int32_t tmpType;
CHECK(msg->findInt32("trackType", &tmpType));
media_track_type trackType = (media_track_type)tmpType;
+ mPendingReadBufferTypes &= ~(1 << trackType);
readBuffer(trackType);
- {
- // only protect the variable change, as readBuffer may
- // take considerable time.
- Mutex::Autolock _l(mReadBufferLock);
- mPendingReadBufferTypes &= ~(1 << trackType);
- }
}
void NuPlayer::GenericSource::readBuffer(
media_track_type trackType, int64_t seekTimeUs, MediaPlayerSeekMode mode,
int64_t *actualTimeUs, bool formatChange) {
- // Do not read data if Widevine source is stopped
- //
- // TODO: revisit after widevine is removed. May be able to
- // combine mStopRead with mStarted.
- if (mStopRead) {
- return;
- }
Track *track;
size_t maxBuffers = 1;
switch (trackType) {
@@ -1445,10 +1313,12 @@
options.setNonBlocking();
}
+ int32_t generation = getDataGeneration(trackType);
for (size_t numBuffers = 0; numBuffers < maxBuffers; ) {
Vector<MediaBuffer *> mediaBuffers;
status_t err = NO_ERROR;
+ mLock.unlock();
if (couldReadMultiple) {
err = track->mSource->readMultiple(
&mediaBuffers, maxBuffers - numBuffers, &options);
@@ -1459,11 +1329,21 @@
mediaBuffers.push_back(mbuf);
}
}
+ mLock.lock();
options.clearNonPersistent();
size_t id = 0;
size_t count = mediaBuffers.size();
+
+ // The track may have changed while the lock was dropped; discard stale buffers.
+ if (generation != getDataGeneration(trackType)) {
+ for (; id < count; ++id) {
+ mediaBuffers[id]->release();
+ }
+ break;
+ }
+
for (; id < count; ++id) {
int64_t timeUs;
MediaBuffer *mbuf = mediaBuffers[id];
@@ -1474,10 +1354,8 @@
}
if (trackType == MEDIA_TRACK_TYPE_AUDIO) {
mAudioTimeUs = timeUs;
- mBufferingMonitor->updateQueuedTime(true /* isAudio */, timeUs);
} else if (trackType == MEDIA_TRACK_TYPE_VIDEO) {
mVideoTimeUs = timeUs;
- mBufferingMonitor->updateQueuedTime(false /* isAudio */, timeUs);
}
queueDiscontinuityIfNeeded(seeking, formatChange, trackType, track);
@@ -1524,6 +1402,39 @@
break;
}
}
+
+ if (mIsStreaming
+ && (trackType == MEDIA_TRACK_TYPE_VIDEO || trackType == MEDIA_TRACK_TYPE_AUDIO)) {
+ status_t finalResult;
+ int64_t durationUs = track->mPackets->getBufferedDurationUs(&finalResult);
+
+ int64_t markUs = (mPreparing ? mBufferingSettings.mInitialWatermarkMs
+ : mBufferingSettings.mRebufferingWatermarkHighMs) * 1000ll;
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing || mSentPauseOnBuffering) {
+ Track *counterTrack =
+ (trackType == MEDIA_TRACK_TYPE_VIDEO ? &mAudioTrack : &mVideoTrack);
+ if (counterTrack->mSource != NULL) {
+ durationUs = counterTrack->mPackets->getBufferedDurationUs(&finalResult);
+ }
+ if (finalResult == ERROR_END_OF_STREAM || durationUs >= markUs) {
+ if (mPreparing) {
+ notifyPrepared();
+ mPreparing = false;
+ } else {
+ sendCacheStats();
+ mSentPauseOnBuffering = false;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ }
+ }
+ }
+ return;
+ }
+
+ postReadBuffer(trackType);
+ }
}
void NuPlayer::GenericSource::queueDiscontinuityIfNeeded(
@@ -1541,160 +1452,7 @@
}
}
-NuPlayer::GenericSource::BufferingMonitor::BufferingMonitor(const sp<AMessage> &notify)
- : mNotify(notify),
- mDurationUs(-1ll),
- mBitrate(-1ll),
- mIsStreaming(false),
- mAudioTimeUs(0),
- mVideoTimeUs(0),
- mPollBufferingGeneration(0),
- mPrepareBuffering(false),
- mBuffering(false),
- mPrevBufferPercentage(-1),
- mOffloadAudio(false),
- mFirstDequeuedBufferRealUs(-1ll),
- mFirstDequeuedBufferMediaUs(-1ll),
- mlastDequeuedBufferMediaUs(-1ll) {
- getDefaultBufferingSettings(&mSettings);
-}
-
-NuPlayer::GenericSource::BufferingMonitor::~BufferingMonitor() {
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::getDefaultBufferingSettings(
- BufferingSettings *buffering /* nonnull */) {
- buffering->mInitialBufferingMode = BUFFERING_MODE_TIME_ONLY;
- buffering->mRebufferingMode = BUFFERING_MODE_TIME_THEN_SIZE;
- buffering->mInitialWatermarkMs = kHighWaterMarkMs;
- buffering->mRebufferingWatermarkLowMs = kLowWaterMarkMs;
- buffering->mRebufferingWatermarkHighMs = kHighWaterMarkRebufferMs;
- buffering->mRebufferingWatermarkLowKB = kLowWaterMarkKB;
- buffering->mRebufferingWatermarkHighKB = kHighWaterMarkKB;
-
- ALOGV("BufferingMonitor::getDefaultBufferingSettings{%s}",
- buffering->toString().string());
-}
-
-status_t NuPlayer::GenericSource::BufferingMonitor::setBufferingSettings(
- const BufferingSettings &buffering) {
- ALOGV("BufferingMonitor::setBufferingSettings{%s}",
- buffering.toString().string());
-
- Mutex::Autolock _l(mLock);
- if (buffering.IsSizeBasedBufferingMode(buffering.mInitialBufferingMode)
- || (buffering.IsTimeBasedBufferingMode(buffering.mRebufferingMode)
- && buffering.mRebufferingWatermarkLowMs > buffering.mRebufferingWatermarkHighMs)
- || (buffering.IsSizeBasedBufferingMode(buffering.mRebufferingMode)
- && buffering.mRebufferingWatermarkLowKB > buffering.mRebufferingWatermarkHighKB)) {
- return BAD_VALUE;
- }
- mSettings = buffering;
- if (mSettings.mInitialBufferingMode == BUFFERING_MODE_NONE) {
- mSettings.mInitialWatermarkMs = BufferingSettings::kNoWatermark;
- }
- if (!mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
- mSettings.mRebufferingWatermarkLowMs = BufferingSettings::kNoWatermark;
- mSettings.mRebufferingWatermarkHighMs = INT32_MAX;
- }
- if (!mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
- mSettings.mRebufferingWatermarkLowKB = BufferingSettings::kNoWatermark;
- mSettings.mRebufferingWatermarkHighKB = INT32_MAX;
- }
- return OK;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::prepare(
- const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming) {
- Mutex::Autolock _l(mLock);
- prepare_l(cachedSource, durationUs, bitrate, isStreaming);
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stop() {
- Mutex::Autolock _l(mLock);
- prepare_l(NULL /* cachedSource */, -1 /* durationUs */,
- -1 /* bitrate */, false /* isStreaming */);
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering() {
- Mutex::Autolock _l(mLock);
- cancelPollBuffering_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::restartPollBuffering() {
- Mutex::Autolock _l(mLock);
- if (mIsStreaming) {
- cancelPollBuffering_l();
- onPollBuffering_l();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary() {
- Mutex::Autolock _l(mLock);
- stopBufferingIfNecessary_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching() {
- Mutex::Autolock _l(mLock);
- ensureCacheIsFetching_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::updateQueuedTime(bool isAudio, int64_t timeUs) {
- Mutex::Autolock _l(mLock);
- if (isAudio) {
- mAudioTimeUs = timeUs;
- } else {
- mVideoTimeUs = timeUs;
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::setOffloadAudio(bool offload) {
- Mutex::Autolock _l(mLock);
- mOffloadAudio = offload;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::updateDequeuedBufferTime(int64_t mediaUs) {
- Mutex::Autolock _l(mLock);
- if (mediaUs < 0) {
- mFirstDequeuedBufferRealUs = -1ll;
- mFirstDequeuedBufferMediaUs = -1ll;
- } else if (mFirstDequeuedBufferRealUs < 0) {
- mFirstDequeuedBufferRealUs = ALooper::GetNowUs();
- mFirstDequeuedBufferMediaUs = mediaUs;
- }
- mlastDequeuedBufferMediaUs = mediaUs;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::prepare_l(
- const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming) {
-
- mCachedSource = cachedSource;
- mDurationUs = durationUs;
- mBitrate = bitrate;
- mIsStreaming = isStreaming;
- mAudioTimeUs = 0;
- mVideoTimeUs = 0;
- mPrepareBuffering = (cachedSource != NULL);
- cancelPollBuffering_l();
- mOffloadAudio = false;
- mFirstDequeuedBufferRealUs = -1ll;
- mFirstDequeuedBufferMediaUs = -1ll;
- mlastDequeuedBufferMediaUs = -1ll;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::cancelPollBuffering_l() {
- mBuffering = false;
- ++mPollBufferingGeneration;
- mPrevBufferPercentage = -1;
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::notifyBufferingUpdate_l(int32_t percentage) {
+void NuPlayer::GenericSource::notifyBufferingUpdate(int32_t percentage) {
// Buffering percent could go backward as it's estimated from remaining
// data and last access time. This could cause the buffering position
// drawn on media control to jitter slightly. Remember previously reported
@@ -1707,106 +1465,28 @@
mPrevBufferPercentage = percentage;
- ALOGV("notifyBufferingUpdate_l: buffering %d%%", percentage);
+ ALOGV("notifyBufferingUpdate: buffering %d%%", percentage);
- sp<AMessage> msg = mNotify->dup();
- msg->setInt32("what", kWhatBufferingUpdate);
- msg->setInt32("percentage", percentage);
- msg->post();
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatBufferingUpdate);
+ notify->setInt32("percentage", percentage);
+ notify->post();
}
-void NuPlayer::GenericSource::BufferingMonitor::startBufferingIfNecessary_l() {
- if (mPrepareBuffering) {
- return;
- }
-
- if (!mBuffering) {
- ALOGD("startBufferingIfNecessary_l");
-
- mBuffering = true;
-
- ensureCacheIsFetching_l();
- sendCacheStats_l();
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatPauseOnBufferingStart);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::stopBufferingIfNecessary_l() {
- if (mPrepareBuffering) {
- ALOGD("stopBufferingIfNecessary_l, mBuffering=%d", mBuffering);
-
- mPrepareBuffering = false;
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatPrepared);
- notify->setInt32("err", OK);
- notify->post();
-
- return;
- }
-
- if (mBuffering) {
- ALOGD("stopBufferingIfNecessary_l");
- mBuffering = false;
-
- sendCacheStats_l();
-
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatResumeOnBufferingEnd);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::sendCacheStats_l() {
- int32_t kbps = 0;
- status_t err = UNKNOWN_ERROR;
-
- if (mCachedSource != NULL) {
- err = mCachedSource->getEstimatedBandwidthKbps(&kbps);
- }
-
- if (err == OK) {
- sp<AMessage> notify = mNotify->dup();
- notify->setInt32("what", kWhatCacheStats);
- notify->setInt32("bandwidth", kbps);
- notify->post();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::ensureCacheIsFetching_l() {
- if (mCachedSource != NULL) {
- mCachedSource->resumeFetchingIfNecessary();
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::schedulePollBuffering_l() {
+void NuPlayer::GenericSource::schedulePollBuffering() {
sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
msg->setInt32("generation", mPollBufferingGeneration);
// Enquires buffering status every second.
msg->post(1000000ll);
}
-int64_t NuPlayer::GenericSource::BufferingMonitor::getLastReadPosition_l() {
- if (mAudioTimeUs > 0) {
- return mAudioTimeUs;
- } else if (mVideoTimeUs > 0) {
- return mVideoTimeUs;
- } else {
- return 0;
- }
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::onPollBuffering_l() {
+void NuPlayer::GenericSource::onPollBuffering() {
status_t finalStatus = UNKNOWN_ERROR;
int64_t cachedDurationUs = -1ll;
ssize_t cachedDataRemaining = -1;
if (mCachedSource != NULL) {
- cachedDataRemaining =
- mCachedSource->approxDataRemaining(&finalStatus);
+ cachedDataRemaining = mCachedSource->approxDataRemaining(&finalStatus);
if (finalStatus == OK) {
off64_t size;
@@ -1824,157 +1504,49 @@
}
if (finalStatus != OK) {
- ALOGV("onPollBuffering_l: EOS (finalStatus = %d)", finalStatus);
+ ALOGV("onPollBuffering: EOS (finalStatus = %d)", finalStatus);
if (finalStatus == ERROR_END_OF_STREAM) {
- notifyBufferingUpdate_l(100);
+ notifyBufferingUpdate(100);
}
- stopBufferingIfNecessary_l();
return;
}
if (cachedDurationUs >= 0ll) {
if (mDurationUs > 0ll) {
- int64_t cachedPosUs = getLastReadPosition_l() + cachedDurationUs;
+ int64_t cachedPosUs = getLastReadPosition() + cachedDurationUs;
int percentage = 100.0 * cachedPosUs / mDurationUs;
if (percentage > 100) {
percentage = 100;
}
- notifyBufferingUpdate_l(percentage);
+ notifyBufferingUpdate(percentage);
}
- ALOGV("onPollBuffering_l: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
-
- if (mPrepareBuffering) {
- if (cachedDurationUs > mSettings.mInitialWatermarkMs * 1000) {
- stopBufferingIfNecessary_l();
- }
- } else if (mSettings.IsTimeBasedBufferingMode(mSettings.mRebufferingMode)) {
- if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
- // Take into account the data cached in downstream components to try to avoid
- // unnecessary pause.
- if (mOffloadAudio && mFirstDequeuedBufferRealUs >= 0) {
- int64_t downStreamCacheUs =
- mlastDequeuedBufferMediaUs - mFirstDequeuedBufferMediaUs
- - (ALooper::GetNowUs() - mFirstDequeuedBufferRealUs);
- if (downStreamCacheUs > 0) {
- cachedDurationUs += downStreamCacheUs;
- }
- }
-
- if (cachedDurationUs < mSettings.mRebufferingWatermarkLowMs * 1000) {
- startBufferingIfNecessary_l();
- }
- } else if (cachedDurationUs > mSettings.mRebufferingWatermarkHighMs * 1000) {
- stopBufferingIfNecessary_l();
- }
- }
- } else if (cachedDataRemaining >= 0
- && mSettings.IsSizeBasedBufferingMode(mSettings.mRebufferingMode)) {
- ALOGV("onPollBuffering_l: cachedDataRemaining %zd bytes",
- cachedDataRemaining);
-
- if (cachedDataRemaining < (mSettings.mRebufferingWatermarkLowKB << 10)) {
- startBufferingIfNecessary_l();
- } else if (cachedDataRemaining > (mSettings.mRebufferingWatermarkHighKB << 10)) {
- stopBufferingIfNecessary_l();
- }
+ ALOGV("onPollBuffering: cachedDurationUs %.1f sec", cachedDurationUs / 1000000.0f);
}
- schedulePollBuffering_l();
-}
-
-void NuPlayer::GenericSource::BufferingMonitor::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatPollBuffering:
- {
- int32_t generation;
- CHECK(msg->findInt32("generation", &generation));
- Mutex::Autolock _l(mLock);
- if (generation == mPollBufferingGeneration) {
- onPollBuffering_l();
- }
- break;
- }
- default:
- TRESPASS();
- break;
- }
+ schedulePollBuffering();
}
// Modular DRM
status_t NuPlayer::GenericSource::prepareDrm(
- const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto)
-{
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *outCrypto) {
+ Mutex::Autolock _l(mLock);
ALOGV("prepareDrm");
- sp<AMessage> msg = new AMessage(kWhatPrepareDrm, this);
- // synchronous call so just passing the address but with local copies of "const" args
- uint8_t UUID[16];
- memcpy(UUID, uuid, sizeof(UUID));
- Vector<uint8_t> sessionId = drmSessionId;
- msg->setPointer("uuid", (void*)UUID);
- msg->setPointer("drmSessionId", (void*)&sessionId);
- msg->setPointer("crypto", (void*)crypto);
-
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- CHECK(response->findInt32("status", &status));
- ALOGV_IF(status == OK, "prepareDrm: mCrypto: %p (%d)", crypto->get(),
- (*crypto != NULL ? (*crypto)->getStrongCount() : 0));
- ALOGD("prepareDrm ret: %d ", status);
- } else {
- ALOGE("prepareDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer::GenericSource::releaseDrm()
-{
- ALOGV("releaseDrm");
-
- sp<AMessage> msg = new AMessage(kWhatReleaseDrm, this);
-
- // synchronous call to update the source states before the player proceedes with crypto cleanup
- sp<AMessage> response;
- status_t status = msg->postAndAwaitResponse(&response);
-
- if (status == OK && response != NULL) {
- ALOGD("releaseDrm ret: OK ");
- } else {
- ALOGE("releaseDrm err: %d", status);
- }
-
- return status;
-}
-
-status_t NuPlayer::GenericSource::onPrepareDrm(const sp<AMessage> &msg)
-{
- ALOGV("onPrepareDrm ");
-
mIsDrmProtected = false;
mIsDrmReleased = false;
mIsSecure = false;
- uint8_t *uuid;
- Vector<uint8_t> *drmSessionId;
- sp<ICrypto> *outCrypto;
- CHECK(msg->findPointer("uuid", (void**)&uuid));
- CHECK(msg->findPointer("drmSessionId", (void**)&drmSessionId));
- CHECK(msg->findPointer("crypto", (void**)&outCrypto));
-
status_t status = OK;
- sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, *drmSessionId, status);
+ sp<ICrypto> crypto = NuPlayerDrm::createCryptoAndPlugin(uuid, drmSessionId, status);
if (crypto == NULL) {
- ALOGE("onPrepareDrm: createCrypto failed. status: %d", status);
+ ALOGE("prepareDrm: createCrypto failed. status: %d", status);
return status;
}
- ALOGV("onPrepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
+ ALOGV("prepareDrm: createCryptoAndPlugin succeeded for uuid: %s",
DrmUUID::toHexString(uuid).string());
*outCrypto = crypto;
@@ -1983,14 +1555,14 @@
if (mMimes.size() == 0) {
status = UNKNOWN_ERROR;
- ALOGE("onPrepareDrm: Unexpected. Must have at least one track. status: %d", status);
+ ALOGE("prepareDrm: Unexpected. Must have at least one track. status: %d", status);
return status;
}
// first mime in this list is either the video track, or the first audio track
const char *mime = mMimes[0].string();
mIsSecure = crypto->requiresSecureDecoderComponent(mime);
- ALOGV("onPrepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
+ ALOGV("prepareDrm: requiresSecureDecoderComponent mime: %s isSecure: %d",
mime, mIsSecure);
// Checking the member flags while in the looper to send out the notification.
@@ -2004,18 +1576,27 @@
FLAG_CAN_SEEK_FORWARD |
FLAG_CAN_SEEK);
+ if (status == OK) {
+ ALOGV("prepareDrm: mCrypto: %p (%d)", outCrypto->get(),
+ (*outCrypto != NULL ? (*outCrypto)->getStrongCount() : 0));
+ ALOGD("prepareDrm ret: %d ", status);
+ } else {
+ ALOGE("prepareDrm err: %d", status);
+ }
return status;
}
-status_t NuPlayer::GenericSource::onReleaseDrm()
-{
+status_t NuPlayer::GenericSource::releaseDrm() {
+ Mutex::Autolock _l(mLock);
+ ALOGV("releaseDrm");
+
if (mIsDrmProtected) {
mIsDrmProtected = false;
// to prevent returning any more buffer after stop/releaseDrm (b/37960096)
mIsDrmReleased = true;
- ALOGV("onReleaseDrm: mIsDrmProtected is reset.");
+ ALOGV("releaseDrm: mIsDrmProtected is reset.");
} else {
- ALOGE("onReleaseDrm: mIsDrmProtected is already false.");
+ ALOGE("releaseDrm: mIsDrmProtected is already false.");
}
return OK;
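
With the dedicated BufferingMonitor handler gone, GenericSource relies on per-track generation counters: a seek or track change bumps the counter, readBuffer() drops mLock around the blocking extractor read, and any data belonging to an older generation is discarded. A minimal sketch of that idiom with placeholder names and std::mutex:

    #include <cstdint>
    #include <mutex>
    #include <vector>

    class Source {
    public:
        void seek() {
            std::lock_guard<std::mutex> lock(mLock);
            ++mGeneration;                 // invalidates reads already in flight
        }

        void readBuffer() {
            std::unique_lock<std::mutex> lock(mLock);
            const int32_t generation = mGeneration;

            lock.unlock();                 // the extractor read may block for a while
            std::vector<int> buffers = blockingReadFromExtractor();
            lock.lock();

            if (generation != mGeneration) {
                return;                    // a seek or track change raced us: drop data
            }
            mQueued.insert(mQueued.end(), buffers.begin(), buffers.end());
        }

    private:
        std::vector<int> blockingReadFromExtractor() { return {1, 2, 3}; }

        std::mutex mLock;
        int32_t mGeneration = 0;
        std::vector<int> mQueued;
    };
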
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.h b/media/libmediaplayerservice/nuplayer/GenericSource.h
index 8188a14..f4debc1 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.h
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.h
@@ -86,13 +86,11 @@
virtual bool isStreaming() const;
- virtual void setOffloadAudio(bool offload);
-
// Modular DRM
virtual void signalBufferReturned(MediaBuffer *buffer);
virtual status_t prepareDrm(
- const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *crypto);
+ const uint8_t uuid[16], const Vector<uint8_t> &drmSessionId, sp<ICrypto> *outCrypto);
virtual status_t releaseDrm();
@@ -114,17 +112,10 @@
kWhatSendTimedTextData,
kWhatChangeAVSource,
kWhatPollBuffering,
- kWhatGetFormat,
- kWhatGetSelectedTrack,
- kWhatSelectTrack,
- kWhatSeek,
kWhatReadBuffer,
kWhatStart,
kWhatResume,
kWhatSecureDecodersInstantiated,
- // Modular DRM
- kWhatPrepareDrm,
- kWhatReleaseDrm,
};
struct Track {
@@ -133,84 +124,6 @@
sp<AnotherPacketSource> mPackets;
};
- // Helper to monitor buffering status. The polling happens every second.
- // When necessary, it will send out buffering events to the player.
- struct BufferingMonitor : public AHandler {
- public:
- explicit BufferingMonitor(const sp<AMessage> &notify);
-
- void getDefaultBufferingSettings(BufferingSettings *buffering /* nonnull */);
- status_t setBufferingSettings(const BufferingSettings &buffering);
-
- // Set up state.
- void prepare(const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming);
- // Stop and reset buffering monitor.
- void stop();
- // Cancel the current monitor task.
- void cancelPollBuffering();
- // Restart the monitor task.
- void restartPollBuffering();
- // Stop buffering task and send out corresponding events.
- void stopBufferingIfNecessary();
- // Make sure data source is getting data.
- void ensureCacheIsFetching();
- // Update media time of just extracted buffer from data source.
- void updateQueuedTime(bool isAudio, int64_t timeUs);
-
- // Set the offload mode.
- void setOffloadAudio(bool offload);
- // Update media time of last dequeued buffer which is sent to the decoder.
- void updateDequeuedBufferTime(int64_t mediaUs);
-
- protected:
- virtual ~BufferingMonitor();
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
- private:
- enum {
- kWhatPollBuffering,
- };
-
- sp<AMessage> mNotify;
-
- sp<NuCachedSource2> mCachedSource;
- int64_t mDurationUs;
- int64_t mBitrate;
- bool mIsStreaming;
-
- int64_t mAudioTimeUs;
- int64_t mVideoTimeUs;
- int32_t mPollBufferingGeneration;
- bool mPrepareBuffering;
- bool mBuffering;
- int32_t mPrevBufferPercentage;
-
- mutable Mutex mLock;
-
- BufferingSettings mSettings;
- bool mOffloadAudio;
- int64_t mFirstDequeuedBufferRealUs;
- int64_t mFirstDequeuedBufferMediaUs;
- int64_t mlastDequeuedBufferMediaUs;
-
- void prepare_l(const sp<NuCachedSource2> &cachedSource,
- int64_t durationUs,
- int64_t bitrate,
- bool isStreaming);
- void cancelPollBuffering_l();
- void notifyBufferingUpdate_l(int32_t percentage);
- void startBufferingIfNecessary_l();
- void stopBufferingIfNecessary_l();
- void sendCacheStats_l();
- void ensureCacheIsFetching_l();
- int64_t getLastReadPosition_l();
- void onPollBuffering_l();
- void schedulePollBuffering_l();
- };
-
Vector<sp<IMediaSource> > mSources;
Track mAudioTrack;
int64_t mAudioTimeUs;
@@ -221,6 +134,13 @@
Track mSubtitleTrack;
Track mTimedTextTrack;
+ BufferingSettings mBufferingSettings;
+ int32_t mPrevBufferPercentage;
+ int32_t mPollBufferingGeneration;
+ bool mSentPauseOnBuffering;
+
+ int32_t mAudioDataGeneration;
+ int32_t mVideoDataGeneration;
int32_t mFetchSubtitleDataGeneration;
int32_t mFetchTimedTextDataGeneration;
int64_t mDurationUs;
@@ -243,17 +163,14 @@
sp<DataSource> mHttpSource;
sp<MetaData> mFileMeta;
bool mStarted;
- bool mStopRead;
+ bool mPreparing;
int64_t mBitrate;
- sp<BufferingMonitor> mBufferingMonitor;
uint32_t mPendingReadBufferTypes;
sp<ABuffer> mGlobalTimedText;
- mutable Mutex mReadBufferLock;
- mutable Mutex mDisconnectLock;
+ mutable Mutex mLock;
sp<ALooper> mLooper;
- sp<ALooper> mBufferingMonitorLooper;
void resetDataSource();
@@ -265,21 +182,6 @@
void finishPrepareAsync();
status_t startSources();
- void onGetFormatMeta(const sp<AMessage>& msg) const;
- sp<MetaData> doGetFormatMeta(bool audio) const;
-
- void onGetTrackInfo(const sp<AMessage>& msg) const;
- sp<AMessage> doGetTrackInfo(size_t trackIndex) const;
-
- void onGetSelectedTrack(const sp<AMessage>& msg) const;
- ssize_t doGetSelectedTrack(media_track_type type) const;
-
- void onSelectTrack(const sp<AMessage>& msg);
- status_t doSelectTrack(size_t trackIndex, bool select, int64_t timeUs);
-
- void onSeek(const sp<AMessage>& msg);
- status_t doSeek(int64_t seekTimeUs, MediaPlayerSeekMode mode);
-
void onPrepareAsync();
void fetchTextData(
@@ -314,6 +216,15 @@
void queueDiscontinuityIfNeeded(
bool seeking, bool formatChange, media_track_type trackType, Track *track);
+ void schedulePollBuffering();
+ void onPollBuffering();
+ void notifyBufferingUpdate(int32_t percentage);
+
+ void sendCacheStats();
+
+ sp<MetaData> getFormatMeta_l(bool audio);
+ int32_t getDataGeneration(media_track_type type) const;
+
// Modular DRM
// The source is DRM protected and is prepared for DRM.
bool mIsDrmProtected;
@@ -322,8 +233,6 @@
Vector<String8> mMimes;
status_t checkDrmInfo();
- status_t onPrepareDrm(const sp<AMessage> &msg);
- status_t onReleaseDrm();
DISALLOW_EVIL_CONSTRUCTORS(GenericSource);
};
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 2b09479..a7a1b05 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -3781,6 +3781,8 @@
} else {
mFps = (double)framerate;
}
+ // propagate framerate to the output so that the muxer has it
+ outputFormat->setInt32("frame-rate", (int32_t)mFps);
video_def->xFramerate = (OMX_U32)(mFps * 65536);
video_def->eCompressionFormat = OMX_VIDEO_CodingUnused;
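
xFramerate is expressed in OMX's Q16.16 fixed-point format, which is what the multiplication by 65536 implements, while the new "frame-rate" output entry keeps a plain integer for the muxer. A small stand-alone illustration of the Q16.16 round trip (helper names are mine, not an OMX API):

    #include <cstdint>
    #include <cstdio>

    // Q16.16 fixed point: integer part in the high 16 bits,
    // fractional part in the low 16 bits.
    static uint32_t toQ16(double fps)   { return (uint32_t)(fps * 65536.0); }
    static double   fromQ16(uint32_t q) { return q / 65536.0; }

    int main() {
        uint32_t q = toQ16(29.97);
        std::printf("29.97 fps -> 0x%08x -> %.4f fps\n", q, fromQ16(q));
        // prints: 29.97 fps -> 0x001df851 -> 29.9700 fps
        return 0;
    }
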
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index fe1b285..4c7259f 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -46,6 +46,7 @@
"DataSourceFactory.cpp",
"DataURISource.cpp",
"FileSource.cpp",
+ "FrameDecoder.cpp",
"FrameRenderTracker.cpp",
"HTTPBase.cpp",
"HevcUtils.cpp",
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
new file mode 100644
index 0000000..fa5f37ec
--- /dev/null
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -0,0 +1,608 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "FrameDecoder"
+
+#include <inttypes.h>
+
+#include <utils/Log.h>
+#include <gui/Surface.h>
+
+#include "include/FrameDecoder.h"
+#include <media/ICrypto.h>
+#include <media/IMediaSource.h>
+#include <media/MediaCodecBuffer.h>
+#include <media/stagefright/foundation/avc_utils.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/MediaBuffer.h>
+#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaErrors.h>
+#include <media/stagefright/Utils.h>
+#include <private/media/VideoFrame.h>
+
+namespace android {
+
+static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
+static const size_t kRetryCount = 20; // must be >0
+
+VideoFrame *FrameDecoder::allocVideoFrame(
+ int32_t width, int32_t height, bool metaOnly) {
+ int32_t rotationAngle;
+ if (!mTrackMeta->findInt32(kKeyRotation, &rotationAngle)) {
+ rotationAngle = 0; // By default, no rotation
+ }
+
+ uint32_t type;
+ const void *iccData;
+ size_t iccSize;
+ if (!mTrackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
+ iccData = NULL;
+ iccSize = 0;
+ }
+
+ int32_t sarWidth, sarHeight;
+ int32_t displayWidth, displayHeight;
+ if (mTrackMeta->findInt32(kKeySARWidth, &sarWidth)
+ && mTrackMeta->findInt32(kKeySARHeight, &sarHeight)
+ && sarHeight != 0) {
+ displayWidth = (width * sarWidth) / sarHeight;
+ displayHeight = height;
+ } else if (mTrackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
+ && mTrackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
+ && displayWidth > 0 && displayHeight > 0
+ && width > 0 && height > 0) {
+ ALOGV("found display size %dx%d", displayWidth, displayHeight);
+ } else {
+ displayWidth = width;
+ displayHeight = height;
+ }
+
+ return new VideoFrame(width, height, displayWidth, displayHeight,
+ rotationAngle, mDstBpp, !metaOnly, iccData, iccSize);
+}
+
+bool FrameDecoder::setDstColorFormat(android_pixel_format_t colorFormat) {
+ switch (colorFormat) {
+ case HAL_PIXEL_FORMAT_RGB_565:
+ {
+ mDstFormat = OMX_COLOR_Format16bitRGB565;
+ mDstBpp = 2;
+ return true;
+ }
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ {
+ mDstFormat = OMX_COLOR_Format32BitRGBA8888;
+ mDstBpp = 4;
+ return true;
+ }
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ {
+ mDstFormat = OMX_COLOR_Format32bitBGRA8888;
+ mDstBpp = 4;
+ return true;
+ }
+ default:
+ {
+ ALOGE("Unsupported color format: %d", colorFormat);
+ break;
+ }
+ }
+ return false;
+}
+
+VideoFrame* FrameDecoder::extractFrame(
+ int64_t frameTimeUs, int option, int colorFormat, bool metaOnly) {
+ if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+ return NULL;
+ }
+
+ if (metaOnly) {
+ int32_t width, height;
+ CHECK(trackMeta()->findInt32(kKeyWidth, &width));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &height));
+ return allocVideoFrame(width, height, true);
+ }
+
+ status_t err = extractInternal(frameTimeUs, 1, option);
+ if (err != OK) {
+ return NULL;
+ }
+
+ return mFrames.size() > 0 ? mFrames[0].release() : NULL;
+}
+
+status_t FrameDecoder::extractFrames(
+ int64_t frameTimeUs, size_t numFrames, int option, int colorFormat,
+ std::vector<VideoFrame*>* frames) {
+ if (!setDstColorFormat((android_pixel_format_t)colorFormat)) {
+ return ERROR_UNSUPPORTED;
+ }
+
+ status_t err = extractInternal(frameTimeUs, numFrames, option);
+ if (err != OK) {
+ return err;
+ }
+
+ for (size_t i = 0; i < mFrames.size(); i++) {
+ frames->push_back(mFrames[i].release());
+ }
+ return OK;
+}
+
+status_t FrameDecoder::extractInternal(
+ int64_t frameTimeUs, size_t numFrames, int option) {
+
+ MediaSource::ReadOptions options;
+ sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
+ frameTimeUs, numFrames, option, &options);
+ if (videoFormat == NULL) {
+ ALOGE("video format or seek mode not supported");
+ return ERROR_UNSUPPORTED;
+ }
+
+ status_t err;
+ sp<ALooper> looper = new ALooper;
+ looper->start();
+ sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
+ looper, mComponentName, &err);
+ if (decoder.get() == NULL || err != OK) {
+ ALOGW("Failed to instantiate decoder [%s]", mComponentName.c_str());
+ return (decoder.get() == NULL) ? NO_MEMORY : err;
+ }
+
+ err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
+ if (err != OK) {
+ ALOGW("configure returned error %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+
+ err = decoder->start();
+ if (err != OK) {
+ ALOGW("start returned error %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+
+ err = mSource->start();
+ if (err != OK) {
+ ALOGW("source failed to start: %d (%s)", err, asString(err));
+ decoder->release();
+ return err;
+ }
+
+ Vector<sp<MediaCodecBuffer> > inputBuffers;
+ err = decoder->getInputBuffers(&inputBuffers);
+ if (err != OK) {
+ ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
+ decoder->release();
+ mSource->stop();
+ return err;
+ }
+
+ Vector<sp<MediaCodecBuffer> > outputBuffers;
+ err = decoder->getOutputBuffers(&outputBuffers);
+ if (err != OK) {
+ ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
+ decoder->release();
+ mSource->stop();
+ return err;
+ }
+
+ sp<AMessage> outputFormat = NULL;
+ bool haveMoreInputs = true;
+ size_t index, offset, size;
+ int64_t timeUs;
+ size_t retriesLeft = kRetryCount;
+ bool done = false;
+ bool firstSample = true;
+ do {
+ size_t inputIndex = -1;
+ int64_t ptsUs = 0ll;
+ uint32_t flags = 0;
+ sp<MediaCodecBuffer> codecBuffer = NULL;
+
+ while (haveMoreInputs) {
+ err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
+ if (err != OK) {
+ ALOGW("Timed out waiting for input");
+ if (retriesLeft) {
+ err = OK;
+ }
+ break;
+ }
+ codecBuffer = inputBuffers[inputIndex];
+
+ MediaBuffer *mediaBuffer = NULL;
+
+ err = mSource->read(&mediaBuffer, &options);
+ options.clearSeekTo();
+ if (err != OK) {
+ ALOGW("Input Error or EOS");
+ haveMoreInputs = false;
+ if (!firstSample && err == ERROR_END_OF_STREAM) {
+ err = OK;
+ }
+ break;
+ }
+
+ if (mediaBuffer->range_length() > codecBuffer->capacity()) {
+ ALOGE("buffer size (%zu) too large for codec input size (%zu)",
+ mediaBuffer->range_length(), codecBuffer->capacity());
+ haveMoreInputs = false;
+ err = BAD_VALUE;
+ } else {
+ codecBuffer->setRange(0, mediaBuffer->range_length());
+
+ CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &ptsUs));
+ memcpy(codecBuffer->data(),
+ (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
+ mediaBuffer->range_length());
+
+ onInputReceived(codecBuffer, mediaBuffer->meta_data(), firstSample, &flags);
+ firstSample = false;
+ }
+
+ mediaBuffer->release();
+ break;
+ }
+
+ if (haveMoreInputs && inputIndex < inputBuffers.size()) {
+ ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
+ codecBuffer->size(), ptsUs, flags);
+
+ err = decoder->queueInputBuffer(
+ inputIndex,
+ codecBuffer->offset(),
+ codecBuffer->size(),
+ ptsUs,
+ flags);
+
+ if (flags & MediaCodec::BUFFER_FLAG_EOS) {
+ haveMoreInputs = false;
+ }
+
+ // we don't expect an output from codec config buffer
+ if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
+ continue;
+ }
+ }
+
+ while (err == OK) {
+ // wait for a decoded buffer
+ err = decoder->dequeueOutputBuffer(
+ &index,
+ &offset,
+ &size,
+ &timeUs,
+ &flags,
+ kBufferTimeOutUs);
+
+ if (err == INFO_FORMAT_CHANGED) {
+ ALOGV("Received format change");
+ err = decoder->getOutputFormat(&outputFormat);
+ } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
+ ALOGV("Output buffers changed");
+ err = decoder->getOutputBuffers(&outputBuffers);
+ } else {
+ if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
+ ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
+ err = OK;
+ } else if (err == OK) {
+ // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
+ // from the extractor, decode to the specified frame. Otherwise we're done.
+ ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
+ sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
+
+ err = onOutputReceived(videoFrameBuffer, outputFormat, timeUs, &done);
+
+ decoder->releaseOutputBuffer(index);
+ } else {
+ ALOGW("Received error %d (%s) instead of output", err, asString(err));
+ done = true;
+ }
+ break;
+ }
+ }
+ } while (err == OK && !done);
+
+ mSource->stop();
+ decoder->release();
+
+ if (err != OK) {
+ ALOGE("failed to get video frame (err %d)", err);
+ }
+
+ return err;
+}
+
+sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
+ int64_t frameTimeUs, size_t numFrames, int seekMode, MediaSource::ReadOptions *options) {
+ mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
+ if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
+ mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
+ ALOGE("Unknown seek mode: %d", mSeekMode);
+ return NULL;
+ }
+ mNumFrames = numFrames;
+
+ const char *mime;
+ if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
+ ALOGE("Could not find mime type");
+ return NULL;
+ }
+
+ mIsAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
+ || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+
+ if (frameTimeUs < 0) {
+ int64_t thumbNailTime;
+ if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
+ || thumbNailTime < 0) {
+ thumbNailTime = 0;
+ }
+ options->setSeekTo(thumbNailTime, mSeekMode);
+ } else {
+ options->setSeekTo(frameTimeUs, mSeekMode);
+ }
+
+ sp<AMessage> videoFormat;
+ if (convertMetaDataToMessage(trackMeta(), &videoFormat) != OK) {
+ ALOGE("b/23680780");
+ ALOGW("Failed to convert meta data to message");
+ return NULL;
+ }
+
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+
+ // For the thumbnail extraction case, try to allocate a single buffer on both
+ // the input and output ports when seeking to a sync frame. NOTE: this request
+ // may fail if the component requires more than that for decoding.
+ bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
+ || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
+ if (!isSeekingClosest) {
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
+ return videoFormat;
+}
+
+status_t VideoFrameDecoder::onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ const sp<MetaData> &sampleMeta, bool firstSample, uint32_t *flags) {
+ bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
+ || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
+
+ if (firstSample && isSeekingClosest) {
+ sampleMeta->findInt64(kKeyTargetTime, &mTargetTimeUs);
+ ALOGV("Seeking closest: targetTimeUs=%lld", (long long)mTargetTimeUs);
+ }
+
+ if (mIsAvcOrHevc && !isSeekingClosest
+ && IsIDR(codecBuffer->data(), codecBuffer->size())) {
+ // Only need to decode one IDR frame, unless we're seeking with CLOSEST
+ // option, in which case we need to actually decode to targetTimeUs.
+ *flags |= MediaCodec::BUFFER_FLAG_EOS;
+ }
+ return OK;
+}
+
+status_t VideoFrameDecoder::onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs, bool *done) {
+ bool shouldOutput = (mTargetTimeUs < 0ll) || (timeUs >= mTargetTimeUs);
+
+ // If this is not the target frame, skip color convert.
+ if (!shouldOutput) {
+ *done = false;
+ return OK;
+ }
+
+ *done = (++mNumFramesDecoded >= mNumFrames);
+
+ int32_t width, height;
+ CHECK(outputFormat != NULL);
+ CHECK(outputFormat->findInt32("width", &width));
+ CHECK(outputFormat->findInt32("height", &height));
+
+ int32_t crop_left, crop_top, crop_right, crop_bottom;
+ if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+ crop_left = crop_top = 0;
+ crop_right = width - 1;
+ crop_bottom = height - 1;
+ }
+
+ VideoFrame *frame = allocVideoFrame(
+ (crop_right - crop_left + 1),
+ (crop_bottom - crop_top + 1),
+ false /*metaOnly*/);
+ addFrame(frame);
+
+ int32_t srcFormat;
+ CHECK(outputFormat->findInt32("color-format", &srcFormat));
+
+ ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
+
+ if (converter.isValid()) {
+ converter.convert(
+ (const uint8_t *)videoFrameBuffer->data(),
+ width, height,
+ crop_left, crop_top, crop_right, crop_bottom,
+ frame->mData,
+ frame->mWidth,
+ frame->mHeight,
+ crop_left, crop_top, crop_right, crop_bottom);
+ return OK;
+ }
+
+ ALOGE("Unable to convert from format 0x%08x to 0x%08x",
+ srcFormat, dstFormat());
+ return ERROR_UNSUPPORTED;
+}
+
+sp<AMessage> ImageDecoder::onGetFormatAndSeekOptions(
+ int64_t frameTimeUs, size_t /*numFrames*/,
+ int /*seekMode*/, MediaSource::ReadOptions *options) {
+ sp<MetaData> overrideMeta;
+ if (frameTimeUs < 0) {
+ uint32_t type;
+ const void *data;
+ size_t size;
+ int64_t thumbNailTime = 0;
+ int32_t thumbnailWidth, thumbnailHeight;
+
+ // if we have a stand-alone thumbnail, set up the override meta,
+ // and set seekTo time to -1.
+ if (trackMeta()->findInt32(kKeyThumbnailWidth, &thumbnailWidth)
+ && trackMeta()->findInt32(kKeyThumbnailHeight, &thumbnailHeight)
+ && trackMeta()->findData(kKeyThumbnailHVCC, &type, &data, &size)) {
+ overrideMeta = new MetaData(*(trackMeta()));
+ overrideMeta->remove(kKeyDisplayWidth);
+ overrideMeta->remove(kKeyDisplayHeight);
+ overrideMeta->setInt32(kKeyWidth, thumbnailWidth);
+ overrideMeta->setInt32(kKeyHeight, thumbnailHeight);
+ overrideMeta->setData(kKeyHVCC, type, data, size);
+ thumbNailTime = -1ll;
+ ALOGV("thumbnail: %dx%d", thumbnailWidth, thumbnailHeight);
+ }
+ options->setSeekTo(thumbNailTime);
+ } else {
+ options->setSeekTo(frameTimeUs);
+ }
+
+ mGridRows = mGridCols = 1;
+ if (overrideMeta == NULL) {
+ // check if we're dealing with a tiled heif
+ int32_t gridWidth, gridHeight, gridRows, gridCols;
+ if (trackMeta()->findInt32(kKeyGridWidth, &gridWidth) && gridWidth > 0
+ && trackMeta()->findInt32(kKeyGridHeight, &gridHeight) && gridHeight > 0
+ && trackMeta()->findInt32(kKeyGridRows, &gridRows) && gridRows > 0
+ && trackMeta()->findInt32(kKeyGridCols, &gridCols) && gridCols > 0) {
+ int32_t width, height;
+ CHECK(trackMeta()->findInt32(kKeyWidth, &width));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &height));
+
+ if (width <= gridWidth * gridCols && height <= gridHeight * gridRows) {
+ ALOGV("grid: %dx%d, size: %dx%d, picture size: %dx%d",
+ gridCols, gridRows, gridWidth, gridHeight, width, height);
+
+ overrideMeta = new MetaData(*(trackMeta()));
+ overrideMeta->setInt32(kKeyWidth, gridWidth);
+ overrideMeta->setInt32(kKeyHeight, gridHeight);
+ mGridCols = gridCols;
+ mGridRows = gridRows;
+ } else {
+ ALOGE("bad grid: %dx%d, size: %dx%d, picture size: %dx%d",
+ gridCols, gridRows, gridWidth, gridHeight, width, height);
+ }
+ }
+ if (overrideMeta == NULL) {
+ overrideMeta = trackMeta();
+ }
+ }
+
+ sp<AMessage> videoFormat;
+ if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
+ ALOGE("b/23680780");
+ ALOGW("Failed to convert meta data to message");
+ return NULL;
+ }
+
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+
+ if ((mGridRows == 1) && (mGridCols == 1)) {
+ videoFormat->setInt32("android._num-input-buffers", 1);
+ videoFormat->setInt32("android._num-output-buffers", 1);
+ }
+ return videoFormat;
+}
+
+status_t ImageDecoder::onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
+ int32_t width, height;
+ CHECK(outputFormat != NULL);
+ CHECK(outputFormat->findInt32("width", &width));
+ CHECK(outputFormat->findInt32("height", &height));
+
+ int32_t imageWidth, imageHeight;
+ CHECK(trackMeta()->findInt32(kKeyWidth, &imageWidth));
+ CHECK(trackMeta()->findInt32(kKeyHeight, &imageHeight));
+
+ if (mFrame == NULL) {
+ mFrame = allocVideoFrame(imageWidth, imageHeight, false /*metaOnly*/);
+
+ addFrame(mFrame);
+ }
+
+ int32_t srcFormat;
+ CHECK(outputFormat->findInt32("color-format", &srcFormat));
+
+ ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());
+
+ int32_t dstLeft, dstTop, dstRight, dstBottom;
+ int32_t numTiles = mGridRows * mGridCols;
+
+ dstLeft = mTilesDecoded % mGridCols * width;
+ dstTop = mTilesDecoded / mGridCols * height;
+ dstRight = dstLeft + width - 1;
+ dstBottom = dstTop + height - 1;
+
+ int32_t crop_left, crop_top, crop_right, crop_bottom;
+ if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
+ crop_left = crop_top = 0;
+ crop_right = width - 1;
+ crop_bottom = height - 1;
+ }
+
+ // Crop the right-most column and bottom-most row of tiles so they don't extend past the image bounds.
+ // TODO: need to move this into the color converter itself.
+ if (dstRight >= imageWidth) {
+ crop_right = imageWidth - dstLeft - 1;
+ dstRight = dstLeft + crop_right;
+ }
+ if (dstBottom >= imageHeight) {
+ crop_bottom = imageHeight - dstTop - 1;
+ dstBottom = dstTop + crop_bottom;
+ }
+
+ *done = (++mTilesDecoded >= numTiles);
+
+ if (converter.isValid()) {
+ converter.convert(
+ (const uint8_t *)videoFrameBuffer->data(),
+ width, height,
+ crop_left, crop_top, crop_right, crop_bottom,
+ mFrame->mData,
+ mFrame->mWidth,
+ mFrame->mHeight,
+ dstLeft, dstTop, dstRight, dstBottom);
+ return OK;
+ }
+
+ ALOGE("Unable to convert from format 0x%08x to 0x%08x",
+ srcFormat, dstFormat());
+ return ERROR_UNSUPPORTED;
+}
+
+} // namespace android
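Note: a minimal usage sketch for the new FrameDecoder subclasses, assuming the caller already holds a selected track's MetaData and IMediaSource (as StagefrightMetadataRetriever does below); decodeThumbnail is a hypothetical wrapper. ImageDecoder is driven the same way for HEIF image tracks; it reassembles grid tiles into a single VideoFrame, placing tile N at column N % gridCols and row N / gridCols.

    // Sketch only: decode a video thumbnail with the new VideoFrameDecoder.
    #include "include/FrameDecoder.h"
    #include <private/media/VideoFrame.h>

    using namespace android;

    VideoFrame *decodeThumbnail(
            const AString &componentName,
            const sp<MetaData> &trackMeta,
            const sp<IMediaSource> &source) {
        VideoFrameDecoder decoder(componentName, trackMeta, source);
        // frameTimeUs < 0 means "use kKeyThumbnailTime from the track meta".
        return decoder.extractFrame(
                -1 /* frameTimeUs */,
                MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC /* option */,
                HAL_PIXEL_FORMAT_RGB_565 /* colorFormat */,
                false /* metaOnly */);
    }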
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index 25656c3..a176382 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -40,13 +40,23 @@
namespace android {
+NuMediaExtractor::Sample::Sample()
+ : mBuffer(NULL),
+ mSampleTimeUs(-1ll) {
+}
+
+NuMediaExtractor::Sample::Sample(MediaBuffer *buffer, int64_t timeUs)
+ : mBuffer(buffer),
+ mSampleTimeUs(timeUs) {
+}
+
NuMediaExtractor::NuMediaExtractor()
: mTotalBitrate(-1ll),
mDurationUs(-1ll) {
}
NuMediaExtractor::~NuMediaExtractor() {
- releaseTrackSamples();
+ releaseAllTrackSamples();
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
@@ -288,7 +298,8 @@
return OK;
}
-status_t NuMediaExtractor::selectTrack(size_t index) {
+status_t NuMediaExtractor::selectTrack(size_t index,
+ int64_t startTimeUs, MediaSource::ReadOptions::SeekMode mode) {
Mutex::Autolock autoLock(mLock);
if (mImpl == NULL) {
@@ -311,31 +322,56 @@
sp<IMediaSource> source = mImpl->getTrack(index);
if (source == nullptr) {
+ ALOGE("track %zu is empty", index);
return ERROR_MALFORMED;
}
status_t ret = source->start();
if (ret != OK) {
+ ALOGE("track %zu failed to start", index);
return ret;
}
+ sp<MetaData> meta = source->getFormat();
+ if (meta == NULL) {
+ ALOGE("track %zu has no meta data", index);
+ return ERROR_MALFORMED;
+ }
+
+ const char *mime;
+ if (!meta->findCString(kKeyMIMEType, &mime)) {
+ ALOGE("track %zu has no mime type in meta data", index);
+ return ERROR_MALFORMED;
+ }
+ ALOGV("selectTrack, track[%zu]: %s", index, mime);
+
mSelectedTracks.push();
TrackInfo *info = &mSelectedTracks.editItemAt(mSelectedTracks.size() - 1);
info->mSource = source;
info->mTrackIndex = index;
+ if (!strncasecmp(mime, "audio/", 6)) {
+ info->mTrackType = MEDIA_TRACK_TYPE_AUDIO;
+ info->mMaxFetchCount = 64;
+ } else if (!strncasecmp(mime, "video/", 6)) {
+ info->mTrackType = MEDIA_TRACK_TYPE_VIDEO;
+ info->mMaxFetchCount = 8;
+ } else {
+ info->mTrackType = MEDIA_TRACK_TYPE_UNKNOWN;
+ info->mMaxFetchCount = 1;
+ }
info->mFinalResult = OK;
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
+ releaseTrackSamples(info);
info->mTrackFlags = 0;
- const char *mime;
- CHECK(source->getFormat()->findCString(kKeyMIMEType, &mime));
-
if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_VORBIS)) {
info->mTrackFlags |= kIsVorbis;
}
+ if (startTimeUs >= 0) {
+ fetchTrackSamples(info, startTimeUs, mode);
+ }
+
return OK;
}
@@ -366,12 +402,7 @@
TrackInfo *info = &mSelectedTracks.editItemAt(i);
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
-
- info->mSampleTimeUs = -1ll;
- }
+ releaseTrackSamples(info);
CHECK_EQ((status_t)OK, info->mSource->stop());
@@ -380,79 +411,136 @@
return OK;
}
-void NuMediaExtractor::releaseTrackSamples() {
- for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
- TrackInfo *info = &mSelectedTracks.editItemAt(i);
+void NuMediaExtractor::releaseOneSample(TrackInfo *info) {
+ if (info == NULL || info->mSamples.empty()) {
+ return;
+ }
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
+ auto it = info->mSamples.begin();
+ if (it->mBuffer != NULL) {
+ it->mBuffer->release();
+ }
+ info->mSamples.erase(it);
+}
- info->mSampleTimeUs = -1ll;
+void NuMediaExtractor::releaseTrackSamples(TrackInfo *info) {
+ if (info == NULL) {
+ return;
+ }
+
+ auto it = info->mSamples.begin();
+ while (it != info->mSamples.end()) {
+ if (it->mBuffer != NULL) {
+ it->mBuffer->release();
}
+ it = info->mSamples.erase(it);
}
}
-ssize_t NuMediaExtractor::fetchTrackSamples(
+void NuMediaExtractor::releaseAllTrackSamples() {
+ for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
+ releaseTrackSamples(&mSelectedTracks.editItemAt(i));
+ }
+}
+
+ssize_t NuMediaExtractor::fetchAllTrackSamples(
int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
TrackInfo *minInfo = NULL;
ssize_t minIndex = -1;
for (size_t i = 0; i < mSelectedTracks.size(); ++i) {
TrackInfo *info = &mSelectedTracks.editItemAt(i);
+ fetchTrackSamples(info, seekTimeUs, mode);
- if (seekTimeUs >= 0ll) {
- info->mFinalResult = OK;
-
- if (info->mSample != NULL) {
- info->mSample->release();
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
- }
- } else if (info->mFinalResult != OK) {
+ if (info->mSamples.empty()) {
continue;
}
- if (info->mSample == NULL) {
- MediaSource::ReadOptions options;
- if (seekTimeUs >= 0ll) {
- options.setSeekTo(seekTimeUs, mode);
- }
- status_t err = info->mSource->read(&info->mSample, &options);
-
- if (err != OK) {
- CHECK(info->mSample == NULL);
-
- info->mFinalResult = err;
-
- if (info->mFinalResult != ERROR_END_OF_STREAM) {
- ALOGW("read on track %zu failed with error %d",
- info->mTrackIndex, err);
- }
-
- info->mSampleTimeUs = -1ll;
- continue;
- } else {
- CHECK(info->mSample != NULL);
- CHECK(info->mSample->meta_data()->findInt64(
- kKeyTime, &info->mSampleTimeUs));
- }
- }
-
- if (minInfo == NULL || info->mSampleTimeUs < minInfo->mSampleTimeUs) {
+ if (minInfo == NULL) {
minInfo = info;
minIndex = i;
+ } else {
+ auto it = info->mSamples.begin();
+ auto itMin = minInfo->mSamples.begin();
+ if (it->mSampleTimeUs < itMin->mSampleTimeUs) {
+ minInfo = info;
+ minIndex = i;
+ }
}
}
return minIndex;
}
+void NuMediaExtractor::fetchTrackSamples(TrackInfo *info,
+ int64_t seekTimeUs, MediaSource::ReadOptions::SeekMode mode) {
+ if (info == NULL) {
+ return;
+ }
+
+ MediaSource::ReadOptions options;
+ if (seekTimeUs >= 0ll) {
+ options.setSeekTo(seekTimeUs, mode);
+ info->mFinalResult = OK;
+ releaseTrackSamples(info);
+ } else if (info->mFinalResult != OK || !info->mSamples.empty()) {
+ return;
+ }
+
+ status_t err = OK;
+ Vector<MediaBuffer *> mediaBuffers;
+ if (info->mSource->supportReadMultiple()) {
+ options.setNonBlocking();
+ err = info->mSource->readMultiple(&mediaBuffers, info->mMaxFetchCount, &options);
+ } else {
+ MediaBuffer *mbuf = NULL;
+ err = info->mSource->read(&mbuf, &options);
+ if (err == OK && mbuf != NULL) {
+ mediaBuffers.push_back(mbuf);
+ }
+ }
+
+ info->mFinalResult = err;
+ if (err != OK && err != ERROR_END_OF_STREAM) {
+ ALOGW("read on track %zu failed with error %d", info->mTrackIndex, err);
+ size_t count = mediaBuffers.size();
+ for (size_t id = 0; id < count; ++id) {
+ MediaBuffer *mbuf = mediaBuffers[id];
+ if (mbuf != NULL) {
+ mbuf->release();
+ }
+ }
+ return;
+ }
+
+ size_t count = mediaBuffers.size();
+ bool releaseRemaining = false;
+ for (size_t id = 0; id < count; ++id) {
+ int64_t timeUs;
+ MediaBuffer *mbuf = mediaBuffers[id];
+ if (mbuf == NULL) {
+ continue;
+ }
+ if (releaseRemaining) {
+ mbuf->release();
+ continue;
+ }
+ if (mbuf->meta_data()->findInt64(kKeyTime, &timeUs)) {
+ info->mSamples.emplace_back(mbuf, timeUs);
+ } else {
+ mbuf->meta_data()->dumpToLog();
+ info->mFinalResult = ERROR_MALFORMED;
+ mbuf->release();
+ releaseRemaining = true;
+ }
+ }
+}
+
status_t NuMediaExtractor::seekTo(
int64_t timeUs, MediaSource::ReadOptions::SeekMode mode) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples(timeUs, mode);
+ ssize_t minIndex = fetchAllTrackSamples(timeUs, mode);
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -464,7 +552,7 @@
status_t NuMediaExtractor::advance() {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -472,28 +560,26 @@
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- info->mSample->release();
- info->mSample = NULL;
- info->mSampleTimeUs = -1ll;
+ releaseOneSample(info);
return OK;
}
-status_t NuMediaExtractor::appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer) {
+status_t NuMediaExtractor::appendVorbisNumPageSamples(MediaBuffer *mbuf, const sp<ABuffer> &buffer) {
int32_t numPageSamples;
- if (!info->mSample->meta_data()->findInt32(
+ if (!mbuf->meta_data()->findInt32(
kKeyValidSamples, &numPageSamples)) {
numPageSamples = -1;
}
- memcpy((uint8_t *)buffer->data() + info->mSample->range_length(),
+ memcpy((uint8_t *)buffer->data() + mbuf->range_length(),
&numPageSamples,
sizeof(numPageSamples));
uint32_t type;
const void *data;
size_t size, size2;
- if (info->mSample->meta_data()->findData(kKeyEncryptedSizes, &type, &data, &size)) {
+ if (mbuf->meta_data()->findData(kKeyEncryptedSizes, &type, &data, &size)) {
// Signal numPageSamples (a plain int32_t) is appended at the end,
// i.e. sizeof(numPageSamples) plain bytes + 0 encrypted bytes
if (SIZE_MAX - size < sizeof(int32_t)) {
@@ -511,9 +597,9 @@
int32_t zero = 0;
memcpy(adata, data, size);
memcpy(adata + size, &zero, sizeof(zero));
- info->mSample->meta_data()->setData(kKeyEncryptedSizes, type, adata, newSize);
+ mbuf->meta_data()->setData(kKeyEncryptedSizes, type, adata, newSize);
- if (info->mSample->meta_data()->findData(kKeyPlainSizes, &type, &data, &size2)) {
+ if (mbuf->meta_data()->findData(kKeyPlainSizes, &type, &data, &size2)) {
if (size2 != size) {
return ERROR_MALFORMED;
}
@@ -526,7 +612,7 @@
// append sizeof(numPageSamples) to plain sizes.
int32_t int32Size = sizeof(numPageSamples);
memcpy(adata + size, &int32Size, sizeof(int32Size));
- info->mSample->meta_data()->setData(kKeyPlainSizes, type, adata, newSize);
+ mbuf->meta_data()->setData(kKeyPlainSizes, type, adata, newSize);
}
return OK;
@@ -535,7 +621,7 @@
status_t NuMediaExtractor::readSampleData(const sp<ABuffer> &buffer) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -543,7 +629,8 @@
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- size_t sampleSize = info->mSample->range_length();
+ auto it = info->mSamples.begin();
+ size_t sampleSize = it->mBuffer->range_length();
if (info->mTrackFlags & kIsVorbis) {
// Each sample's data is suffixed by the number of page samples
@@ -556,14 +643,14 @@
}
const uint8_t *src =
- (const uint8_t *)info->mSample->data()
- + info->mSample->range_offset();
+ (const uint8_t *)it->mBuffer->data()
+ + it->mBuffer->range_offset();
- memcpy((uint8_t *)buffer->data(), src, info->mSample->range_length());
+ memcpy((uint8_t *)buffer->data(), src, it->mBuffer->range_length());
status_t err = OK;
if (info->mTrackFlags & kIsVorbis) {
- err = appendVorbisNumPageSamples(info, buffer);
+ err = appendVorbisNumPageSamples(it->mBuffer, buffer);
}
if (err == OK) {
@@ -576,7 +663,7 @@
status_t NuMediaExtractor::getSampleTrackIndex(size_t *trackIndex) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
@@ -591,14 +678,14 @@
status_t NuMediaExtractor::getSampleTime(int64_t *sampleTimeUs) {
Mutex::Autolock autoLock(mLock);
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- *sampleTimeUs = info->mSampleTimeUs;
+ *sampleTimeUs = info->mSamples.begin()->mSampleTimeUs;
return OK;
}
@@ -608,14 +695,14 @@
*sampleMeta = NULL;
- ssize_t minIndex = fetchTrackSamples();
+ ssize_t minIndex = fetchAllTrackSamples();
if (minIndex < 0) {
return ERROR_END_OF_STREAM;
}
TrackInfo *info = &mSelectedTracks.editItemAt(minIndex);
- *sampleMeta = info->mSample->meta_data();
+ *sampleMeta = info->mSamples.begin()->mBuffer->meta_data();
return OK;
}
@@ -626,7 +713,7 @@
}
bool NuMediaExtractor::getTotalBitrate(int64_t *bitrate) const {
- if (mTotalBitrate >= 0) {
+ if (mTotalBitrate > 0) {
*bitrate = mTotalBitrate;
return true;
}
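Note: a sketch of the client-side read loop that the new per-track sample queues serve; the method signatures come from this patch, while drainTrack and the surrounding data-source setup are assumed for illustration.

    #include <media/stagefright/NuMediaExtractor.h>
    #include <media/stagefright/foundation/ABuffer.h>

    using namespace android;

    // Drain one selected track. selectTrack() now optionally prefetches from
    // startTimeUs; audio tracks batch up to 64 buffers and video up to 8 via
    // readMultiple() when the source supports it.
    void drainTrack(const sp<NuMediaExtractor> &extractor, size_t trackIndex) {
        extractor->selectTrack(trackIndex, 0 /* startTimeUs */,
                MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);

        sp<ABuffer> buffer = new ABuffer(2 * 1024 * 1024);
        size_t index;
        while (extractor->getSampleTrackIndex(&index) == OK) {
            int64_t timeUs;
            extractor->getSampleTime(&timeUs);   // front of the earliest queue
            extractor->readSampleData(buffer);   // copies the queued MediaBuffer
            extractor->advance();                // releaseOneSample() pops it
        }
    }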
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 9babd1a..dfaa8b6 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -20,39 +20,24 @@
#include <inttypes.h>
#include <utils/Log.h>
-#include <gui/Surface.h>
+#include "include/FrameDecoder.h"
#include "include/StagefrightMetadataRetriever.h"
-#include <media/ICrypto.h>
#include <media/IMediaHTTPService.h>
-#include <media/MediaCodecBuffer.h>
-
-#include <media/DataSource.h>
-#include <media/MediaExtractor.h>
-#include <media/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/foundation/avc_utils.h>
-#include <media/stagefright/ColorConverter.h>
#include <media/stagefright/DataSourceFactory.h>
#include <media/stagefright/FileSource.h>
-#include <media/stagefright/MediaBuffer.h>
-#include <media/stagefright/MediaCodec.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MediaExtractorFactory.h>
#include <media/stagefright/MetaData.h>
-#include <media/stagefright/Utils.h>
-
#include <media/CharacterEncodingDetector.h>
namespace android {
-static const int64_t kBufferTimeOutUs = 30000ll; // 30 msec
-static const size_t kRetryCount = 20; // must be >0
-
StagefrightMetadataRetriever::StagefrightMetadataRetriever()
: mParsedMetaData(false),
mAlbumArt(NULL) {
@@ -145,470 +130,123 @@
return OK;
}
-static VideoFrame *allocVideoFrame(
- const sp<MetaData> &trackMeta, int32_t width, int32_t height, int32_t bpp, bool metaOnly) {
- int32_t rotationAngle;
- if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
- rotationAngle = 0; // By default, no rotation
- }
+VideoFrame* StagefrightMetadataRetriever::getImageAtIndex(
+ int index, int colorFormat, bool metaOnly) {
- uint32_t type;
- const void *iccData;
- size_t iccSize;
- if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
- iccData = NULL;
- iccSize = 0;
- }
+ ALOGV("getImageAtIndex: index: %d colorFormat: %d, metaOnly: %d",
+ index, colorFormat, metaOnly);
- int32_t sarWidth, sarHeight;
- int32_t displayWidth, displayHeight;
- if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
- && trackMeta->findInt32(kKeySARHeight, &sarHeight)
- && sarHeight != 0) {
- displayWidth = (width * sarWidth) / sarHeight;
- displayHeight = height;
- } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
- && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
- && displayWidth > 0 && displayHeight > 0
- && width > 0 && height > 0) {
- ALOGV("found display size %dx%d", displayWidth, displayHeight);
- } else {
- displayWidth = width;
- displayHeight = height;
- }
-
- return new VideoFrame(width, height, displayWidth, displayHeight,
- rotationAngle, bpp, !metaOnly, iccData, iccSize);
-}
-
-static bool getDstColorFormat(android_pixel_format_t colorFormat,
- OMX_COLOR_FORMATTYPE *omxColorFormat, int32_t *bpp) {
- switch (colorFormat) {
- case HAL_PIXEL_FORMAT_RGB_565:
- {
- *omxColorFormat = OMX_COLOR_Format16bitRGB565;
- *bpp = 2;
- return true;
- }
- case HAL_PIXEL_FORMAT_RGBA_8888:
- {
- *omxColorFormat = OMX_COLOR_Format32BitRGBA8888;
- *bpp = 4;
- return true;
- }
- case HAL_PIXEL_FORMAT_BGRA_8888:
- {
- *omxColorFormat = OMX_COLOR_Format32bitBGRA8888;
- *bpp = 4;
- return true;
- }
- default:
- {
- ALOGE("Unsupported color format: %d", colorFormat);
- break;
- }
- }
- return false;
-}
-
-static VideoFrame *extractVideoFrame(
- const AString &componentName,
- const sp<MetaData> &trackMeta,
- const sp<IMediaSource> &source,
- int64_t frameTimeUs,
- int seekMode,
- int colorFormat,
- bool metaOnly) {
- sp<MetaData> format = source->getFormat();
-
- MediaSource::ReadOptions::SeekMode mode =
- static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
- if (seekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
- seekMode > MediaSource::ReadOptions::SEEK_CLOSEST) {
- ALOGE("Unknown seek mode: %d", seekMode);
+ if (mExtractor.get() == NULL) {
+ ALOGE("no extractor.");
return NULL;
}
- int32_t dstBpp;
- OMX_COLOR_FORMATTYPE dstFormat;
- if (!getDstColorFormat(
- (android_pixel_format_t)colorFormat, &dstFormat, &dstBpp)) {
- return NULL;
- }
+ size_t n = mExtractor->countTracks();
+ size_t i;
+ int imageCount = 0;
- if (metaOnly) {
- int32_t width, height;
- CHECK(trackMeta->findInt32(kKeyWidth, &width));
- CHECK(trackMeta->findInt32(kKeyHeight, &height));
- return allocVideoFrame(trackMeta, width, height, dstBpp, true);
- }
+ for (i = 0; i < n; ++i) {
+ sp<MetaData> meta = mExtractor->getTrackMetaData(i);
+ ALOGV("getting track %zu of %zu, meta=%s", i, n, meta->toString().c_str());
- MediaSource::ReadOptions options;
- sp<MetaData> overrideMeta;
- if (frameTimeUs < 0) {
- uint32_t type;
- const void *data;
- size_t size;
- int64_t thumbNailTime;
- int32_t thumbnailWidth, thumbnailHeight;
+ const char *mime;
+ CHECK(meta->findCString(kKeyMIMEType, &mime));
- // if we have a stand-alone thumbnail, set up the override meta,
- // and set seekTo time to -1.
- if (trackMeta->findInt32(kKeyThumbnailWidth, &thumbnailWidth)
- && trackMeta->findInt32(kKeyThumbnailHeight, &thumbnailHeight)
- && trackMeta->findData(kKeyThumbnailHVCC, &type, &data, &size)){
- overrideMeta = new MetaData(*trackMeta);
- overrideMeta->remove(kKeyDisplayWidth);
- overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, thumbnailWidth);
- overrideMeta->setInt32(kKeyHeight, thumbnailHeight);
- overrideMeta->setData(kKeyHVCC, type, data, size);
- thumbNailTime = -1ll;
- ALOGV("thumbnail: %dx%d", thumbnailWidth, thumbnailHeight);
- } else if (!trackMeta->findInt64(kKeyThumbnailTime, &thumbNailTime)
- || thumbNailTime < 0) {
- thumbNailTime = 0;
- }
-
- options.setSeekTo(thumbNailTime, mode);
- } else {
- options.setSeekTo(frameTimeUs, mode);
- }
-
- int32_t gridRows = 1, gridCols = 1;
- if (overrideMeta == NULL) {
- // check if we're dealing with a tiled heif
- int32_t gridWidth, gridHeight;
- if (trackMeta->findInt32(kKeyGridWidth, &gridWidth) && gridWidth > 0
- && trackMeta->findInt32(kKeyGridHeight, &gridHeight) && gridHeight > 0) {
- int32_t width, height, displayWidth, displayHeight;
- CHECK(trackMeta->findInt32(kKeyWidth, &width));
- CHECK(trackMeta->findInt32(kKeyHeight, &height));
- CHECK(trackMeta->findInt32(kKeyDisplayWidth, &displayWidth));
- CHECK(trackMeta->findInt32(kKeyDisplayHeight, &displayHeight));
-
- if (width >= displayWidth && height >= displayHeight
- && (width % gridWidth == 0) && (height % gridHeight == 0)) {
- ALOGV("grid config: %dx%d, display %dx%d, grid %dx%d",
- width, height, displayWidth, displayHeight, gridWidth, gridHeight);
-
- overrideMeta = new MetaData(*trackMeta);
- overrideMeta->remove(kKeyDisplayWidth);
- overrideMeta->remove(kKeyDisplayHeight);
- overrideMeta->setInt32(kKeyWidth, gridWidth);
- overrideMeta->setInt32(kKeyHeight, gridHeight);
- gridCols = width / gridWidth;
- gridRows = height / gridHeight;
- } else {
- ALOGE("Bad grid config: %dx%d, display %dx%d, grid %dx%d",
- width, height, displayWidth, displayHeight, gridWidth, gridHeight);
+ if (!strncasecmp(mime, "image/", 6)) {
+ int32_t isPrimary;
+ if ((index < 0 && meta->findInt32(kKeyIsPrimaryImage, &isPrimary) && isPrimary)
+ || (index == imageCount++)) {
+ break;
}
}
- if (overrideMeta == NULL) {
- overrideMeta = trackMeta;
- }
}
- int32_t numTiles = gridRows * gridCols;
- sp<AMessage> videoFormat;
- if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
- ALOGE("b/23680780");
- ALOGW("Failed to convert meta data to message");
+ if (i == n) {
+ ALOGE("image track not found.");
return NULL;
}
- // TODO: Use Flexible color instead
- videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ sp<MetaData> trackMeta = mExtractor->getTrackMetaData(i);
- // For the thumbnail extraction case, try to allocate single buffer in both
- // input and output ports, if seeking to a sync frame. NOTE: This request may
- // fail if component requires more than that for decoding.
- bool isSeekingClosest = (seekMode == MediaSource::ReadOptions::SEEK_CLOSEST);
- bool decodeSingleFrame = !isSeekingClosest && (numTiles == 1);
- if (decodeSingleFrame) {
- videoFormat->setInt32("android._num-input-buffers", 1);
- videoFormat->setInt32("android._num-output-buffers", 1);
- }
+ sp<IMediaSource> source = mExtractor->getTrack(i);
- status_t err;
- sp<ALooper> looper = new ALooper;
- looper->start();
- sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
- looper, componentName, &err);
-
- if (decoder.get() == NULL || err != OK) {
- ALOGW("Failed to instantiate decoder [%s]", componentName.c_str());
+ if (source.get() == NULL) {
+ ALOGE("unable to instantiate image track.");
return NULL;
}
- err = decoder->configure(videoFormat, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
- if (err != OK) {
- ALOGW("configure returned error %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- err = decoder->start();
- if (err != OK) {
- ALOGW("start returned error %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- err = source->start();
- if (err != OK) {
- ALOGW("source failed to start: %d (%s)", err, asString(err));
- decoder->release();
- return NULL;
- }
-
- Vector<sp<MediaCodecBuffer> > inputBuffers;
- err = decoder->getInputBuffers(&inputBuffers);
- if (err != OK) {
- ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
- decoder->release();
- source->stop();
- return NULL;
- }
-
- Vector<sp<MediaCodecBuffer> > outputBuffers;
- err = decoder->getOutputBuffers(&outputBuffers);
- if (err != OK) {
- ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
- decoder->release();
- source->stop();
- return NULL;
- }
-
- sp<AMessage> outputFormat = NULL;
- bool haveMoreInputs = true;
- size_t index, offset, size;
- int64_t timeUs;
- size_t retriesLeft = kRetryCount;
- bool done = false;
const char *mime;
- bool success = format->findCString(kKeyMIMEType, &mime);
- if (!success) {
- ALOGE("Could not find mime type");
- return NULL;
+ CHECK(trackMeta->findCString(kKeyMIMEType, &mime));
+ ALOGV("extracting from %s track", mime);
+ if (!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
+ mime = MEDIA_MIMETYPE_VIDEO_HEVC;
+ trackMeta = new MetaData(*trackMeta);
+ trackMeta->setCString(kKeyMIMEType, mime);
}
- bool isAvcOrHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)
- || !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ Vector<AString> matchingCodecs;
+ MediaCodecList::findMatchingCodecs(
+ mime,
+ false, /* encoder */
+ MediaCodecList::kPreferSoftwareCodecs,
+ &matchingCodecs);
- bool firstSample = true;
- int64_t targetTimeUs = -1ll;
+ for (size_t i = 0; i < matchingCodecs.size(); ++i) {
+ const AString &componentName = matchingCodecs[i];
+ ImageDecoder decoder(componentName, trackMeta, source);
+ VideoFrame* frame = decoder.extractFrame(
+ 0 /*frameTimeUs*/, 0 /*seekMode*/, colorFormat, metaOnly);
- VideoFrame *frame = NULL;
- int32_t tilesDecoded = 0;
-
- do {
- size_t inputIndex = -1;
- int64_t ptsUs = 0ll;
- uint32_t flags = 0;
- sp<MediaCodecBuffer> codecBuffer = NULL;
-
- while (haveMoreInputs) {
- err = decoder->dequeueInputBuffer(&inputIndex, kBufferTimeOutUs);
- if (err != OK) {
- ALOGW("Timed out waiting for input");
- if (retriesLeft) {
- err = OK;
- }
- break;
- }
- codecBuffer = inputBuffers[inputIndex];
-
- MediaBuffer *mediaBuffer = NULL;
-
- err = source->read(&mediaBuffer, &options);
- options.clearSeekTo();
- if (err != OK) {
- ALOGW("Input Error or EOS");
- haveMoreInputs = false;
- if (err == ERROR_END_OF_STREAM) {
- err = OK;
- }
- break;
- }
- if (firstSample && isSeekingClosest) {
- mediaBuffer->meta_data()->findInt64(kKeyTargetTime, &targetTimeUs);
- ALOGV("Seeking closest: targetTimeUs=%lld", (long long)targetTimeUs);
- }
- firstSample = false;
-
- if (mediaBuffer->range_length() > codecBuffer->capacity()) {
- ALOGE("buffer size (%zu) too large for codec input size (%zu)",
- mediaBuffer->range_length(), codecBuffer->capacity());
- haveMoreInputs = false;
- err = BAD_VALUE;
- } else {
- codecBuffer->setRange(0, mediaBuffer->range_length());
-
- CHECK(mediaBuffer->meta_data()->findInt64(kKeyTime, &ptsUs));
- memcpy(codecBuffer->data(),
- (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
- mediaBuffer->range_length());
- }
-
- mediaBuffer->release();
- break;
+ if (frame != NULL) {
+ return frame;
}
-
- if (haveMoreInputs && inputIndex < inputBuffers.size()) {
- if (isAvcOrHevc && IsIDR(codecBuffer->data(), codecBuffer->size())
- && decodeSingleFrame) {
- // Only need to decode one IDR frame, unless we're seeking with CLOSEST
- // option, in which case we need to actually decode to targetTimeUs.
- haveMoreInputs = false;
- flags |= MediaCodec::BUFFER_FLAG_EOS;
- }
-
- ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
- codecBuffer->size(), ptsUs, flags);
- err = decoder->queueInputBuffer(
- inputIndex,
- codecBuffer->offset(),
- codecBuffer->size(),
- ptsUs,
- flags);
-
- // we don't expect an output from codec config buffer
- if (flags & MediaCodec::BUFFER_FLAG_CODECCONFIG) {
- continue;
- }
- }
-
- while (err == OK) {
- // wait for a decoded buffer
- err = decoder->dequeueOutputBuffer(
- &index,
- &offset,
- &size,
- &timeUs,
- &flags,
- kBufferTimeOutUs);
-
- if (err == INFO_FORMAT_CHANGED) {
- ALOGV("Received format change");
- err = decoder->getOutputFormat(&outputFormat);
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- ALOGV("Output buffers changed");
- err = decoder->getOutputBuffers(&outputBuffers);
- } else {
- if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
- ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
- err = OK;
- } else if (err == OK) {
- // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
- // from the extractor, decode to the specified frame. Otherwise we're done.
- ALOGV("Received an output buffer, timeUs=%lld", (long long)timeUs);
- sp<MediaCodecBuffer> videoFrameBuffer = outputBuffers.itemAt(index);
-
- int32_t width, height;
- CHECK(outputFormat != NULL);
- CHECK(outputFormat->findInt32("width", &width));
- CHECK(outputFormat->findInt32("height", &height));
-
- int32_t crop_left, crop_top, crop_right, crop_bottom;
- if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
- crop_left = crop_top = 0;
- crop_right = width - 1;
- crop_bottom = height - 1;
- }
-
- if (frame == NULL) {
- frame = allocVideoFrame(
- trackMeta,
- (crop_right - crop_left + 1) * gridCols,
- (crop_bottom - crop_top + 1) * gridRows,
- dstBpp,
- false /*metaOnly*/);
- }
-
- int32_t srcFormat;
- CHECK(outputFormat->findInt32("color-format", &srcFormat));
-
- ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat);
-
- int32_t dstLeft, dstTop, dstRight, dstBottom;
- if (numTiles == 1) {
- dstLeft = crop_left;
- dstTop = crop_top;
- dstRight = crop_right;
- dstBottom = crop_bottom;
- } else {
- dstLeft = tilesDecoded % gridCols * width;
- dstTop = tilesDecoded / gridCols * height;
- dstRight = dstLeft + width - 1;
- dstBottom = dstTop + height - 1;
- }
-
- if (converter.isValid()) {
- err = converter.convert(
- (const uint8_t *)videoFrameBuffer->data(),
- width, height,
- crop_left, crop_top, crop_right, crop_bottom,
- frame->mData,
- frame->mWidth,
- frame->mHeight,
- dstLeft, dstTop, dstRight, dstBottom);
- } else {
- ALOGE("Unable to convert from format 0x%08x to 0x%08x",
- srcFormat, dstFormat);
-
- err = ERROR_UNSUPPORTED;
- }
-
- done = (targetTimeUs < 0ll) || (timeUs >= targetTimeUs);
- if (numTiles > 1) {
- tilesDecoded++;
- done &= (tilesDecoded >= numTiles);
- }
- err = decoder->releaseOutputBuffer(index);
- } else {
- ALOGW("Received error %d (%s) instead of output", err, asString(err));
- done = true;
- }
- break;
- }
- }
- } while (err == OK && !done);
-
- source->stop();
- decoder->release();
-
- if (err != OK) {
- ALOGE("failed to get video frame (err %d)", err);
- delete frame;
- frame = NULL;
+ ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
}
- return frame;
+ return NULL;
}
-VideoFrame *StagefrightMetadataRetriever::getFrameAtTime(
+VideoFrame* StagefrightMetadataRetriever::getFrameAtTime(
int64_t timeUs, int option, int colorFormat, bool metaOnly) {
-
ALOGV("getFrameAtTime: %" PRId64 " us option: %d colorFormat: %d, metaOnly: %d",
timeUs, option, colorFormat, metaOnly);
+ VideoFrame *frame;
+ status_t err = getFrameInternal(
+ timeUs, 1, option, colorFormat, metaOnly, &frame, NULL /*outFrames*/);
+ return (err == OK) ? frame : NULL;
+}
+
+status_t StagefrightMetadataRetriever::getFrameAtIndex(
+ std::vector<VideoFrame*>* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly) {
+ ALOGV("getFrameAtIndex: frameIndex %d, numFrames %d, colorFormat: %d, metaOnly: %d",
+ frameIndex, numFrames, colorFormat, metaOnly);
+
+ return getFrameInternal(
+ frameIndex, numFrames, MediaSource::ReadOptions::SEEK_FRAME_INDEX,
+ colorFormat, metaOnly, NULL /*outFrame*/, frames);
+}
+
+status_t StagefrightMetadataRetriever::getFrameInternal(
+ int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
+ VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames) {
if (mExtractor.get() == NULL) {
- ALOGV("no extractor.");
- return NULL;
+ ALOGE("no extractor.");
+ return NO_INIT;
}
sp<MetaData> fileMeta = mExtractor->getMetaData();
if (fileMeta == NULL) {
- ALOGV("extractor doesn't publish metadata, failed to initialize?");
- return NULL;
+ ALOGE("extractor doesn't publish metadata, failed to initialize?");
+ return NO_INIT;
}
int32_t drm = 0;
if (fileMeta->findInt32(kKeyIsDRM, &drm) && drm != 0) {
ALOGE("frame grab not allowed.");
- return NULL;
+ return ERROR_DRM_UNKNOWN;
}
size_t n = mExtractor->countTracks();
@@ -625,8 +263,8 @@
}
if (i == n) {
- ALOGV("no video track found.");
- return NULL;
+ ALOGE("no video track found.");
+ return INVALID_OPERATION;
}
sp<MetaData> trackMeta = mExtractor->getTrackMetaData(
@@ -636,7 +274,7 @@
if (source.get() == NULL) {
ALOGV("unable to instantiate video track.");
- return NULL;
+ return UNKNOWN_ERROR;
}
const void *data;
@@ -659,16 +297,25 @@
for (size_t i = 0; i < matchingCodecs.size(); ++i) {
const AString &componentName = matchingCodecs[i];
- VideoFrame *frame = extractVideoFrame(
- componentName, trackMeta, source, timeUs, option, colorFormat, metaOnly);
-
- if (frame != NULL) {
- return frame;
+ VideoFrameDecoder decoder(componentName, trackMeta, source);
+ if (outFrame != NULL) {
+ *outFrame = decoder.extractFrame(
+ timeUs, option, colorFormat, metaOnly);
+ if (*outFrame != NULL) {
+ return OK;
+ }
+ } else if (outFrames != NULL) {
+ status_t err = decoder.extractFrames(
+ timeUs, numFrames, option, colorFormat, outFrames);
+ if (err == OK) {
+ return OK;
+ }
}
- ALOGV("%s failed to extract thumbnail, trying next decoder.", componentName.c_str());
+ ALOGV("%s failed to extract frame, trying next decoder.", componentName.c_str());
}
- return NULL;
+ ALOGE("all codecs failed to extract frame.");
+ return UNKNOWN_ERROR;
}
MediaAlbumArt *StagefrightMetadataRetriever::extractAlbumArt() {
@@ -800,8 +447,14 @@
bool hasVideo = false;
int32_t videoWidth = -1;
int32_t videoHeight = -1;
+ int32_t videoFrameCount = 0;
int32_t audioBitrate = -1;
int32_t rotationAngle = -1;
+ int32_t imageCount = 0;
+ int32_t imagePrimary = 0;
+ int32_t imageWidth = -1;
+ int32_t imageHeight = -1;
+ int32_t imageRotation = -1;
// The overall duration is the duration of the longest track.
int64_t maxDurationUs = 0;
@@ -832,6 +485,20 @@
if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
rotationAngle = 0;
}
+ if (!trackMeta->findInt32(kKeyFrameCount, &videoFrameCount)) {
+ videoFrameCount = 0;
+ }
+ } else if (!strncasecmp("image/", mime, 6)) {
+ int32_t isPrimary;
+ if (trackMeta->findInt32(kKeyIsPrimaryImage, &isPrimary) && isPrimary) {
+ imagePrimary = imageCount;
+ CHECK(trackMeta->findInt32(kKeyWidth, &imageWidth));
+ CHECK(trackMeta->findInt32(kKeyHeight, &imageHeight));
+ if (!trackMeta->findInt32(kKeyRotation, &imageRotation)) {
+ imageRotation = 0;
+ }
+ }
+ imageCount++;
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_TEXT_3GPP)) {
const char *lang;
if (trackMeta->findCString(kKeyMediaLanguage, &lang)) {
@@ -870,6 +537,30 @@
sprintf(tmp, "%d", rotationAngle);
mMetaData.add(METADATA_KEY_VIDEO_ROTATION, String8(tmp));
+
+ if (videoFrameCount > 0) {
+ sprintf(tmp, "%d", videoFrameCount);
+ mMetaData.add(METADATA_KEY_VIDEO_FRAME_COUNT, String8(tmp));
+ }
+ }
+
+ if (imageCount > 0) {
+ mMetaData.add(METADATA_KEY_HAS_IMAGE, String8("yes"));
+
+ sprintf(tmp, "%d", imageCount);
+ mMetaData.add(METADATA_KEY_IMAGE_COUNT, String8(tmp));
+
+ sprintf(tmp, "%d", imagePrimary);
+ mMetaData.add(METADATA_KEY_IMAGE_PRIMARY, String8(tmp));
+
+ sprintf(tmp, "%d", imageWidth);
+ mMetaData.add(METADATA_KEY_IMAGE_WIDTH, String8(tmp));
+
+ sprintf(tmp, "%d", imageHeight);
+ mMetaData.add(METADATA_KEY_IMAGE_HEIGHT, String8(tmp));
+
+ sprintf(tmp, "%d", imageRotation);
+ mMetaData.add(METADATA_KEY_IMAGE_ROTATION, String8(tmp));
}
if (numTracks == 1 && hasAudio && audioBitrate >= 0) {
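Note: a short hedged sketch of the new retriever entry points added in this patch; grabImageAndFrames is illustrative only, and the data-source setup plus error handling are assumed.

    #include "include/StagefrightMetadataRetriever.h"
    #include <private/media/VideoFrame.h>
    #include <system/graphics-base.h>
    #include <vector>

    using namespace android;

    void grabImageAndFrames(const sp<StagefrightMetadataRetriever> &retriever) {
        // Primary image of a HEIF file: index < 0 selects the primary image track.
        VideoFrame *image = retriever->getImageAtIndex(
                -1 /* index */, HAL_PIXEL_FORMAT_RGBA_8888, false /* metaOnly */);

        // A run of consecutive video frames addressed by index (SEEK_FRAME_INDEX).
        std::vector<VideoFrame*> frames;
        retriever->getFrameAtIndex(
                &frames, 0 /* frameIndex */, 4 /* numFrames */,
                HAL_PIXEL_FORMAT_RGBA_8888, false /* metaOnly */);

        // Caller owns the returned frames.
        delete image;
        for (VideoFrame *frame : frames) {
            delete frame;
        }
    }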
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index bd80e45..6e77f15 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -639,7 +639,8 @@
msg->setString("language", lang);
}
- if (!strncasecmp("video/", mime, 6)) {
+ if (!strncasecmp("video/", mime, 6) ||
+ !strncasecmp("image/", mime, 6)) {
int32_t width, height;
if (!meta->findInt32(kKeyWidth, &width)
|| !meta->findInt32(kKeyHeight, &height)) {
@@ -663,6 +664,19 @@
msg->setInt32("sar-height", sarHeight);
}
+ if (!strncasecmp("image/", mime, 6)) {
+ int32_t gridWidth, gridHeight, gridRows, gridCols;
+ if (meta->findInt32(kKeyGridWidth, &gridWidth)
+ && meta->findInt32(kKeyGridHeight, &gridHeight)
+ && meta->findInt32(kKeyGridRows, &gridRows)
+ && meta->findInt32(kKeyGridCols, &gridCols)) {
+ msg->setInt32("grid-width", gridWidth);
+ msg->setInt32("grid-height", gridHeight);
+ msg->setInt32("grid-rows", gridRows);
+ msg->setInt32("grid-cols", gridCols);
+ }
+ }
+
int32_t colorFormat;
if (meta->findInt32(kKeyColorFormat, &colorFormat)) {
msg->setInt32("color-format", colorFormat);
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index 0982006..cbb38fd 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -129,6 +129,12 @@
dstWidth, dstHeight,
dstCropLeft, dstCropTop, dstCropRight, dstCropBottom, mDstFormat);
+ if (!((src.mCropLeft & 1) == 0
+ && src.cropWidth() == dst.cropWidth()
+ && src.cropHeight() == dst.cropHeight())) {
+ return ERROR_UNSUPPORTED;
+ }
+
status_t err;
switch (mSrcFormat) {
@@ -172,12 +178,6 @@
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -232,12 +232,6 @@
status_t ColorConverter::convertYUV420PlanarUseLibYUV(
const BitmapParams &src, const BitmapParams &dst) {
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -338,12 +332,6 @@
}
status_t ColorConverter::convertYUV420Planar(
const BitmapParams &src, const BitmapParams &dst) {
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint8_t *kAdjustedClip = initClip();
uint8_t *dst_ptr = (uint8_t *)dst.mBits
@@ -422,12 +410,6 @@
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -496,12 +478,6 @@
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
@@ -568,12 +544,6 @@
const BitmapParams &src, const BitmapParams &dst) {
uint8_t *kAdjustedClip = initClip();
- if (!((src.mCropLeft & 1) == 0
- && src.cropWidth() == dst.cropWidth()
- && src.cropHeight() == dst.cropHeight())) {
- return ERROR_UNSUPPORTED;
- }
-
uint16_t *dst_ptr = (uint16_t *)dst.mBits
+ dst.mCropTop * dst.mWidth + dst.mCropLeft;
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 7caebc6..1695c75 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -19,6 +19,7 @@
namespace android {
const char *MEDIA_MIMETYPE_IMAGE_JPEG = "image/jpeg";
+const char *MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC = "image/vnd.android.heic";
const char *MEDIA_MIMETYPE_VIDEO_VP8 = "video/x-vnd.on2.vp8";
const char *MEDIA_MIMETYPE_VIDEO_VP9 = "video/x-vnd.on2.vp9";
@@ -58,6 +59,7 @@
const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS = "video/mp2ts";
const char *MEDIA_MIMETYPE_CONTAINER_AVI = "video/avi";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS = "video/mp2p";
+const char *MEDIA_MIMETYPE_CONTAINER_HEIF = "image/heif";
const char *MEDIA_MIMETYPE_TEXT_3GPP = "text/3gpp-tt";
const char *MEDIA_MIMETYPE_TEXT_SUBRIP = "application/x-subrip";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index 7f17013..25be89f 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -21,6 +21,7 @@
namespace android {
extern const char *MEDIA_MIMETYPE_IMAGE_JPEG;
+extern const char *MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC;
extern const char *MEDIA_MIMETYPE_VIDEO_VP8;
extern const char *MEDIA_MIMETYPE_VIDEO_VP9;
@@ -60,6 +61,7 @@
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2TS;
extern const char *MEDIA_MIMETYPE_CONTAINER_AVI;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG2PS;
+extern const char *MEDIA_MIMETYPE_CONTAINER_HEIF;
extern const char *MEDIA_MIMETYPE_TEXT_3GPP;
extern const char *MEDIA_MIMETYPE_TEXT_SUBRIP;
diff --git a/media/libstagefright/id3/ID3.cpp b/media/libstagefright/id3/ID3.cpp
index 750f1de..61403be 100644
--- a/media/libstagefright/id3/ID3.cpp
+++ b/media/libstagefright/id3/ID3.cpp
@@ -392,7 +392,12 @@
--mSize;
--dataSize;
}
- mData[writeOffset++] = mData[readOffset++];
+ if (i + 1 < dataSize) {
+ // Only move data if there's actually something to move.
+ // This handles the special case of the data being only [0xff, 0x00]
+ // which should be converted to just 0xff if unsynchronization is on.
+ mData[writeOffset++] = mData[readOffset++];
+ }
}
// move the remaining data following this frame
if (readOffset <= oldSize) {
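Note: a standalone sketch of ID3 unsynchronization removal covering the boundary case fixed above, where a frame whose data is exactly [0xff, 0x00] collapses to a single 0xff byte; this is an illustration, not the ID3.cpp implementation.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Unsynchronization stuffs a 0x00 after every 0xff; removal drops that 0x00.
    // When the stuffed 0x00 is the last byte of the frame there is nothing left
    // to copy after it, which is the case the bounds check above handles.
    static std::vector<uint8_t> removeUnsynchronization(const std::vector<uint8_t> &in) {
        std::vector<uint8_t> out;
        for (std::size_t i = 0; i < in.size(); ++i) {
            out.push_back(in[i]);
            if (in[i] == 0xff && i + 1 < in.size() && in[i + 1] == 0x00) {
                ++i;  // skip the stuffed zero byte
            }
        }
        return out;
    }
    // removeUnsynchronization({0xff, 0x00}) yields {0xff}.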
diff --git a/media/libstagefright/include/FrameDecoder.h b/media/libstagefright/include/FrameDecoder.h
new file mode 100644
index 0000000..d7c074c
--- /dev/null
+++ b/media/libstagefright/include/FrameDecoder.h
@@ -0,0 +1,176 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef FRAME_DECODER_H_
+#define FRAME_DECODER_H_
+
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/ABase.h>
+#include <media/MediaSource.h>
+#include <media/openmax/OMX_Video.h>
+#include <system/graphics-base.h>
+
+namespace android {
+
+struct AMessage;
+class MediaCodecBuffer;
+class VideoFrame;
+
+struct FrameDecoder {
+ FrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source) :
+ mComponentName(componentName),
+ mTrackMeta(trackMeta),
+ mSource(source),
+ mDstFormat(OMX_COLOR_Format16bitRGB565),
+ mDstBpp(2) {}
+
+ VideoFrame* extractFrame(
+ int64_t frameTimeUs,
+ int option,
+ int colorFormat,
+ bool metaOnly);
+
+ status_t extractFrames(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int option,
+ int colorFormat,
+ std::vector<VideoFrame*>* frames);
+
+protected:
+ virtual ~FrameDecoder() {}
+
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) = 0;
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ const sp<MetaData> &sampleMeta,
+ bool firstSample,
+ uint32_t *flags) = 0;
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) = 0;
+
+ VideoFrame *allocVideoFrame(int32_t width, int32_t height, bool metaOnly);
+
+ sp<MetaData> trackMeta() const { return mTrackMeta; }
+ OMX_COLOR_FORMATTYPE dstFormat() const { return mDstFormat; }
+ int32_t dstBpp() const { return mDstBpp; }
+
+ void addFrame(VideoFrame *frame) {
+ mFrames.push_back(std::unique_ptr<VideoFrame>(frame));
+ }
+
+private:
+ AString mComponentName;
+ sp<MetaData> mTrackMeta;
+ sp<IMediaSource> mSource;
+ OMX_COLOR_FORMATTYPE mDstFormat;
+ int32_t mDstBpp;
+ std::vector<std::unique_ptr<VideoFrame> > mFrames;
+
+ bool setDstColorFormat(android_pixel_format_t colorFormat);
+ status_t extractInternal(int64_t frameTimeUs, size_t numFrames, int option);
+
+ DISALLOW_EVIL_CONSTRUCTORS(FrameDecoder);
+};
+
+struct VideoFrameDecoder : public FrameDecoder {
+ VideoFrameDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source) :
+ FrameDecoder(componentName, trackMeta, source),
+ mIsAvcOrHevc(false),
+ mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
+ mTargetTimeUs(-1ll),
+ mNumFrames(0),
+ mNumFramesDecoded(0) {}
+
+protected:
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) override;
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer,
+ const sp<MetaData> &sampleMeta,
+ bool firstSample,
+ uint32_t *flags) override;
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) override;
+
+private:
+ bool mIsAvcOrHevc;
+ MediaSource::ReadOptions::SeekMode mSeekMode;
+ int64_t mTargetTimeUs;
+ size_t mNumFrames;
+ size_t mNumFramesDecoded;
+};
+
+struct ImageDecoder : public FrameDecoder {
+ ImageDecoder(
+ const AString &componentName,
+ const sp<MetaData> &trackMeta,
+ const sp<IMediaSource> &source) :
+ FrameDecoder(componentName, trackMeta, source),
+ mFrame(NULL), mGridRows(1), mGridCols(1), mTilesDecoded(0) {}
+
+protected:
+ virtual sp<AMessage> onGetFormatAndSeekOptions(
+ int64_t frameTimeUs,
+ size_t numFrames,
+ int seekMode,
+ MediaSource::ReadOptions *options) override;
+
+ virtual status_t onInputReceived(
+ const sp<MediaCodecBuffer> &codecBuffer __unused,
+ const sp<MetaData> &sampleMeta __unused,
+ bool firstSample __unused,
+ uint32_t *flags __unused) override { return OK; }
+
+ virtual status_t onOutputReceived(
+ const sp<MediaCodecBuffer> &videoFrameBuffer,
+ const sp<AMessage> &outputFormat,
+ int64_t timeUs,
+ bool *done) override;
+
+private:
+ VideoFrame *mFrame;
+ int32_t mGridRows;
+ int32_t mGridCols;
+ int32_t mTilesDecoded;
+};
+
+} // namespace android
+
+#endif // FRAME_DECODER_H_
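A minimal usage sketch for the new header, assuming the caller already obtained a decoder component name, the track's MetaData and an IMediaSource from an extractor. The option value 0 (taken here to mean the default previous-sync behavior), the RGB565 pixel format and the metaOnly semantics are assumptions, not dictated by this header:

    #include "FrameDecoder.h"   // exact include path depends on the build rules

    using namespace android;

    // Returns true if frame metadata could be extracted at time 0.
    static bool canDescribeThumbnail(const AString &componentName,
                                     const sp<MetaData> &trackMeta,
                                     const sp<IMediaSource> &source) {
        VideoFrameDecoder decoder(componentName, trackMeta, source);
        // metaOnly=true is assumed to fill only dimensions/metadata, no pixel decode.
        VideoFrame *frame = decoder.extractFrame(
                0 /* frameTimeUs */, 0 /* option */,
                HAL_PIXEL_FORMAT_RGB_565 /* colorFormat */, true /* metaOnly */);
        // Ownership of the returned frame follows the class contract (not shown here).
        return frame != NULL;
    }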
diff --git a/media/libstagefright/include/StagefrightMetadataRetriever.h b/media/libstagefright/include/StagefrightMetadataRetriever.h
index 277eb3e..58442fe 100644
--- a/media/libstagefright/include/StagefrightMetadataRetriever.h
+++ b/media/libstagefright/include/StagefrightMetadataRetriever.h
@@ -40,7 +40,14 @@
virtual status_t setDataSource(int fd, int64_t offset, int64_t length);
virtual status_t setDataSource(const sp<DataSource>& source, const char *mime);
- virtual VideoFrame *getFrameAtTime(int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual VideoFrame* getFrameAtTime(
+ int64_t timeUs, int option, int colorFormat, bool metaOnly);
+ virtual VideoFrame* getImageAtIndex(
+ int index, int colorFormat, bool metaOnly);
+ virtual status_t getFrameAtIndex(
+ std::vector<VideoFrame*>* frames,
+ int frameIndex, int numFrames, int colorFormat, bool metaOnly);
+
virtual MediaAlbumArt *extractAlbumArt();
virtual const char *extractMetadata(int keyCode);
@@ -56,6 +63,10 @@
// Delete album art and clear metadata.
void clearMetadata();
+ status_t getFrameInternal(
+ int64_t timeUs, int numFrames, int option, int colorFormat, bool metaOnly,
+ VideoFrame **outFrame, std::vector<VideoFrame*>* outFrames);
+
StagefrightMetadataRetriever(const StagefrightMetadataRetriever &);
StagefrightMetadataRetriever &operator=(
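A short sketch of the new retriever entry points, with the index and color-format conventions treated as assumptions (they are not spelled out in this header):

    #include <vector>
    #include "StagefrightMetadataRetriever.h"

    using namespace android;

    static bool grabStillAndFrames(StagefrightMetadataRetriever &retriever) {
        // Still image track (e.g. HEIF): decode image index 0.
        VideoFrame *image = retriever.getImageAtIndex(
                0 /* index */, HAL_PIXEL_FORMAT_RGB_565, false /* metaOnly */);

        // Video track: batch-decode 4 frames starting at frame index 10.
        std::vector<VideoFrame *> frames;
        status_t err = retriever.getFrameAtIndex(
                &frames, 10 /* frameIndex */, 4 /* numFrames */,
                HAL_PIXEL_FORMAT_RGB_565, false /* metaOnly */);
        return image != NULL && err == OK;
    }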
diff --git a/media/libstagefright/include/media/stagefright/MetaData.h b/media/libstagefright/include/media/stagefright/MetaData.h
index 6cfde9c..3438c56 100644
--- a/media/libstagefright/include/media/stagefright/MetaData.h
+++ b/media/libstagefright/include/media/stagefright/MetaData.h
@@ -215,7 +215,11 @@
kKeyGridWidth = 'grdW', // int32_t, HEIF grid width
kKeyGridHeight = 'grdH', // int32_t, HEIF grid height
+ kKeyGridRows = 'grdR', // int32_t, HEIF grid rows
+ kKeyGridCols = 'grdC', // int32_t, HEIF grid columns
kKeyIccProfile = 'prof', // raw data, ICC profile data
+ kKeyIsPrimaryImage = 'prim', // bool (int32_t), image track is the primary image
+ kKeyFrameCount = 'nfrm', // int32_t, total number of frames in video track
};
enum {
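A sketch of how a consumer might read the new grid keys from a HEIF image track's MetaData; the 1x1 fallback mirrors ImageDecoder's member initializers earlier in this change:

    #include <media/stagefright/MetaData.h>

    using namespace android;

    // Number of tiles in a HEIF grid image; 1 when the track is not tiled.
    static int32_t heifTileCount(const sp<MetaData> &trackMeta) {
        int32_t rows = 1, cols = 1;   // same defaults as ImageDecoder
        trackMeta->findInt32(kKeyGridRows, &rows);   // left untouched if key is absent
        trackMeta->findInt32(kKeyGridCols, &cols);
        return rows * cols;
    }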
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index 2902682..5af0745 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -17,6 +17,8 @@
#ifndef NU_MEDIA_EXTRACTOR_H_
#define NU_MEDIA_EXTRACTOR_H_
+#include <list>
+#include <media/mediaplayer.h>
#include <media/stagefright/foundation/ABase.h>
#include <media/IMediaExtractor.h>
#include <media/MediaSource.h>
@@ -67,7 +69,9 @@
status_t getFileFormat(sp<AMessage> *format) const;
- status_t selectTrack(size_t index);
+ status_t selectTrack(size_t index, int64_t startTimeUs = -1ll,
+ MediaSource::ReadOptions::SeekMode mode =
+ MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
status_t unselectTrack(size_t index);
status_t seekTo(
@@ -75,8 +79,12 @@
MediaSource::ReadOptions::SeekMode mode =
MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ // Each selected track has a read pointer.
+ // advance() advances the read pointer with the lowest timestamp.
status_t advance();
+ // readSampleData() reads the sample with the lowest timestamp.
status_t readSampleData(const sp<ABuffer> &buffer);
+
status_t getSampleTrackIndex(size_t *trackIndex);
status_t getSampleTime(int64_t *sampleTimeUs);
status_t getSampleMeta(sp<MetaData> *sampleMeta);
@@ -96,12 +104,20 @@
kMaxTrackCount = 16384,
};
+ struct Sample {
+ Sample();
+ Sample(MediaBuffer *buffer, int64_t timeUs);
+ MediaBuffer *mBuffer;
+ int64_t mSampleTimeUs;
+ };
+
struct TrackInfo {
sp<IMediaSource> mSource;
size_t mTrackIndex;
+ media_track_type mTrackType;
+ size_t mMaxFetchCount;
status_t mFinalResult;
- MediaBuffer *mSample;
- int64_t mSampleTimeUs;
+ std::list<Sample> mSamples;
uint32_t mTrackFlags; // bitmask of "TrackFlags"
};
@@ -117,16 +133,23 @@
int64_t mTotalBitrate; // in bits/sec
int64_t mDurationUs;
- ssize_t fetchTrackSamples(
+ ssize_t fetchAllTrackSamples(
+ int64_t seekTimeUs = -1ll,
+ MediaSource::ReadOptions::SeekMode mode =
+ MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
+ void fetchTrackSamples(
+ TrackInfo *info,
int64_t seekTimeUs = -1ll,
MediaSource::ReadOptions::SeekMode mode =
MediaSource::ReadOptions::SEEK_CLOSEST_SYNC);
- void releaseTrackSamples();
+ void releaseOneSample(TrackInfo *info);
+ void releaseTrackSamples(TrackInfo *info);
+ void releaseAllTrackSamples();
bool getTotalBitrate(int64_t *bitRate) const;
status_t updateDurationAndBitrate();
- status_t appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer);
+ status_t appendVorbisNumPageSamples(MediaBuffer *mbuf, const sp<ABuffer> &buffer);
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
};
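The comments above describe the new model: every selected track keeps its own queue of Samples, and advance()/readSampleData() always operate on the queued sample with the lowest timestamp. A standalone sketch of that selection rule, with simplified stand-in structs and std::vector standing in for the real track container (not the actual NuMediaExtractor code):

    #include <list>
    #include <vector>
    #include <stdint.h>
    #include <sys/types.h>

    struct Sample { int64_t mSampleTimeUs; };
    struct TrackInfo { std::list<Sample> mSamples; };

    // Index of the track whose front sample has the lowest timestamp,
    // or -1 if every per-track queue is empty.
    static ssize_t trackWithLowestTimestamp(const std::vector<TrackInfo> &tracks) {
        ssize_t best = -1;
        int64_t bestTimeUs = 0;
        for (size_t i = 0; i < tracks.size(); ++i) {
            if (tracks[i].mSamples.empty()) {
                continue;
            }
            int64_t timeUs = tracks[i].mSamples.front().mSampleTimeUs;
            if (best < 0 || timeUs < bestTimeUs) {
                best = static_cast<ssize_t>(i);
                bestTimeUs = timeUs;
            }
        }
        return best;
    }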
diff --git a/media/mtp/Android.bp b/media/mtp/Android.bp
index 543ad5c..acea373 100644
--- a/media/mtp/Android.bp
+++ b/media/mtp/Android.bp
@@ -19,6 +19,7 @@
srcs: [
"MtpDataPacket.cpp",
"MtpDebug.cpp",
+ "MtpDescriptors.cpp",
"MtpDevHandle.cpp",
"MtpDevice.cpp",
"MtpDeviceInfo.cpp",
diff --git a/media/mtp/MtpDescriptors.cpp b/media/mtp/MtpDescriptors.cpp
new file mode 100644
index 0000000..d9b6060
--- /dev/null
+++ b/media/mtp/MtpDescriptors.cpp
@@ -0,0 +1,260 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MtpDescriptors.h"
+
+namespace android {
+
+const struct usb_interface_descriptor mtp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+ .iInterface = 1,
+};
+
+const struct usb_interface_descriptor ptp_interface_desc = {
+ .bLength = USB_DT_INTERFACE_SIZE,
+ .bDescriptorType = USB_DT_INTERFACE,
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 3,
+ .bInterfaceClass = USB_CLASS_STILL_IMAGE,
+ .bInterfaceSubClass = 1,
+ .bInterfaceProtocol = 1,
+};
+
+const struct usb_endpoint_descriptor_no_audio fs_sink = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 1 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_FS,
+};
+
+const struct usb_endpoint_descriptor_no_audio fs_source = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 2 | USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_FS,
+};
+
+const struct usb_endpoint_descriptor_no_audio intr = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 3 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = MAX_PACKET_SIZE_EV,
+ .bInterval = 6,
+};
+
+const struct usb_endpoint_descriptor_no_audio hs_sink = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 1 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_HS,
+};
+
+const struct usb_endpoint_descriptor_no_audio hs_source = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 2 | USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_HS,
+};
+
+const struct usb_endpoint_descriptor_no_audio ss_sink = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 1 | USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_SS,
+};
+
+const struct usb_endpoint_descriptor_no_audio ss_source = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = 2 | USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = MAX_PACKET_SIZE_SS,
+};
+
+const struct usb_ss_ep_comp_descriptor ss_sink_comp = {
+ .bLength = sizeof(ss_sink_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 6,
+};
+
+const struct usb_ss_ep_comp_descriptor ss_source_comp = {
+ .bLength = sizeof(ss_source_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ .bMaxBurst = 6,
+};
+
+const struct usb_ss_ep_comp_descriptor ss_intr_comp = {
+ .bLength = sizeof(ss_intr_comp),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+};
+
+const struct func_desc mtp_fs_descriptors = {
+ .intf = mtp_interface_desc,
+ .sink = fs_sink,
+ .source = fs_source,
+ .intr = intr,
+};
+
+const struct func_desc mtp_hs_descriptors = {
+ .intf = mtp_interface_desc,
+ .sink = hs_sink,
+ .source = hs_source,
+ .intr = intr,
+};
+
+const struct ss_func_desc mtp_ss_descriptors = {
+ .intf = mtp_interface_desc,
+ .sink = ss_sink,
+ .sink_comp = ss_sink_comp,
+ .source = ss_source,
+ .source_comp = ss_source_comp,
+ .intr = intr,
+ .intr_comp = ss_intr_comp,
+};
+
+const struct func_desc ptp_fs_descriptors = {
+ .intf = ptp_interface_desc,
+ .sink = fs_sink,
+ .source = fs_source,
+ .intr = intr,
+};
+
+const struct func_desc ptp_hs_descriptors = {
+ .intf = ptp_interface_desc,
+ .sink = hs_sink,
+ .source = hs_source,
+ .intr = intr,
+};
+
+const struct ss_func_desc ptp_ss_descriptors = {
+ .intf = ptp_interface_desc,
+ .sink = ss_sink,
+ .sink_comp = ss_sink_comp,
+ .source = ss_source,
+ .source_comp = ss_source_comp,
+ .intr = intr,
+ .intr_comp = ss_intr_comp,
+};
+
+const struct functionfs_strings mtp_strings = {
+ .header = {
+ .magic = htole32(FUNCTIONFS_STRINGS_MAGIC),
+ .length = htole32(sizeof(mtp_strings)),
+ .str_count = htole32(1),
+ .lang_count = htole32(1),
+ },
+ .lang0 = {
+ .code = htole16(0x0409),
+ .str1 = STR_INTERFACE,
+ },
+};
+
+const struct usb_os_desc_header mtp_os_desc_header = {
+ .interface = htole32(1),
+ .dwLength = htole32(sizeof(usb_os_desc_header) + sizeof(usb_ext_compat_desc)),
+ .bcdVersion = htole16(1),
+ .wIndex = htole16(4),
+ .bCount = htole16(1),
+ .Reserved = htole16(0),
+};
+
+const struct usb_ext_compat_desc mtp_os_desc_compat = {
+ .bFirstInterfaceNumber = 0,
+ .Reserved1 = htole32(1),
+ .CompatibleID = { 'M', 'T', 'P' },
+ .SubCompatibleID = {0},
+ .Reserved2 = {0},
+};
+
+const struct usb_ext_compat_desc ptp_os_desc_compat = {
+ .bFirstInterfaceNumber = 0,
+ .Reserved1 = htole32(1),
+ .CompatibleID = { 'P', 'T', 'P' },
+ .SubCompatibleID = {0},
+ .Reserved2 = {0},
+};
+
+const struct desc_v2 mtp_desc_v2 = {
+ .header = {
+ .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
+ .length = htole32(sizeof(struct desc_v2)),
+ .flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC |
+ FUNCTIONFS_HAS_SS_DESC | FUNCTIONFS_HAS_MS_OS_DESC,
+ },
+ .fs_count = 4,
+ .hs_count = 4,
+ .ss_count = 7,
+ .os_count = 1,
+ .fs_descs = mtp_fs_descriptors,
+ .hs_descs = mtp_hs_descriptors,
+ .ss_descs = mtp_ss_descriptors,
+ .os_header = mtp_os_desc_header,
+ .os_desc = mtp_os_desc_compat,
+};
+
+const struct desc_v2 ptp_desc_v2 = {
+ .header = {
+ .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2),
+ .length = htole32(sizeof(struct desc_v2)),
+ .flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC |
+ FUNCTIONFS_HAS_SS_DESC | FUNCTIONFS_HAS_MS_OS_DESC,
+ },
+ .fs_count = 4,
+ .hs_count = 4,
+ .ss_count = 7,
+ .os_count = 1,
+ .fs_descs = ptp_fs_descriptors,
+ .hs_descs = ptp_hs_descriptors,
+ .ss_descs = ptp_ss_descriptors,
+ .os_header = mtp_os_desc_header,
+ .os_desc = ptp_os_desc_compat,
+};
+
+const struct desc_v1 mtp_desc_v1 = {
+ .header = {
+ .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ .length = htole32(sizeof(struct desc_v1)),
+ .fs_count = 4,
+ .hs_count = 4,
+ },
+ .fs_descs = mtp_fs_descriptors,
+ .hs_descs = mtp_hs_descriptors,
+};
+
+const struct desc_v1 ptp_desc_v1 = {
+ .header = {
+ .magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC),
+ .length = htole32(sizeof(struct desc_v1)),
+ .fs_count = 4,
+ .hs_count = 4,
+ },
+ .fs_descs = ptp_fs_descriptors,
+ .hs_descs = ptp_hs_descriptors,
+};
+
+}; // namespace android
diff --git a/media/mtp/MtpDescriptors.h b/media/mtp/MtpDescriptors.h
new file mode 100644
index 0000000..cfc3930
--- /dev/null
+++ b/media/mtp/MtpDescriptors.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MTP_DESCRIPTORS_H
+#define MTP_DESCRIPTORS_H
+
+#include <linux/usb/ch9.h>
+#include <linux/usb/functionfs.h>
+#include <sys/endian.h>
+
+namespace android {
+
+constexpr int MAX_PACKET_SIZE_FS = 64;
+constexpr int MAX_PACKET_SIZE_HS = 512;
+constexpr int MAX_PACKET_SIZE_SS = 1024;
+constexpr int MAX_PACKET_SIZE_EV = 28;
+
+struct func_desc {
+ struct usb_interface_descriptor intf;
+ struct usb_endpoint_descriptor_no_audio sink;
+ struct usb_endpoint_descriptor_no_audio source;
+ struct usb_endpoint_descriptor_no_audio intr;
+} __attribute__((packed));
+
+struct ss_func_desc {
+ struct usb_interface_descriptor intf;
+ struct usb_endpoint_descriptor_no_audio sink;
+ struct usb_ss_ep_comp_descriptor sink_comp;
+ struct usb_endpoint_descriptor_no_audio source;
+ struct usb_ss_ep_comp_descriptor source_comp;
+ struct usb_endpoint_descriptor_no_audio intr;
+ struct usb_ss_ep_comp_descriptor intr_comp;
+} __attribute__((packed));
+
+struct desc_v1 {
+ struct usb_functionfs_descs_head_v1 {
+ __le32 magic;
+ __le32 length;
+ __le32 fs_count;
+ __le32 hs_count;
+ } __attribute__((packed)) header;
+ struct func_desc fs_descs, hs_descs;
+} __attribute__((packed));
+
+struct desc_v2 {
+ struct usb_functionfs_descs_head_v2 header;
+ // The rest of the structure depends on the flags in the header.
+ __le32 fs_count;
+ __le32 hs_count;
+ __le32 ss_count;
+ __le32 os_count;
+ struct func_desc fs_descs, hs_descs;
+ struct ss_func_desc ss_descs;
+ struct usb_os_desc_header os_header;
+ struct usb_ext_compat_desc os_desc;
+} __attribute__((packed));
+
+// OS descriptor contents should not be changed. See b/64790536.
+static_assert(sizeof(struct desc_v2) == sizeof(usb_functionfs_descs_head_v2) +
+ 16 + 2 * sizeof(struct func_desc) + sizeof(struct ss_func_desc) +
+ sizeof(usb_os_desc_header) + sizeof(usb_ext_compat_desc),
+ "Size of mtp descriptor is incorrect!");
+
+#define STR_INTERFACE "MTP"
+struct functionfs_lang {
+ __le16 code;
+ char str1[sizeof(STR_INTERFACE)];
+} __attribute__((packed));
+
+struct functionfs_strings {
+ struct usb_functionfs_strings_head header;
+ struct functionfs_lang lang0;
+} __attribute__((packed));
+
+extern const struct desc_v2 mtp_desc_v2;
+extern const struct desc_v2 ptp_desc_v2;
+extern const struct desc_v1 mtp_desc_v1;
+extern const struct desc_v1 ptp_desc_v1;
+extern const struct functionfs_strings mtp_strings;
+
+}; // namespace android
+
+#endif // MTP_DESCRIPTORS_H
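For reference, the 16 in the static_assert above accounts for the four __le32 count fields (fs/hs/ss/os) that sit between the v2 header and the descriptor payloads. The same layout restated as an illustrative compile-time check (not part of the change itself):

    #include <stddef.h>            // offsetof
    #include "MtpDescriptors.h"

    static_assert(4 * sizeof(__le32) == 16,
                  "the four count fields occupy 16 bytes");
    static_assert(offsetof(android::desc_v2, fs_descs) ==
                  sizeof(usb_functionfs_descs_head_v2) + 16,
                  "descriptor payloads start right after the header and the counts");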
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 965985d..cb9827f 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -20,13 +20,10 @@
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/functionfs.h>
#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
-#include <sys/endian.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
@@ -36,6 +33,7 @@
#include <unistd.h>
#include "PosixAsyncIO.h"
+#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "mtp.h"
@@ -45,11 +43,6 @@
constexpr char FFS_MTP_EP_OUT[] = "/dev/usb-ffs/mtp/ep2";
constexpr char FFS_MTP_EP_INTR[] = "/dev/usb-ffs/mtp/ep3";
-constexpr int MAX_PACKET_SIZE_FS = 64;
-constexpr int MAX_PACKET_SIZE_HS = 512;
-constexpr int MAX_PACKET_SIZE_SS = 1024;
-constexpr int MAX_PACKET_SIZE_EV = 28;
-
constexpr unsigned AIO_BUFS_MAX = 128;
constexpr unsigned AIO_BUF_LEN = 16384;
@@ -61,234 +54,6 @@
struct timespec ZERO_TIMEOUT = { 0, 0 };
-struct func_desc {
- struct usb_interface_descriptor intf;
- struct usb_endpoint_descriptor_no_audio sink;
- struct usb_endpoint_descriptor_no_audio source;
- struct usb_endpoint_descriptor_no_audio intr;
-} __attribute__((packed));
-
-struct ss_func_desc {
- struct usb_interface_descriptor intf;
- struct usb_endpoint_descriptor_no_audio sink;
- struct usb_ss_ep_comp_descriptor sink_comp;
- struct usb_endpoint_descriptor_no_audio source;
- struct usb_ss_ep_comp_descriptor source_comp;
- struct usb_endpoint_descriptor_no_audio intr;
- struct usb_ss_ep_comp_descriptor intr_comp;
-} __attribute__((packed));
-
-struct desc_v1 {
- struct usb_functionfs_descs_head_v1 {
- __le32 magic;
- __le32 length;
- __le32 fs_count;
- __le32 hs_count;
- } __attribute__((packed)) header;
- struct func_desc fs_descs, hs_descs;
-} __attribute__((packed));
-
-struct desc_v2 {
- struct usb_functionfs_descs_head_v2 header;
- // The rest of the structure depends on the flags in the header.
- __le32 fs_count;
- __le32 hs_count;
- __le32 ss_count;
- __le32 os_count;
- struct func_desc fs_descs, hs_descs;
- struct ss_func_desc ss_descs;
- struct usb_os_desc_header os_header;
- struct usb_ext_compat_desc os_desc;
-} __attribute__((packed));
-
-const struct usb_interface_descriptor mtp_interface_desc = {
- .bLength = USB_DT_INTERFACE_SIZE,
- .bDescriptorType = USB_DT_INTERFACE,
- .bInterfaceNumber = 0,
- .bNumEndpoints = 3,
- .bInterfaceClass = USB_CLASS_STILL_IMAGE,
- .bInterfaceSubClass = 1,
- .bInterfaceProtocol = 1,
- .iInterface = 1,
-};
-
-const struct usb_interface_descriptor ptp_interface_desc = {
- .bLength = USB_DT_INTERFACE_SIZE,
- .bDescriptorType = USB_DT_INTERFACE,
- .bInterfaceNumber = 0,
- .bNumEndpoints = 3,
- .bInterfaceClass = USB_CLASS_STILL_IMAGE,
- .bInterfaceSubClass = 1,
- .bInterfaceProtocol = 1,
-};
-
-const struct usb_endpoint_descriptor_no_audio fs_sink = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 1 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_FS,
-};
-
-const struct usb_endpoint_descriptor_no_audio fs_source = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 2 | USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_FS,
-};
-
-const struct usb_endpoint_descriptor_no_audio intr = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 3 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_INT,
- .wMaxPacketSize = MAX_PACKET_SIZE_EV,
- .bInterval = 6,
-};
-
-const struct usb_endpoint_descriptor_no_audio hs_sink = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 1 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_HS,
-};
-
-const struct usb_endpoint_descriptor_no_audio hs_source = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 2 | USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_HS,
-};
-
-const struct usb_endpoint_descriptor_no_audio ss_sink = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 1 | USB_DIR_IN,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_SS,
-};
-
-const struct usb_endpoint_descriptor_no_audio ss_source = {
- .bLength = USB_DT_ENDPOINT_SIZE,
- .bDescriptorType = USB_DT_ENDPOINT,
- .bEndpointAddress = 2 | USB_DIR_OUT,
- .bmAttributes = USB_ENDPOINT_XFER_BULK,
- .wMaxPacketSize = MAX_PACKET_SIZE_SS,
-};
-
-const struct usb_ss_ep_comp_descriptor ss_sink_comp = {
- .bLength = sizeof(ss_sink_comp),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 6,
-};
-
-const struct usb_ss_ep_comp_descriptor ss_source_comp = {
- .bLength = sizeof(ss_source_comp),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
- .bMaxBurst = 6,
-};
-
-const struct usb_ss_ep_comp_descriptor ss_intr_comp = {
- .bLength = sizeof(ss_intr_comp),
- .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
-};
-
-const struct func_desc mtp_fs_descriptors = {
- .intf = mtp_interface_desc,
- .sink = fs_sink,
- .source = fs_source,
- .intr = intr,
-};
-
-const struct func_desc mtp_hs_descriptors = {
- .intf = mtp_interface_desc,
- .sink = hs_sink,
- .source = hs_source,
- .intr = intr,
-};
-
-const struct ss_func_desc mtp_ss_descriptors = {
- .intf = mtp_interface_desc,
- .sink = ss_sink,
- .sink_comp = ss_sink_comp,
- .source = ss_source,
- .source_comp = ss_source_comp,
- .intr = intr,
- .intr_comp = ss_intr_comp,
-};
-
-const struct func_desc ptp_fs_descriptors = {
- .intf = ptp_interface_desc,
- .sink = fs_sink,
- .source = fs_source,
- .intr = intr,
-};
-
-const struct func_desc ptp_hs_descriptors = {
- .intf = ptp_interface_desc,
- .sink = hs_sink,
- .source = hs_source,
- .intr = intr,
-};
-
-const struct ss_func_desc ptp_ss_descriptors = {
- .intf = ptp_interface_desc,
- .sink = ss_sink,
- .sink_comp = ss_sink_comp,
- .source = ss_source,
- .source_comp = ss_source_comp,
- .intr = intr,
- .intr_comp = ss_intr_comp,
-};
-
-#define STR_INTERFACE "MTP"
-const struct {
- struct usb_functionfs_strings_head header;
- struct {
- __le16 code;
- const char str1[sizeof(STR_INTERFACE)];
- } __attribute__((packed)) lang0;
-} __attribute__((packed)) strings = {
- .header = {
- .magic = htole32(FUNCTIONFS_STRINGS_MAGIC),
- .length = htole32(sizeof(strings)),
- .str_count = htole32(1),
- .lang_count = htole32(1),
- },
- .lang0 = {
- .code = htole16(0x0409),
- .str1 = STR_INTERFACE,
- },
-};
-
-struct usb_os_desc_header mtp_os_desc_header = {
- .interface = htole32(1),
- .dwLength = htole32(sizeof(usb_os_desc_header) + sizeof(usb_ext_compat_desc)),
- .bcdVersion = htole16(1),
- .wIndex = htole16(4),
- .bCount = htole16(1),
- .Reserved = htole16(0),
-};
-
-struct usb_ext_compat_desc mtp_os_desc_compat = {
- .bFirstInterfaceNumber = 0,
- .Reserved1 = htole32(1),
- .CompatibleID = { 'M', 'T', 'P' },
- .SubCompatibleID = {0},
- .Reserved2 = {0},
-};
-
-struct usb_ext_compat_desc ptp_os_desc_compat = {
- .bFirstInterfaceNumber = 0,
- .Reserved1 = htole32(1),
- .CompatibleID = { 'P', 'T', 'P' },
- .SubCompatibleID = {0},
- .Reserved2 = {0},
-};
-
struct mtp_device_status {
uint16_t wLength;
uint16_t wCode;
@@ -357,58 +122,38 @@
}
bool MtpFfsHandle::initFunctionfs() {
- ssize_t ret;
- struct desc_v1 v1_descriptor;
- struct desc_v2 v2_descriptor;
-
- v2_descriptor.header.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC_V2);
- v2_descriptor.header.length = htole32(sizeof(v2_descriptor));
- v2_descriptor.header.flags = FUNCTIONFS_HAS_FS_DESC | FUNCTIONFS_HAS_HS_DESC |
- FUNCTIONFS_HAS_SS_DESC | FUNCTIONFS_HAS_MS_OS_DESC;
- v2_descriptor.fs_count = 4;
- v2_descriptor.hs_count = 4;
- v2_descriptor.ss_count = 7;
- v2_descriptor.os_count = 1;
- v2_descriptor.fs_descs = mPtp ? ptp_fs_descriptors : mtp_fs_descriptors;
- v2_descriptor.hs_descs = mPtp ? ptp_hs_descriptors : mtp_hs_descriptors;
- v2_descriptor.ss_descs = mPtp ? ptp_ss_descriptors : mtp_ss_descriptors;
- v2_descriptor.os_header = mtp_os_desc_header;
- v2_descriptor.os_desc = mPtp ? ptp_os_desc_compat : mtp_os_desc_compat;
-
if (mControl < 0) { // might have already done this before
mControl.reset(TEMP_FAILURE_RETRY(open(FFS_MTP_EP0, O_RDWR)));
if (mControl < 0) {
PLOG(ERROR) << FFS_MTP_EP0 << ": cannot open control endpoint";
- goto err;
+ return false;
}
-
- ret = TEMP_FAILURE_RETRY(::write(mControl, &v2_descriptor, sizeof(v2_descriptor)));
- if (ret < 0) {
- v1_descriptor.header.magic = htole32(FUNCTIONFS_DESCRIPTORS_MAGIC);
- v1_descriptor.header.length = htole32(sizeof(v1_descriptor));
- v1_descriptor.header.fs_count = 4;
- v1_descriptor.header.hs_count = 4;
- v1_descriptor.fs_descs = mPtp ? ptp_fs_descriptors : mtp_fs_descriptors;
- v1_descriptor.hs_descs = mPtp ? ptp_hs_descriptors : mtp_hs_descriptors;
- PLOG(ERROR) << FFS_MTP_EP0 << "Switching to V1 descriptor format";
- ret = TEMP_FAILURE_RETRY(::write(mControl, &v1_descriptor, sizeof(v1_descriptor)));
- if (ret < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << "Writing descriptors failed";
- goto err;
- }
- }
- ret = TEMP_FAILURE_RETRY(::write(mControl, &strings, sizeof(strings)));
- if (ret < 0) {
- PLOG(ERROR) << FFS_MTP_EP0 << "Writing strings failed";
- goto err;
+ if (!writeDescriptors()) {
+ closeConfig();
+ return false;
}
}
-
return true;
+}
-err:
- closeConfig();
- return false;
+bool MtpFfsHandle::writeDescriptors() {
+ ssize_t ret = TEMP_FAILURE_RETRY(::write(mControl,
+ &(mPtp ? ptp_desc_v2 : mtp_desc_v2), sizeof(desc_v2)));
+ if (ret < 0) {
+ PLOG(ERROR) << FFS_MTP_EP0 << "Switching to V1 descriptor format";
+ ret = TEMP_FAILURE_RETRY(::write(mControl,
+ &(mPtp ? ptp_desc_v1 : mtp_desc_v1), sizeof(desc_v1)));
+ if (ret < 0) {
+ PLOG(ERROR) << FFS_MTP_EP0 << "Writing descriptors failed";
+ return false;
+ }
+ }
+ ret = TEMP_FAILURE_RETRY(::write(mControl, &mtp_strings, sizeof(mtp_strings)));
+ if (ret < 0) {
+ PLOG(ERROR) << FFS_MTP_EP0 << "Writing strings failed";
+ return false;
+ }
+ return true;
}
void MtpFfsHandle::closeConfig() {
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index 2f90bd1..2347000 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -42,11 +42,14 @@
};
template <class T> class MtpFfsHandleTest;
+template <class T> class MtpFfsHandleTest_testControl_Test;
class MtpFfsHandle : public IMtpHandle {
- template <class T> friend class android::MtpFfsHandleTest;
+ template <class T> friend class MtpFfsHandleTest;
+ template <class T> friend class MtpFfsHandleTest_testControl_Test;
protected:
bool initFunctionfs();
+ bool writeDescriptors();
void closeConfig();
void closeEndpoints();
void advise(int fd);
diff --git a/media/mtp/OWNERS b/media/mtp/OWNERS
new file mode 100644
index 0000000..219307b
--- /dev/null
+++ b/media/mtp/OWNERS
@@ -0,0 +1 @@
+zhangjerry@google.com
diff --git a/media/mtp/tests/MtpFfsHandle_test.cpp b/media/mtp/tests/MtpFfsHandle_test.cpp
index 8d7301d..9c916b7 100644
--- a/media/mtp/tests/MtpFfsHandle_test.cpp
+++ b/media/mtp/tests/MtpFfsHandle_test.cpp
@@ -25,6 +25,7 @@
#include <unistd.h>
#include <utils/Log.h>
+#include "MtpDescriptors.h"
#include "MtpFfsHandle.h"
#include "MtpFfsCompatHandle.h"
@@ -66,8 +67,8 @@
handle = std::make_unique<T>();
EXPECT_EQ(pipe(fd), 0);
- handle->mControl.reset(fd[0]);
- control.reset(fd[1]);
+ control.reset(fd[0]);
+ handle->mControl.reset(fd[1]);
EXPECT_EQ(pipe(fd), 0);
EXPECT_EQ(fcntl(fd[0], F_SETPIPE_SZ, 1048576), 1048576);
@@ -83,7 +84,7 @@
intr.reset(fd[0]);
handle->mIntr.reset(fd[1]);
- handle->start();
+ EXPECT_EQ(handle->start(), 0);
}
~MtpFfsHandleTest() {
@@ -94,6 +95,16 @@
typedef ::testing::Types<MtpFfsHandle, MtpFfsCompatHandle> mtpHandles;
TYPED_TEST_CASE(MtpFfsHandleTest, mtpHandles);
+TYPED_TEST(MtpFfsHandleTest, testControl) {
+ EXPECT_TRUE(this->handle->writeDescriptors());
+ struct desc_v2 desc;
+ struct functionfs_strings strings;
+ EXPECT_EQ(read(this->control, &desc, sizeof(desc)), (long)sizeof(desc));
+ EXPECT_EQ(read(this->control, &strings, sizeof(strings)), (long)sizeof(strings));
+ EXPECT_TRUE(std::memcmp(&desc, &mtp_desc_v2, sizeof(desc)) == 0);
+ EXPECT_TRUE(std::memcmp(&strings, &mtp_strings, sizeof(strings)) == 0);
+}
+
TYPED_TEST(MtpFfsHandleTest, testRead) {
EXPECT_EQ(write(this->bulk_out, dummyDataStr.c_str(), TEST_PACKET_SIZE), TEST_PACKET_SIZE);
char buf[TEST_PACKET_SIZE + 1];
diff --git a/services/mediaanalytics/MediaAnalyticsService.cpp b/services/mediaanalytics/MediaAnalyticsService.cpp
index c7f9270..f08be50 100644
--- a/services/mediaanalytics/MediaAnalyticsService.cpp
+++ b/services/mediaanalytics/MediaAnalyticsService.cpp
@@ -299,6 +299,8 @@
bool finalizing = item->getFinalized();
+ Mutex::Autolock _l(mLock);
+
// if finalizing, we'll remove it
MediaAnalyticsItem *oitem = findItem(mOpen, item, finalizing | forcenew);
if (oitem != NULL) {
@@ -609,10 +611,9 @@
// XXX: rewrite this to manage persistence, etc.
// insert appropriately into queue
+// caller should hold mLock
void MediaAnalyticsService::saveItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem * item, int front) {
- Mutex::Autolock _l(mLock);
-
// adding at back of queue (fifo order)
if (front) {
l->push_front(item);
@@ -682,6 +683,7 @@
}
// find the incomplete record that this will overlay
+// caller should hold mLock
MediaAnalyticsItem *MediaAnalyticsService::findItem(List<MediaAnalyticsItem*> *theList, MediaAnalyticsItem *nitem, bool removeit) {
if (nitem == NULL) {
return NULL;
@@ -689,8 +691,6 @@
MediaAnalyticsItem *item = NULL;
- Mutex::Autolock _l(mLock);
-
for (List<MediaAnalyticsItem *>::iterator it = theList->begin();
it != theList->end(); it++) {
MediaAnalyticsItem *tmp = (*it);
@@ -711,10 +711,9 @@
// delete the indicated record
+// caller should hold mLock
void MediaAnalyticsService::deleteItem(List<MediaAnalyticsItem *> *l, MediaAnalyticsItem *item) {
- Mutex::Autolock _l(mLock);
-
for (List<MediaAnalyticsItem *>::iterator it = l->begin();
it != l->end(); it++) {
if ((*it)->getSessionID() != item->getSessionID())
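The hunks above move locking out of the list helpers and into their caller, so a find/delete/save sequence runs under a single acquisition of mLock. A generic sketch of that caller-holds-lock pattern with hypothetical names (not the actual service code):

    #include <utils/Mutex.h>

    struct Item;

    class Registry {
    public:
        void submit(Item *item) {
            android::Mutex::Autolock _l(mLock);  // one lock for the whole sequence
            Item *old = findItemLocked(item);
            if (old != NULL) {
                deleteItemLocked(old);
            }
            saveItemLocked(item);
        }
    private:
        // These helpers assume the caller already holds mLock, which keeps the
        // sequence atomic and avoids re-entrant locking.
        Item *findItemLocked(Item *item);
        void deleteItemLocked(Item *item);
        void saveItemLocked(Item *item);
        android::Mutex mLock;
    };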
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index 438a332..dd71ed7 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -19,6 +19,7 @@
setpriority: 1
sigaltstack: 1
clone: 1
+sched_setscheduler: 1
lseek: 1
newfstatat: 1
faccessat: 1
diff --git a/services/medialog/Android.bp b/services/medialog/Android.bp
new file mode 100644
index 0000000..29e6dfc
--- /dev/null
+++ b/services/medialog/Android.bp
@@ -0,0 +1,22 @@
+cc_library_shared {
+ name: "libmedialogservice",
+
+ srcs: [
+ "IMediaLogService.cpp",
+ "MediaLogService.cpp",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbinder",
+ "liblog",
+ "libnbaio",
+ "libnblog",
+ "libutils",
+ ],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ ],
+}
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
deleted file mode 100644
index 4f2630e..0000000
--- a/services/medialog/Android.mk
+++ /dev/null
@@ -1,17 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := MediaLogService.cpp IMediaLogService.cpp
-
-LOCAL_SHARED_LIBRARIES := libbinder libutils liblog libnbaio libnblog libaudioutils
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE:= libmedialogservice
-
-LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
-
-LOCAL_CFLAGS := -Werror -Wall
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index 952aa82..442653c 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -49,10 +49,9 @@
memset(mOutputBuffer, 0, mBufferSizeInBytes);
}
-bool AAudioMixer::mix(int trackIndex, FifoBuffer *fifo, float volume) {
+bool AAudioMixer::mix(int streamIndex, FifoBuffer *fifo, bool allowUnderflow) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer;
- fifo_frames_t framesLeft = mFramesPerBurst;
#if AAUDIO_MIXER_ATRACE_ENABLED
ATRACE_BEGIN("aaMix");
@@ -63,35 +62,44 @@
#if AAUDIO_MIXER_ATRACE_ENABLED
if (ATRACE_ENABLED()) {
char rdyText[] = "aaMixRdy#";
- char letter = 'A' + (trackIndex % 26);
+ char letter = 'A' + (streamIndex % 26);
rdyText[sizeof(rdyText) - 2] = letter;
ATRACE_INT(rdyText, fullFrames);
}
#else /* MIXER_ATRACE_ENABLED */
- (void) trackIndex;
+ (void) streamIndex;
- (void) fullFrames;
#endif /* AAUDIO_MIXER_ATRACE_ENABLED */
+ // If allowUnderflow then always advance by one burst even if we do not have the data.
+ // Otherwise the stream timing will drift whenever there is an underflow.
+ // This actual underflow can then be detected by the client for XRun counting.
+ //
+ // Generally, allowUnderflow will be false when stopping a stream and we want to
+ // use up whatever data is in the queue.
+ fifo_frames_t framesDesired = mFramesPerBurst;
+ if (!allowUnderflow && fullFrames < framesDesired) {
+ framesDesired = fullFrames; // just use what is available then stop
+ }
+
// Mix data in one or two parts.
int partIndex = 0;
+ int32_t framesLeft = framesDesired;
while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
- fifo_frames_t framesToMix = framesLeft;
- fifo_frames_t framesAvailable = wrappingBuffer.numFrames[partIndex];
- if (framesAvailable > 0) {
- if (framesToMix > framesAvailable) {
- framesToMix = framesAvailable;
+ fifo_frames_t framesToMixFromPart = framesLeft;
+ fifo_frames_t framesAvailableFromPart = wrappingBuffer.numFrames[partIndex];
+ if (framesAvailableFromPart > 0) {
+ if (framesToMixFromPart > framesAvailableFromPart) {
+ framesToMixFromPart = framesAvailableFromPart;
}
- mixPart(destination, (float *)wrappingBuffer.data[partIndex], framesToMix, volume);
+ mixPart(destination, (float *)wrappingBuffer.data[partIndex],
+ framesToMixFromPart);
- destination += framesToMix * mSamplesPerFrame;
- framesLeft -= framesToMix;
+ destination += framesToMixFromPart * mSamplesPerFrame;
+ framesLeft -= framesToMixFromPart;
}
partIndex++;
}
- // Always advance by one burst even if we do not have the data.
- // Otherwise the stream timing will drift whenever there is an underflow.
- // This actual underflow can then be detected by the client for XRun counting.
- fifo->getFifoControllerBase()->advanceReadIndex(mFramesPerBurst);
+ fifo->getFifoControllerBase()->advanceReadIndex(framesDesired);
#if AAUDIO_MIXER_ATRACE_ENABLED
ATRACE_END();
@@ -100,11 +108,11 @@
return (framesLeft > 0); // did not get all the frames we needed, ie. "underflow"
}
-void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames, float volume) {
+void AAudioMixer::mixPart(float *destination, float *source, int32_t numFrames) {
int32_t numSamples = numFrames * mSamplesPerFrame;
// TODO maybe optimize using SIMD
for (int sampleIndex = 0; sampleIndex < numSamples; sampleIndex++) {
- *destination++ += *source++ * volume;
+ *destination++ += *source++;
}
}
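A worked example of the advance logic above: with mFramesPerBurst = 64 and only 40 frames in the FIFO, allowUnderflow = true mixes 40 frames, still advances the read index by 64 and reports an underflow; allowUnderflow = false mixes 40 frames, advances by 40 and reports none. The same arithmetic as a standalone sketch (hypothetical helper, not the class method):

    #include <stdint.h>

    struct MixPlan {
        int32_t framesToMix;      // frames actually copied from the FIFO
        int32_t framesToAdvance;  // how far the read index moves afterwards
        bool underflowed;         // caller wanted more than was available
    };

    static MixPlan planMix(int32_t framesPerBurst, int32_t fullFrames, bool allowUnderflow) {
        int32_t desired = framesPerBurst;
        if (!allowUnderflow && fullFrames < desired) {
            desired = fullFrames;             // draining: take what is there, then stop
        }
        int32_t toMix = (fullFrames < desired) ? fullFrames : desired;
        return { toMix, desired, toMix < desired };
    }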
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index a8090bc..5625d4d 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -33,13 +33,14 @@
/**
* Mix from this FIFO
- * @param fifo
- * @param volume
- * @return true if underflowed
+ * @param streamIndex for marking stream variables in systrace
+ * @param fifo to read from
+ * @param allowUnderflow if true then allow mixer to advance read index past the write index
+ * @return true if actually underflowed
*/
- bool mix(int trackIndex, android::FifoBuffer *fifo, float volume);
+ bool mix(int streamIndex, android::FifoBuffer *fifo, bool allowUnderflow);
- void mixPart(float *destination, float *source, int32_t numFrames, float volume);
+ void mixPart(float *destination, float *source, int32_t numFrames);
float *getOutputBuffer();
@@ -50,5 +51,4 @@
int32_t mBufferSizeInBytes = 0;
};
-
#endif //AAUDIO_AAUDIO_MIXER_H
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 4be25c8..7e6e247 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -258,6 +258,13 @@
aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
audio_port_handle_t clientHandle) {
mFramesTransferred.reset32();
+
+ // Round 64-bit counter up to a multiple of the buffer capacity.
+ // This is required because the 64-bit counter is used as an index
+ // into a circular buffer and the actual HW position is reset to zero
+ // when the stream is stopped.
+ mFramesTransferred.roundUp64(getBufferCapacity());
+
return stopClient(mPortHandle);
}
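roundUp64() is taken here to be the usual round-up-to-a-multiple operation on the 64-bit frame counter; for example, a counter of 1000 with a buffer capacity of 384 frames becomes 1152, so the hardware position restarting at zero lines up with a buffer boundary. The arithmetic as a sketch (the helper name is illustrative):

    #include <stdint.h>

    // Round 'counter' up to the next multiple of 'capacity' (capacity > 0).
    static int64_t roundUpToMultiple(int64_t counter, int32_t capacity) {
        int64_t remainder = counter % capacity;
        return (remainder == 0) ? counter : counter + (capacity - remainder);
    }
    // roundUpToMultiple(1000, 384) == 1152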
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 9b1833a..472a336 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -82,9 +82,13 @@
std::lock_guard <std::mutex> lock(mLockStreams);
for (const auto clientStream : mRegisteredStreams) {
int64_t clientFramesRead = 0;
+ bool allowUnderflow = true;
- if (!clientStream->isRunning()) {
- continue;
+ aaudio_stream_state_t state = clientStream->getState();
+ if (state == AAUDIO_STREAM_STATE_STOPPING) {
+ allowUnderflow = false; // just read what is already in the FIFO
+ } else if (state != AAUDIO_STREAM_STATE_STARTED) {
+ continue; // this stream is not running so skip it.
}
sp<AAudioServiceStreamShared> streamShared =
@@ -104,8 +108,7 @@
int64_t positionOffset = mmapFramesWritten - clientFramesRead;
streamShared->setTimestampPositionOffset(positionOffset);
- float volume = 1.0; // to match legacy volume
- bool underflowed = mMixer.mix(index, fifo, volume);
+ bool underflowed = mMixer.mix(index, fifo, allowUnderflow);
if (underflowed) {
streamShared->incrementXRunCount();
}
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index e670129..df1a021 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -202,19 +202,23 @@
ALOGE("AAudioServiceStreamShared::pause() missing endpoint");
return AAUDIO_ERROR_INVALID_STATE;
}
+
+ // Send it now because the timestamp gets rounded up when stopStream() is called below.
+ // Also we don't need the timestamps while we are shutting down.
+ sendCurrentTimestamp();
+
+ result = stopTimestampThread();
+ if (result != AAUDIO_OK) {
+ disconnect();
+ return result;
+ }
+
result = mServiceEndpoint->stopStream(this, mClientHandle);
if (result != AAUDIO_OK) {
ALOGE("AAudioServiceStreamShared::pause() mServiceEndpoint returned %d", result);
disconnect(); // TODO should we return or pause Base first?
}
- sendCurrentTimestamp();
- mThreadEnabled.store(false);
- result = mTimestampThread.stop();
- if (result != AAUDIO_OK) {
- disconnect();
- return result;
- }
sendServiceEvent(AAUDIO_SERVICE_EVENT_PAUSED);
setState(AAUDIO_STREAM_STATE_PAUSED);
return result;
@@ -231,6 +235,10 @@
return AAUDIO_ERROR_INVALID_STATE;
}
+ setState(AAUDIO_STREAM_STATE_STOPPING);
+
+ // Send it now because the timestamp gets rounded up when stopStream() is called below.
+ // Also we don't need the timestamps while we are shutting down.
sendCurrentTimestamp(); // warning - this calls a virtual function
result = stopTimestampThread();
if (result != AAUDIO_OK) {
diff --git a/services/soundtrigger/SoundTriggerHwService.cpp b/services/soundtrigger/SoundTriggerHwService.cpp
index 22519a3..46f388b 100644
--- a/services/soundtrigger/SoundTriggerHwService.cpp
+++ b/services/soundtrigger/SoundTriggerHwService.cpp
@@ -206,9 +206,10 @@
service->sendRecognitionEvent(event, module);
}
-sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent_l(
+sp<IMemory> SoundTriggerHwService::prepareRecognitionEvent(
struct sound_trigger_recognition_event *event)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
//sanitize event
@@ -216,21 +217,21 @@
case SOUND_MODEL_TYPE_KEYPHRASE:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_phrase_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for keyphrase event type",
+ "prepareRecognitionEvent(): invalid data offset %u for keyphrase event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_phrase_recognition_event);
break;
case SOUND_MODEL_TYPE_GENERIC:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_generic_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for generic event type",
+ "prepareRecognitionEvent(): invalid data offset %u for generic event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_generic_recognition_event);
break;
case SOUND_MODEL_TYPE_UNKNOWN:
ALOGW_IF(event->data_size != 0 && event->data_offset !=
sizeof(struct sound_trigger_recognition_event),
- "prepareRecognitionEvent_l(): invalid data offset %u for unknown event type",
+ "prepareRecognitionEvent(): invalid data offset %u for unknown event type",
event->data_offset);
event->data_offset = sizeof(struct sound_trigger_recognition_event);
break;
@@ -251,30 +252,19 @@
void SoundTriggerHwService::sendRecognitionEvent(struct sound_trigger_recognition_event *event,
Module *module)
- {
- AutoMutex lock(mServiceLock);
- if (module == NULL) {
- return;
- }
- sp<IMemory> eventMemory = prepareRecognitionEvent_l(event);
- if (eventMemory == 0) {
- return;
- }
- sp<Module> strongModule;
- for (size_t i = 0; i < mModules.size(); i++) {
- if (mModules.valueAt(i).get() == module) {
- strongModule = mModules.valueAt(i);
- break;
- }
- }
- if (strongModule == 0) {
- return;
- }
+{
+ if (module == NULL) {
+ return;
+ }
+ sp<IMemory> eventMemory = prepareRecognitionEvent(event);
+ if (eventMemory == 0) {
+ return;
+ }
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
eventMemory);
- callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ callbackEvent->setModule(module);
+ sendCallbackEvent(callbackEvent);
}
// static
@@ -293,8 +283,9 @@
service->sendSoundModelEvent(event, module);
}
-sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent_l(struct sound_trigger_model_event *event)
+sp<IMemory> SoundTriggerHwService::prepareSoundModelEvent(struct sound_trigger_model_event *event)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
size_t size = event->data_offset + event->data_size;
@@ -311,30 +302,20 @@
void SoundTriggerHwService::sendSoundModelEvent(struct sound_trigger_model_event *event,
Module *module)
{
- AutoMutex lock(mServiceLock);
- sp<IMemory> eventMemory = prepareSoundModelEvent_l(event);
+ sp<IMemory> eventMemory = prepareSoundModelEvent(event);
if (eventMemory == 0) {
return;
}
- sp<Module> strongModule;
- for (size_t i = 0; i < mModules.size(); i++) {
- if (mModules.valueAt(i).get() == module) {
- strongModule = mModules.valueAt(i);
- break;
- }
- }
- if (strongModule == 0) {
- return;
- }
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SOUNDMODEL,
eventMemory);
- callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ callbackEvent->setModule(module);
+ sendCallbackEvent(callbackEvent);
}
sp<IMemory> SoundTriggerHwService::prepareServiceStateEvent_l(sound_trigger_service_state_t state)
{
+ AutoMutex lock(mMemoryDealerLock);
sp<IMemory> eventMemory;
size_t size = sizeof(sound_trigger_service_state_t);
@@ -368,7 +349,7 @@
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
eventMemory);
callbackEvent->setModule(strongModule);
- sendCallbackEvent_l(callbackEvent);
+ sendCallbackEvent(callbackEvent);
}
void SoundTriggerHwService::sendServiceStateEvent_l(sound_trigger_service_state_t state,
@@ -381,11 +362,10 @@
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_SERVICE_STATE,
eventMemory);
callbackEvent->setModuleClient(moduleClient);
- sendCallbackEvent_l(callbackEvent);
+ sendCallbackEvent(callbackEvent);
}
-// call with mServiceLock held
-void SoundTriggerHwService::sendCallbackEvent_l(const sp<CallbackEvent>& event)
+void SoundTriggerHwService::sendCallbackEvent(const sp<CallbackEvent>& event)
{
mCallbackThread->sendCallbackEvent(event);
}
@@ -404,6 +384,19 @@
if (moduleClient == 0) {
return;
}
+ } else {
+ // Sanity check on this being a Module we know about.
+ bool foundModule = false;
+ for (size_t i = 0; i < mModules.size(); i++) {
+ if (mModules.valueAt(i).get() == module.get()) {
+ foundModule = true;
+ break;
+ }
+ }
+ if (!foundModule) {
+ ALOGE("onCallbackEvent for unknown module");
+ return;
+ }
}
}
if (module != 0) {
@@ -878,7 +871,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -889,7 +882,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -900,7 +893,7 @@
event.common.type = model->mType;
event.common.model = model->mHandle;
event.common.data_size = 0;
- sp<IMemory> eventMemory = service->prepareRecognitionEvent_l(&event.common);
+ sp<IMemory> eventMemory = service->prepareRecognitionEvent(&event.common);
if (eventMemory != 0) {
events.add(eventMemory);
}
@@ -915,7 +908,7 @@
sp<CallbackEvent> callbackEvent = new CallbackEvent(CallbackEvent::TYPE_RECOGNITION,
events[i]);
callbackEvent->setModule(this);
- service->sendCallbackEvent_l(callbackEvent);
+ service->sendCallbackEvent(callbackEvent);
}
exit:
diff --git a/services/soundtrigger/SoundTriggerHwService.h b/services/soundtrigger/SoundTriggerHwService.h
index 95efc4b..a955f40 100644
--- a/services/soundtrigger/SoundTriggerHwService.h
+++ b/services/soundtrigger/SoundTriggerHwService.h
@@ -214,11 +214,11 @@
};
static void recognitionCallback(struct sound_trigger_recognition_event *event, void *cookie);
- sp<IMemory> prepareRecognitionEvent_l(struct sound_trigger_recognition_event *event);
+ sp<IMemory> prepareRecognitionEvent(struct sound_trigger_recognition_event *event);
void sendRecognitionEvent(struct sound_trigger_recognition_event *event, Module *module);
static void soundModelCallback(struct sound_trigger_model_event *event, void *cookie);
- sp<IMemory> prepareSoundModelEvent_l(struct sound_trigger_model_event *event);
+ sp<IMemory> prepareSoundModelEvent(struct sound_trigger_model_event *event);
void sendSoundModelEvent(struct sound_trigger_model_event *event, Module *module);
sp<IMemory> prepareServiceStateEvent_l(sound_trigger_service_state_t state);
@@ -226,7 +226,7 @@
void sendServiceStateEvent_l(sound_trigger_service_state_t state,
ModuleClient *moduleClient);
- void sendCallbackEvent_l(const sp<CallbackEvent>& event);
+ void sendCallbackEvent(const sp<CallbackEvent>& event);
void onCallbackEvent(const sp<CallbackEvent>& event);
private:
@@ -238,6 +238,7 @@
DefaultKeyedVector< sound_trigger_module_handle_t, sp<Module> > mModules;
sp<CallbackThread> mCallbackThread;
sp<MemoryDealer> mMemoryDealer;
+ Mutex mMemoryDealerLock;
bool mCaptureState;
};
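The new mMemoryDealerLock lets the prepare*Event() helpers allocate shared memory without holding the broad mServiceLock; only the dealer access itself is serialized. A generic sketch of that narrower-lock allocation, with hypothetical names (it assumes the sp<MemoryDealer> mMemoryDealer and Mutex mMemoryDealerLock members declared above):

    sp<IMemory> allocateEventMemory(size_t size) {
        AutoMutex lock(mMemoryDealerLock);       // serialize only the allocation
        sp<IMemory> eventMemory = mMemoryDealer->allocate(size);
        if (eventMemory == 0 || eventMemory->pointer() == NULL) {
            return sp<IMemory>();                // allocation failed
        }
        return eventMemory;                      // caller fills in the event afterwards
    }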