Merge "Rename libcamera2.so -> libcamera2ndk.so"
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index bb1f7c5..297d11b 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3289,6 +3289,81 @@
*/
ACAMERA_SCALER_CROPPING_TYPE = // byte (acamera_metadata_enum_android_scaler_cropping_type_t)
ACAMERA_SCALER_START + 13,
+ /**
+ * <p>Recommended stream configurations for common client use cases.</p>
+ *
+ * <p>Type: int32[n*5] (acamera_metadata_enum_android_scaler_available_recommended_stream_configurations_t)</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Optional subset of the ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS that contains
+ * similar tuples listed in the form of
+ * (width, height, format, output/input stream, usecase bit field).
+ * Camera devices will be able to suggest particular stream configurations which are
+ * power and performance efficient for specific use cases. For more information about
+ * retrieving the suggestions see
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.</p>
+ * <p>The data representation is int[5], which maps to
+ * (width, height, format, output/input stream, usecase bit field). The array can be
+ * parsed using the following pseudo code:</p>
+ * <p>struct StreamConfiguration {
+ * int32_t format;
+ * int32_t width;
+ * int32_t height;
+ * int32_t isInput; };</p>
+ * <p>void getPreferredStreamConfigurations(
+ * int32_t *array, size_t count, int32_t usecaseId,
+ * Vector < StreamConfiguration > * scs) {
+ * const size_t STREAM_CONFIGURATION_SIZE = 5;
+ * const size_t STREAM_WIDTH_OFFSET = 0;
+ * const size_t STREAM_HEIGHT_OFFSET = 1;
+ * const size_t STREAM_FORMAT_OFFSET = 2;
+ * const size_t STREAM_IS_INPUT_OFFSET = 3;
+ * const size_t STREAM_USECASE_BITMAP_OFFSET = 4;</p>
+ * <pre><code>for (size_t i = 0; i < count; i+= STREAM_CONFIGURATION_SIZE) {
+ * int32_t width = array[i + STREAM_WIDTH_OFFSET];
+ * int32_t height = array[i + STREAM_HEIGHT_OFFSET];
+ * int32_t format = array[i + STREAM_FORMAT_OFFSET];
+ * int32_t isInput = array[i + STREAM_IS_INPUT_OFFSET];
+ * int32_t supportedUsecases = array[i + STREAM_USECASE_BITMAP_OFFSET];
+ * if (supportedUsecases & (1 << usecaseId)) {
+ * StreamConfiguration sc = {format, width, height, isInput};
+ * scs->add(sc);
+ * }
+ * }
+ * </code></pre>
+ * <p>}</p>
+ *
+ * @see ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS =
+ // int32[n*5] (acamera_metadata_enum_android_scaler_available_recommended_stream_configurations_t)
+ ACAMERA_SCALER_START + 14,
+ /**
+ * <p>Recommended mappings of image formats that are supported by this
+ * camera device for input streams, to their corresponding output formats.</p>
+ *
+ * <p>Type: int32</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This is a recommended subset of the complete list of mappings found in
+ * android.scaler.availableInputOutputFormatsMap. The same requirements apply here as well.
+ * The list, however, doesn't need to contain all available and supported mappings. Instead,
+ * developers must list only recommended and efficient entries.
+ * If set, the information will be available in the ZERO_SHUTTER_LAG recommended stream
+ * configuration see
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_INPUT_OUTPUT_FORMATS_MAP =
+ // int32
+ ACAMERA_SCALER_START + 15,
ACAMERA_SCALER_END,
/**
@@ -5412,6 +5487,32 @@
*/
ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE = // byte (acamera_metadata_enum_android_depth_depth_is_exclusive_t)
ACAMERA_DEPTH_START + 4,
+ /**
+ * <p>Recommended depth stream configurations for common client use cases.</p>
+ *
+ * <p>Type: int32[n*5]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>Optional subset of the ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS that
+ * contains similar tuples listed in the form of
+ * (width, height, format, output/input stream, usecase bit field).
+ * Camera devices will be able to suggest particular depth stream configurations which are
+ * power and performance efficient for specific use cases. For more information about
+ * retrieving the suggestions see
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#getRecommendedStreamConfigurationMap">CameraCharacteristics#getRecommendedStreamConfigurationMap</a>.</p>
+ * <p>For data representation please refer to
+ * ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS</p>
+ *
+ * @see ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS
+ * @see ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS
+ */
+ ACAMERA_DEPTH_AVAILABLE_RECOMMENDED_DEPTH_STREAM_CONFIGURATIONS =
+ // int32[n*5]
+ ACAMERA_DEPTH_START + 5,
ACAMERA_DEPTH_END,
/**
@@ -7305,6 +7406,67 @@
} acamera_metadata_enum_android_scaler_cropping_type_t;
+// ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS
+typedef enum acamera_metadata_enum_acamera_scaler_available_recommended_stream_configurations {
+ /**
+ * <p>Preview must only include non-stalling processed stream configurations with
+ * output formats like YUV_420_888, IMPLEMENTATION_DEFINED, etc.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW
+ = 0x0,
+
+ /**
+ * <p>Video record must include stream configurations that match the advertised
+ * supported media profiles <a href="https://developer.android.com/reference/android/media/CamcorderProfile.html">CamcorderProfile</a> with
+ * IMPLEMENTATION_DEFINED format.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD
+ = 0x1,
+
+ /**
+ * <p>Video snapshot must include stream configurations at least as big as
+ * the maximum RECORD resolutions and only with format BLOB + DATASPACE_JFIF
+ * format/dataspace combination (JPEG). Additionally the configurations shouldn't cause
+ * preview glitches and also be able to run at 30 fps.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VIDEO_SNAPSHOT
+ = 0x2,
+
+ /**
+ * <p>Recommended snapshot stream configurations must include at least one with
+ * size close to ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE with BLOB + DATASPACE_JFIF
+ * format/dataspace combination (JPEG). Taking into account restrictions on aspect
+ * ratio, alignment etc. the area of the maximum suggested size shouldn't be less than
+ * 97% of the sensor array size area.</p>
+ *
+ * @see ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT
+ = 0x3,
+
+ /**
+ * <p>If supported, recommended input stream configurations must only be advertised with
+ * ZSL along with other processed and/or stalling output formats.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_ZSL = 0x4,
+
+ /**
+ * <p>If supported, recommended raw stream configurations must only include RAW based
+ * output formats.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RAW = 0x5,
+
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PUBLIC_END
+ = 0x6,
+
+ /**
+ * <p>Vendor defined use cases. These depend on the vendor implementation.</p>
+ */
+ ACAMERA_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_VENDOR_START
+ = 0x18,
+
+} acamera_metadata_enum_android_scaler_available_recommended_stream_configurations_t;
+
// ACAMERA_SENSOR_REFERENCE_ILLUMINANT1
typedef enum acamera_metadata_enum_acamera_sensor_reference_illuminant1 {
diff --git a/media/bufferpool/2.0/Accessor.cpp b/media/bufferpool/2.0/Accessor.cpp
index f264501..57b4609 100644
--- a/media/bufferpool/2.0/Accessor.cpp
+++ b/media/bufferpool/2.0/Accessor.cpp
@@ -113,6 +113,10 @@
return sConnectionDeathRecipient;
}
+void Accessor::createInvalidator() {
+ Accessor::Impl::createInvalidator();
+}
+
// Methods from ::android::hardware::media::bufferpool::V2_0::IAccessor follow.
Return<void> Accessor::connect(
const sp<::android::hardware::media::bufferpool::V2_0::IObserver>& observer,
diff --git a/media/bufferpool/2.0/Accessor.h b/media/bufferpool/2.0/Accessor.h
index 4b5b17a..8d02519 100644
--- a/media/bufferpool/2.0/Accessor.h
+++ b/media/bufferpool/2.0/Accessor.h
@@ -185,6 +185,8 @@
*/
static sp<ConnectionDeathRecipient> getConnectionDeathRecipient();
+ static void createInvalidator();
+
private:
class Impl;
std::shared_ptr<Impl> mImpl;
diff --git a/media/bufferpool/2.0/AccessorImpl.cpp b/media/bufferpool/2.0/AccessorImpl.cpp
index 4cc8abc..84fcca2 100644
--- a/media/bufferpool/2.0/AccessorImpl.cpp
+++ b/media/bufferpool/2.0/AccessorImpl.cpp
@@ -177,6 +177,7 @@
ResultStatus Accessor::Impl::close(ConnectionId connectionId) {
std::lock_guard<std::mutex> lock(mBufferPool.mMutex);
+ ALOGV("connection close %lld: %u", (long long)connectionId, mBufferPool.mInvalidation.mId);
mBufferPool.processStatusMessages();
mBufferPool.handleClose(connectionId);
mBufferPool.mObserver.close(connectionId);
@@ -277,7 +278,7 @@
return int(total ? 0.5 + 100. * static_cast<S>(base) / total : 0);
}
-std::atomic<std::uint32_t> Accessor::Impl::BufferPool::Invalidation::sSeqId(0);
+std::atomic<std::uint32_t> Accessor::Impl::BufferPool::Invalidation::sInvSeqId(0);
Accessor::Impl::Impl::BufferPool::~BufferPool() {
std::lock_guard<std::mutex> lock(mMutex);
@@ -316,8 +317,7 @@
BufferId bufferId,
BufferInvalidationChannel &channel) {
for (auto it = mPendings.begin(); it != mPendings.end();) {
- if (it->invalidate(bufferId)) {
- it = mPendings.erase(it);
+ if (it->isInvalidated(bufferId)) {
uint32_t msgId = 0;
if (it->mNeedsAck) {
msgId = ++mInvalidationId;
@@ -327,7 +327,8 @@
}
}
channel.postInvalidation(msgId, it->mFrom, it->mTo);
- sInvalidator.addAccessor(mId, it->mImpl);
+ sInvalidator->addAccessor(mId, it->mImpl);
+ it = mPendings.erase(it);
continue;
}
++it;
@@ -350,10 +351,12 @@
msgId = ++mInvalidationId;
}
}
+ ALOGV("bufferpool invalidation requested and queued");
channel.postInvalidation(msgId, from, to);
- sInvalidator.addAccessor(mId, impl);
+ sInvalidator->addAccessor(mId, impl);
} else {
// TODO: sending hint message?
+ ALOGV("bufferpool invalidation requested and pending");
Pending pending(needsAck, from, to, left, impl);
mPendings.push_back(pending);
}
@@ -364,10 +367,14 @@
std::set<int> deads;
for (auto it = mAcks.begin(); it != mAcks.end(); ++it) {
if (it->second != mInvalidationId) {
- const sp<IObserver> observer = mObservers[it->first].promote();
+ const sp<IObserver> observer = mObservers[it->first];
if (observer) {
- observer->onMessage(it->first, mInvalidationId);
+ ALOGV("connection %lld call observer (%u: %u)",
+ (long long)it->first, it->second, mInvalidationId);
+ Return<void> transResult = observer->onMessage(it->first, mInvalidationId);
+ (void) transResult;
} else {
+ ALOGV("bufferpool observer died %lld", (long long)it->first);
deads.insert(it->first);
}
}
@@ -379,7 +386,7 @@
}
}
// All invalidation Ids are synced.
- sInvalidator.delAccessor(mId);
+ sInvalidator->delAccessor(mId);
}
bool Accessor::Impl::BufferPool::handleOwnBuffer(
@@ -542,6 +549,7 @@
break;
case BufferStatus::INVALIDATION_ACK:
mInvalidation.onAck(message.connectionId, message.bufferId);
+ ret = true;
break;
}
if (ret == false) {
@@ -727,6 +735,7 @@
BufferId to = mSeq;
mStartSeq = mSeq;
// TODO: needsAck params
+ ALOGV("buffer invalidation request bp:%u %u %u", mInvalidation.mId, from, to);
if (from != to) {
invalidate(true, from, to, impl);
}
@@ -791,6 +800,7 @@
notify = true;
}
mAccessors.insert(std::make_pair(accessorId, impl));
+ ALOGV("buffer invalidation added bp:%u %d", accessorId, notify);
}
lock.unlock();
if (notify) {
@@ -801,12 +811,19 @@
void Accessor::Impl::AccessorInvalidator::delAccessor(uint32_t accessorId) {
std::lock_guard<std::mutex> lock(mMutex);
mAccessors.erase(accessorId);
+ ALOGV("buffer invalidation deleted bp:%u", accessorId);
if (mAccessors.size() == 0) {
mReady = false;
}
}
-Accessor::Impl::AccessorInvalidator Accessor::Impl::sInvalidator;
+std::unique_ptr<Accessor::Impl::AccessorInvalidator> Accessor::Impl::sInvalidator;
+
+void Accessor::Impl::createInvalidator() {
+ if (!sInvalidator) {
+ sInvalidator = std::make_unique<Accessor::Impl::AccessorInvalidator>();
+ }
+}
} // namespace implementation
} // namespace V2_0
diff --git a/media/bufferpool/2.0/AccessorImpl.h b/media/bufferpool/2.0/AccessorImpl.h
index 6b03494..b3faa96 100644
--- a/media/bufferpool/2.0/AccessorImpl.h
+++ b/media/bufferpool/2.0/AccessorImpl.h
@@ -34,7 +34,7 @@
/**
* An implementation of a buffer pool accessor(or a buffer pool implementation.) */
-class Accessor::Impl
+class Accessor::Impl
: public std::enable_shared_from_this<Accessor::Impl> {
public:
Impl(const std::shared_ptr<BufferPoolAllocator> &allocator);
@@ -69,6 +69,8 @@
void handleInvalidateAck();
+ static void createInvalidator();
+
private:
// ConnectionId = pid : (timestamp_created + seqId)
// in order to guarantee uniqueness for each connection
@@ -111,7 +113,7 @@
std::set<BufferId> mFreeBuffers;
struct Invalidation {
- static std::atomic<std::uint32_t> sSeqId;
+ static std::atomic<std::uint32_t> sInvSeqId;
struct Pending {
bool mNeedsAck;
@@ -128,18 +130,18 @@
mImpl(impl)
{}
- bool invalidate(uint32_t bufferId) {
+ bool isInvalidated(uint32_t bufferId) {
return isBufferInRange(mFrom, mTo, bufferId) && --mLeft == 0;
}
};
std::list<Pending> mPendings;
std::map<ConnectionId, uint32_t> mAcks;
- std::map<ConnectionId, const wp<IObserver>> mObservers;
+ std::map<ConnectionId, const sp<IObserver>> mObservers;
uint32_t mInvalidationId;
uint32_t mId;
- Invalidation() : mInvalidationId(0), mId(sSeqId.fetch_add(1)) {}
+ Invalidation() : mInvalidationId(0), mId(sInvSeqId.fetch_add(1)) {}
void onConnect(ConnectionId conId, const sp<IObserver> &observer);
@@ -234,6 +236,8 @@
void invalidate(bool needsAck, BufferId from, BufferId to,
const std::shared_ptr<Accessor::Impl> &impl);
+ static void createInvalidator();
+
public:
/** Creates a buffer pool. */
BufferPool();
@@ -376,7 +380,7 @@
void delAccessor(uint32_t accessorId);
};
- static AccessorInvalidator sInvalidator;
+ static std::unique_ptr<AccessorInvalidator> sInvalidator;
static void invalidatorThread(
std::map<uint32_t, const std::weak_ptr<Accessor::Impl>> &accessors,
diff --git a/media/bufferpool/2.0/BufferPoolClient.cpp b/media/bufferpool/2.0/BufferPoolClient.cpp
index c80beff..5564a13 100644
--- a/media/bufferpool/2.0/BufferPoolClient.cpp
+++ b/media/bufferpool/2.0/BufferPoolClient.cpp
@@ -644,7 +644,7 @@
} else if (messageId != 0) {
// messages are drained.
if (isMessageLater(messageId, mReleasing.mInvalidateId)) {
- mReleasing.mInvalidateId = lastMsgId;
+ mReleasing.mInvalidateId = messageId;
mReleasing.mInvalidateAck = true;
}
}
@@ -653,6 +653,9 @@
mReleasing.mStatusChannel->postBufferInvalidateAck(
mConnectionId,
mReleasing.mInvalidateId, &mReleasing.mInvalidateAck);
+ ALOGV("client %lld invalidateion ack (%d) %u",
+ (long long)mConnectionId,
+ mReleasing.mInvalidateAck, mReleasing.mInvalidateId);
}
}
return cleared;
@@ -808,6 +811,7 @@
}
void BufferPoolClient::receiveInvalidation(uint32_t msgId) {
+ ALOGV("bufferpool client recv inv %u", msgId);
if (isValid()) {
mImpl->receiveInvalidation(msgId);
}
diff --git a/media/bufferpool/2.0/ClientManager.cpp b/media/bufferpool/2.0/ClientManager.cpp
index f8531d8..c31d313 100644
--- a/media/bufferpool/2.0/ClientManager.cpp
+++ b/media/bufferpool/2.0/ClientManager.cpp
@@ -24,6 +24,7 @@
#include <utils/Log.h>
#include "BufferPoolClient.h"
#include "Observer.h"
+#include "Accessor.h"
namespace android {
namespace hardware {
@@ -453,6 +454,7 @@
if (!sInstance) {
sInstance = new ClientManager();
}
+ Accessor::createInvalidator();
return sInstance;
}
diff --git a/media/extractors/mp3/MP3Extractor.cpp b/media/extractors/mp3/MP3Extractor.cpp
index 621fd03..0e1ffb4 100644
--- a/media/extractors/mp3/MP3Extractor.cpp
+++ b/media/extractors/mp3/MP3Extractor.cpp
@@ -546,7 +546,8 @@
buffer->release();
buffer = NULL;
- return (n < 0 ? AMEDIA_ERROR_UNKNOWN : AMEDIA_ERROR_END_OF_STREAM);
+ return ((n < 0 && n != ERROR_END_OF_STREAM) ?
+ AMEDIA_ERROR_UNKNOWN : AMEDIA_ERROR_END_OF_STREAM);
}
uint32_t header = U32_AT((const uint8_t *)buffer->data());
@@ -590,7 +591,8 @@
buffer->release();
buffer = NULL;
- return (n < 0 ? AMEDIA_ERROR_UNKNOWN : AMEDIA_ERROR_END_OF_STREAM);
+ return ((n < 0 && n != ERROR_END_OF_STREAM) ?
+ AMEDIA_ERROR_UNKNOWN : AMEDIA_ERROR_END_OF_STREAM);
}
buffer->set_range(0, frame_size);
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 524d02f..7323f43 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -56,6 +56,8 @@
#define UINT32_MAX (4294967295U)
#endif
+#define ALAC_SPECIFIC_INFO_SIZE (36)
+
namespace android {
enum {
@@ -334,6 +336,8 @@
case FOURCC('t', 'w', 'o', 's'):
case FOURCC('s', 'o', 'w', 't'):
return MEDIA_MIMETYPE_AUDIO_RAW;
+ case FOURCC('a', 'l', 'a', 'c'):
+ return MEDIA_MIMETYPE_AUDIO_ALAC;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
@@ -1124,6 +1128,43 @@
mLastTrack->meta.setInt32(kKeyChannelCount, num_channels);
mLastTrack->meta.setInt32(kKeySampleRate, sample_rate);
}
+
+ // If format type is 'alac', it is necessary to get the parameters
+ // from a alac atom spreading behind the frma atom.
+ // See 'external/alac/ALACMagicCookieDescription.txt'.
+ if (original_fourcc == FOURCC('a', 'l', 'a', 'c')) {
+ // Store ALAC magic cookie (decoder needs it).
+ uint8_t alacInfo[12];
+ data_offset = *offset;
+ if (mDataSource->readAt(
+ data_offset, alacInfo, sizeof(alacInfo)) < (ssize_t)sizeof(alacInfo)) {
+ return ERROR_IO;
+ }
+ uint32_t size = U32_AT(&alacInfo[0]);
+ if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
+ (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+ (U32_AT(&alacInfo[8]) != 0)) {
+ return ERROR_MALFORMED;
+ }
+
+ data_offset += sizeof(alacInfo);
+ uint8_t cookie[size - sizeof(alacInfo)];
+ if (mDataSource->readAt(
+ data_offset, cookie, sizeof(cookie)) < (ssize_t)sizeof(cookie)) {
+ return ERROR_IO;
+ }
+
+ uint8_t bitsPerSample = cookie[5];
+ mLastTrack->meta.setInt32(kKeyBitsPerSample, bitsPerSample);
+ mLastTrack->meta.setInt32(kKeyChannelCount, cookie[9]);
+ mLastTrack->meta.setInt32(kKeySampleRate, U32_AT(&cookie[20]));
+ mLastTrack->meta.setData(
+ kKeyAlacMagicCookie, MetaData::TYPE_NONE, cookie, sizeof(cookie));
+
+ // Add the size of ALAC Specific Info (36 bytes) and terminator
+ // atom (8 bytes).
+ *offset += (size + 8);
+ }
break;
}
@@ -1492,6 +1533,7 @@
case FOURCC('s', 'a', 'w', 'b'):
case FOURCC('t', 'w', 'o', 's'):
case FOURCC('s', 'o', 'w', 't'):
+ case FOURCC('a', 'l', 'a', 'c'):
{
if (mIsQT && chunk_type == FOURCC('m', 'p', '4', 'a')
&& depth >= 1 && mPath[depth - 1] == FOURCC('w', 'a', 'v', 'e')) {
@@ -1574,6 +1616,40 @@
mLastTrack->meta.setInt32(kKeyChannelCount, num_channels);
mLastTrack->meta.setInt32(kKeySampleRate, sample_rate);
+ if (chunk_type == FOURCC('a', 'l', 'a', 'c')) {
+
+ // See 'external/alac/ALACMagicCookieDescription.txt' for the details.
+ // Store ALAC magic cookie (decoder needs it).
+ uint8_t alacInfo[12];
+ data_offset += sizeof(buffer);
+ if (mDataSource->readAt(
+ data_offset, alacInfo, sizeof(alacInfo)) < (ssize_t)sizeof(alacInfo)) {
+ return ERROR_IO;
+ }
+ uint32_t size = U32_AT(&alacInfo[0]);
+ if ((size != ALAC_SPECIFIC_INFO_SIZE) ||
+ (U32_AT(&alacInfo[4]) != FOURCC('a', 'l', 'a', 'c')) ||
+ (U32_AT(&alacInfo[8]) != 0)) {
+ return ERROR_MALFORMED;
+ }
+ data_offset += sizeof(alacInfo);
+ uint8_t cookie[size - sizeof(alacInfo)];
+ if (mDataSource->readAt(
+ data_offset, cookie, sizeof(cookie)) < (ssize_t)sizeof(cookie)) {
+ return ERROR_IO;
+ }
+
+ uint8_t bitsPerSample = cookie[5];
+ mLastTrack->meta.setInt32(kKeyBitsPerSample, bitsPerSample);
+ mLastTrack->meta.setInt32(kKeyChannelCount, cookie[9]);
+ mLastTrack->meta.setInt32(kKeySampleRate, U32_AT(&cookie[20]));
+ mLastTrack->meta.setData(
+ kKeyAlacMagicCookie, MetaData::TYPE_NONE, cookie, sizeof(cookie));
+ data_offset += sizeof(cookie);
+ *offset = data_offset;
+ CHECK_EQ(*offset, stop_offset);
+ }
+
while (*offset < stop_offset) {
status_t err = parseChunk(offset, depth + 1);
if (err != OK) {
diff --git a/media/libmedia/include/media/JAudioAttributes.h b/media/libmedia/include/media/JAudioAttributes.h
index fb11435..ea0aaa3 100644
--- a/media/libmedia/include/media/JAudioAttributes.h
+++ b/media/libmedia/include/media/JAudioAttributes.h
@@ -26,8 +26,7 @@
public:
/* Creates a Java AudioAttributes object. */
static jobject createAudioAttributesObj(JNIEnv *env,
- const audio_attributes_t* pAttributes,
- audio_stream_type_t streamType) {
+ const audio_attributes_t* pAttributes) {
jclass jBuilderCls = env->FindClass("android/media/AudioAttributes$Builder");
jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
@@ -58,11 +57,6 @@
// TODO: Handle the 'tags' (char[] to HashSet<String>).
// How to parse the char[]? Is there any example of it?
// Also, the addTags() method is hidden.
- } else {
- // Call AudioAttributes.Builder.setLegacyStreamType().build()
- jmethodID jSetLegacyStreamType = env->GetMethodID(jBuilderCls, "setLegacyStreamType",
- "(I)Landroid/media/AudioAttributes$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetLegacyStreamType, streamType);
}
jmethodID jBuild = env->GetMethodID(jBuilderCls, "build",
diff --git a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
index 28b8c2b..eea7cfc 100644
--- a/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
+++ b/media/libmediaextractor/include/media/stagefright/MetaDataBase.h
@@ -221,6 +221,9 @@
kKeyExifSize = 'exsz', // int64_t, Exif data size
kKeyIsExif = 'exif', // bool (int32_t) buffer contains exif data block
kKeyPcmBigEndian = 'pcmb', // bool (int32_t)
+
+ // Key for ALAC Magic Cookie
+ kKeyAlacMagicCookie = 'almc', // raw data
};
enum {
diff --git a/media/libmediaplayer2/Android.bp b/media/libmediaplayer2/Android.bp
index 75d1df0..2109ad1 100644
--- a/media/libmediaplayer2/Android.bp
+++ b/media/libmediaplayer2/Android.bp
@@ -59,6 +59,7 @@
"libstagefright_player2",
"libstagefright_rtsp",
"libstagefright_timedtext2",
+ "libmedia2_jni_core",
],
export_include_dirs: [
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index 778ae1b..543f700 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -28,7 +28,6 @@
// TODO: Store Java class/methodID as a member variable in the class.
// TODO: Add NULL && Exception checks after every JNI call.
JAudioTrack::JAudioTrack( // < Usages of the arguments are below >
- audio_stream_type_t streamType, // AudioAudioAttributes
uint32_t sampleRate, // AudioFormat && bufferSizeInBytes
audio_format_t format, // AudioFormat && bufferSizeInBytes
audio_channel_mask_t channelMask, // AudioFormat && bufferSizeInBytes
@@ -40,8 +39,10 @@
float maxRequiredSpeed) { // bufferSizeInBytes
JNIEnv *env = JavaVMHelper::getJNIEnv();
+
jclass jAudioTrackCls = env->FindClass("android/media/AudioTrack");
- mAudioTrackCls = (jclass) env->NewGlobalRef(jAudioTrackCls);
+ mAudioTrackCls = reinterpret_cast<jclass>(env->NewGlobalRef(jAudioTrackCls));
+ env->DeleteLocalRef(jAudioTrackCls);
maxRequiredSpeed = std::min(std::max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
@@ -64,10 +65,13 @@
jmethodID jBuilderCtor = env->GetMethodID(jBuilderCls, "<init>", "()V");
jobject jBuilderObj = env->NewObject(jBuilderCls, jBuilderCtor);
+ jobject jAudioAttributesObj = JAudioAttributes::createAudioAttributesObj(env, pAttributes);
+ mAudioAttributesObj = reinterpret_cast<jobject>(env->NewGlobalRef(jAudioAttributesObj));
+ env->DeleteLocalRef(jAudioAttributesObj);
+
jmethodID jSetAudioAttributes = env->GetMethodID(jBuilderCls, "setAudioAttributes",
"(Landroid/media/AudioAttributes;)Landroid/media/AudioTrack$Builder;");
- jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioAttributes,
- JAudioAttributes::createAudioAttributesObj(env, pAttributes, streamType));
+ jBuilderObj = env->CallObjectMethod(jBuilderObj, jSetAudioAttributes, mAudioAttributesObj);
jmethodID jSetAudioFormat = env->GetMethodID(jBuilderCls, "setAudioFormat",
"(Landroid/media/AudioFormat;)Landroid/media/AudioTrack$Builder;");
@@ -100,7 +104,9 @@
}
jmethodID jBuild = env->GetMethodID(jBuilderCls, "build", "()Landroid/media/AudioTrack;");
- mAudioTrackObj = env->CallObjectMethod(jBuilderObj, jBuild);
+ jobject jAudioTrackObj = env->CallObjectMethod(jBuilderObj, jBuild);
+ mAudioTrackObj = reinterpret_cast<jobject>(env->NewGlobalRef(jAudioTrackObj));
+ env->DeleteLocalRef(jBuilderObj);
if (cbf != NULL) {
// Set offload mode callback
@@ -118,6 +124,8 @@
JAudioTrack::~JAudioTrack() {
JNIEnv *env = JavaVMHelper::getJNIEnv();
env->DeleteGlobalRef(mAudioTrackCls);
+ env->DeleteGlobalRef(mAudioTrackObj);
+ env->DeleteGlobalRef(mAudioAttributesObj);
}
size_t JAudioTrack::frameCount() {
@@ -151,21 +159,21 @@
return NO_ERROR;
}
-bool JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
+status_t JAudioTrack::getTimestamp(AudioTimestamp& timestamp) {
JNIEnv *env = JavaVMHelper::getJNIEnv();
jclass jAudioTimeStampCls = env->FindClass("android/media/AudioTimestamp");
jobject jAudioTimeStampObj = env->AllocObject(jAudioTimeStampCls);
- jfieldID jFramePosition = env->GetFieldID(jAudioTimeStampCls, "framePosition", "L");
- jfieldID jNanoTime = env->GetFieldID(jAudioTimeStampCls, "nanoTime", "L");
+ jfieldID jFramePosition = env->GetFieldID(jAudioTimeStampCls, "framePosition", "J");
+ jfieldID jNanoTime = env->GetFieldID(jAudioTimeStampCls, "nanoTime", "J");
jmethodID jGetTimestamp = env->GetMethodID(mAudioTrackCls,
- "getTimestamp", "(Landroid/media/AudioTimestamp)B");
+ "getTimestamp", "(Landroid/media/AudioTimestamp;)Z");
bool success = env->CallBooleanMethod(mAudioTrackObj, jGetTimestamp, jAudioTimeStampObj);
if (!success) {
- return false;
+ return NO_INIT;
}
long long framePosition = env->GetLongField(jAudioTimeStampObj, jFramePosition);
@@ -178,7 +186,7 @@
timestamp.mTime = ts;
timestamp.mPosition = (uint32_t) framePosition;
- return true;
+ return NO_ERROR;
}
status_t JAudioTrack::getTimestamp(ExtendedTimestamp *timestamp __unused) {
@@ -423,6 +431,35 @@
return audioFormatToNative(javaFormat);
}
+size_t JAudioTrack::frameSize() {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+
+ // TODO: Currently calculated here by reimplementing the logic in AudioTrack.java;
+ // remove once AudioTrack.java exposes this parameter (i.e. getFrameSizeInBytes()).
+ jmethodID jGetAudioFormat = env->GetMethodID(mAudioTrackCls, "getAudioFormat", "()I");
+ int javaFormat = env->CallIntMethod(mAudioTrackObj, jGetAudioFormat);
+
+ jclass jAudioFormatCls = env->FindClass("android/media/AudioFormat");
+ jmethodID jIsEncodingLinearFrames = env->GetStaticMethodID(
+ jAudioFormatCls, "isEncodingLinearFrames", "(I)Z");
+ jboolean javaIsEncodingLinearFrames = env->CallStaticBooleanMethod(
+ jAudioFormatCls, jIsEncodingLinearFrames, javaFormat);
+
+ if (javaIsEncodingLinearFrames == false) {
+ return 1;
+ }
+
+ jmethodID jGetBytesPerSample = env->GetStaticMethodID(jAudioFormatCls,
+ "getBytesPerSample", "(I)I");
+ int javaBytesPerSample = env->CallStaticIntMethod(jAudioFormatCls,
+ jGetBytesPerSample, javaFormat);
+
+ jmethodID jGetChannelCount = env->GetMethodID(mAudioTrackCls, "getChannelCount", "()I");
+ int javaChannelCount = env->CallIntMethod(mAudioTrackObj, jGetChannelCount);
+
+ return javaChannelCount * javaBytesPerSample;
+}
+
status_t JAudioTrack::dump(int fd, const Vector<String16>& args __unused) const
{
String8 result;
@@ -432,10 +469,6 @@
// TODO: Remove logs that includes unavailable information from below.
// result.appendFormat(" status(%d), state(%d), session Id(%d), flags(%#x)\n",
// mStatus, mState, mSessionId, mFlags);
-// result.appendFormat(" stream type(%d), left - right volume(%f, %f)\n",
-// (mStreamType == AUDIO_STREAM_DEFAULT) ?
-// audio_attributes_to_stream_type(&mAttributes) : mStreamType,
-// mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
// result.appendFormat(" format(%#x), channel mask(%#x), channel count(%u)\n",
// format(), mChannelMask, channelCount());
// result.appendFormat(" sample rate(%u), original sample rate(%u), speed(%f)\n",
@@ -453,19 +486,11 @@
return NO_ERROR;
}
-audio_port_handle_t JAudioTrack::getRoutedDeviceId() {
+jobject JAudioTrack::getRoutedDevice() {
JNIEnv *env = JavaVMHelper::getJNIEnv();
jmethodID jGetRoutedDevice = env->GetMethodID(mAudioTrackCls, "getRoutedDevice",
"()Landroid/media/AudioDeviceInfo;");
- jobject jAudioDeviceInfoObj = env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
- if (env->IsSameObject(jAudioDeviceInfoObj, NULL)) {
- return AUDIO_PORT_HANDLE_NONE;
- }
-
- jclass jAudioDeviceInfoCls = env->FindClass("Landroid/media/AudioDeviceInfo");
- jmethodID jGetId = env->GetMethodID(jAudioDeviceInfoCls, "getId", "()I");
- jint routedDeviceId = env->CallIntMethod(jAudioDeviceInfoObj, jGetId);
- return routedDeviceId;
+ return env->CallObjectMethod(mAudioTrackObj, jGetRoutedDevice);
}
audio_session_t JAudioTrack::getAudioSessionId() {
@@ -475,16 +500,23 @@
return (audio_session_t) sessionId;
}
-status_t JAudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
+status_t JAudioTrack::setPreferredDevice(jobject device) {
JNIEnv *env = JavaVMHelper::getJNIEnv();
- jclass jMP2ImplCls = env->FindClass("android/media/MediaPlayer2Impl");
- jmethodID jSetAudioOutputDeviceById = env->GetMethodID(
- jMP2ImplCls, "setAudioOutputDeviceById", "(Landroid/media/AudioTrack;I)Z");
- jboolean result = env->CallStaticBooleanMethod(
- jMP2ImplCls, jSetAudioOutputDeviceById, mAudioTrackObj, deviceId);
+ jmethodID jSetPreferredDeviceId = env->GetMethodID(mAudioTrackCls, "setPreferredDevice",
+ "(Landroid/media/AudioDeviceInfo;)Z");
+ jboolean result = env->CallBooleanMethod(mAudioTrackObj, jSetPreferredDeviceId, device);
return result == true ? NO_ERROR : BAD_VALUE;
}
+audio_stream_type_t JAudioTrack::getAudioStreamType() {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jclass jAudioAttributesCls = env->FindClass("android/media/AudioAttributes");
+ jmethodID jGetVolumeControlStream = env->GetMethodID(jAudioAttributesCls,
+ "getVolumeControlStream", "()I");
+ int javaAudioStreamType = env->CallIntMethod(mAudioAttributesObj, jGetVolumeControlStream);
+ return (audio_stream_type_t)javaAudioStreamType;
+}
+
status_t JAudioTrack::pendingDuration(int32_t *msec) {
if (msec == nullptr) {
return BAD_VALUE;
@@ -526,18 +558,85 @@
return NO_ERROR;
}
-status_t JAudioTrack::addAudioDeviceCallback(
- const sp<AudioSystem::AudioDeviceCallback>& callback __unused) {
- // TODO: Implement this after appropriate Java AudioTrack method is available.
+status_t JAudioTrack::addAudioDeviceCallback(jobject listener, jobject handler) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jmethodID jAddOnRoutingChangedListener = env->GetMethodID(mAudioTrackCls,
+ "addOnRoutingChangedListener",
+ "(Landroid/media/AudioRouting$OnRoutingChangedListener;Landroid/os/Handler;)V");
+ env->CallVoidMethod(mAudioTrackObj, jAddOnRoutingChangedListener, listener, handler);
return NO_ERROR;
}
-status_t JAudioTrack::removeAudioDeviceCallback(
- const sp<AudioSystem::AudioDeviceCallback>& callback __unused) {
- // TODO: Implement this after appropriate Java AudioTrack method is available.
+status_t JAudioTrack::removeAudioDeviceCallback(jobject listener) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jmethodID jRemoveOnRoutingChangedListener = env->GetMethodID(mAudioTrackCls,
+ "removeOnRoutingChangedListener",
+ "(Landroid/media/AudioRouting$OnRoutingChangedListener;)V");
+ env->CallVoidMethod(mAudioTrackObj, jRemoveOnRoutingChangedListener, listener);
return NO_ERROR;
}
+void JAudioTrack::registerRoutingDelegates(
+ std::vector<std::pair<jobject, jobject>>& routingDelegates) {
+ for (std::vector<std::pair<jobject, jobject>>::iterator it = routingDelegates.begin();
+ it != routingDelegates.end(); it++) {
+ addAudioDeviceCallback(it->second, getHandler(it->second));
+ }
+}
+
+/////////////////////////////////////////////////////////////
+/// Static methods begin ///
+/////////////////////////////////////////////////////////////
+jobject JAudioTrack::getListener(const jobject routingDelegateObj) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jclass jRoutingDelegateCls = env->FindClass("android/media/RoutingDelegate");
+ jmethodID jGetListener = env->GetMethodID(jRoutingDelegateCls,
+ "getListener", "()Landroid/media/AudioRouting$OnRoutingChangedListener;");
+ return env->CallObjectMethod(routingDelegateObj, jGetListener);
+}
+
+jobject JAudioTrack::getHandler(const jobject routingDelegateObj) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ jclass jRoutingDelegateCls = env->FindClass("android/media/RoutingDelegate");
+ jmethodID jGetHandler = env->GetMethodID(jRoutingDelegateCls,
+ "getHandler", "()Landroid/os/Handler;");
+ return env->CallObjectMethod(routingDelegateObj, jGetHandler);
+}
+
+jobject JAudioTrack::addGlobalRef(const jobject obj) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ return reinterpret_cast<jobject>(env->NewGlobalRef(obj));
+}
+
+status_t JAudioTrack::removeGlobalRef(const jobject obj) {
+ if (obj == NULL) {
+ return BAD_VALUE;
+ }
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ env->DeleteGlobalRef(obj);
+ return NO_ERROR;
+}
+
+jobject JAudioTrack::findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+ if (env->IsSameObject(it->first, key)) {
+ return it->second;
+ }
+ }
+ return nullptr;
+}
+
+void JAudioTrack::eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+ if (env->IsSameObject(it->first, key)) {
+ mp.erase(it);
+ return;
+ }
+ }
+}
+
/////////////////////////////////////////////////////////////
/// Private method begins ///
/////////////////////////////////////////////////////////////
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
index 6b27ca7..149f243 100644
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -23,7 +23,6 @@
#include <utils/Log.h>
#include <media/AudioPolicyHelper.h>
-#include <media/AudioTrack.h>
#include <media/stagefright/foundation/ADebug.h>
namespace {
@@ -44,29 +43,27 @@
String8 result;
result.append(" MediaPlayer2AudioOutput\n");
- snprintf(buffer, 255, " stream type(%d), volume(%f)\n",
- mStreamType, mVolume);
+ snprintf(buffer, 255, " volume(%f)\n", mVolume);
result.append(buffer);
snprintf(buffer, 255, " msec per frame(%f), latency (%d)\n",
- mMsecsPerFrame, (mTrack != 0) ? mTrack->latency() : -1);
+ mMsecsPerFrame, (mJAudioTrack != nullptr) ? mJAudioTrack->latency() : -1);
result.append(buffer);
snprintf(buffer, 255, " aux effect id(%d), send level (%f)\n",
mAuxEffectId, mSendLevel);
result.append(buffer);
::write(fd, result.string(), result.size());
- if (mTrack != 0) {
- mTrack->dump(fd, args);
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->dump(fd, args);
}
return NO_ERROR;
}
MediaPlayer2AudioOutput::MediaPlayer2AudioOutput(audio_session_t sessionId, uid_t uid, int pid,
- const audio_attributes_t* attr, const sp<AudioSystem::AudioDeviceCallback>& deviceCallback)
- : mCallback(NULL),
- mCallbackCookie(NULL),
- mCallbackData(NULL),
- mStreamType(AUDIO_STREAM_MUSIC),
+ const audio_attributes_t* attr, std::vector<jobject>& routingDelegatesBackup)
+ : mCallback(nullptr),
+ mCallbackCookie(nullptr),
+ mCallbackData(nullptr),
mVolume(1.0),
mPlaybackRate(AUDIO_PLAYBACK_RATE_DEFAULT),
mSampleRateHz(0),
@@ -77,26 +74,30 @@
mPid(pid),
mSendLevel(0.0),
mAuxEffectId(0),
- mFlags(AUDIO_OUTPUT_FLAG_NONE),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mDeviceCallbackEnabled(false),
- mDeviceCallback(deviceCallback) {
+ mFlags(AUDIO_OUTPUT_FLAG_NONE) {
ALOGV("MediaPlayer2AudioOutput(%d)", sessionId);
- if (attr != NULL) {
+ if (attr != nullptr) {
mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
- if (mAttributes != NULL) {
+ if (mAttributes != nullptr) {
memcpy(mAttributes, attr, sizeof(audio_attributes_t));
- mStreamType = audio_attributes_to_stream_type(attr);
}
} else {
- mAttributes = NULL;
+ mAttributes = nullptr;
}
setMinBufferCount();
+ mRoutingDelegates.clear();
+ for (auto routingDelegate : routingDelegatesBackup) {
+ mRoutingDelegates.push_back(std::pair<jobject, jobject>(
+ JAudioTrack::getListener(routingDelegate), routingDelegate));
+ }
+ routingDelegatesBackup.clear();
}
MediaPlayer2AudioOutput::~MediaPlayer2AudioOutput() {
+ for (auto routingDelegate : mRoutingDelegates) {
+ JAudioTrack::removeGlobalRef(routingDelegate.second);
+ }
close();
free(mAttributes);
delete mCallbackData;
@@ -125,31 +126,31 @@
ssize_t MediaPlayer2AudioOutput::bufferSize() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
- return mTrack->frameCount() * mFrameSize;
+ return mJAudioTrack->frameCount() * mFrameSize;
}
ssize_t MediaPlayer2AudioOutput::frameCount() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
- return mTrack->frameCount();
+ return mJAudioTrack->frameCount();
}
ssize_t MediaPlayer2AudioOutput::channelCount() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
- return mTrack->channelCount();
+ return mJAudioTrack->channelCount();
}
ssize_t MediaPlayer2AudioOutput::frameSize() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
return mFrameSize;
@@ -157,10 +158,10 @@
uint32_t MediaPlayer2AudioOutput::latency () const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return 0;
}
- return mTrack->latency();
+ return mJAudioTrack->latency();
}
float MediaPlayer2AudioOutput::msecsPerFrame() const {
@@ -170,18 +171,18 @@
status_t MediaPlayer2AudioOutput::getPosition(uint32_t *position) const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
- return mTrack->getPosition(position);
+ return mJAudioTrack->getPosition(position);
}
status_t MediaPlayer2AudioOutput::getTimestamp(AudioTimestamp &ts) const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
- return mTrack->getTimestamp(ts);
+ return mJAudioTrack->getTimestamp(ts);
}
// TODO: Remove unnecessary calls to getPlayedOutDurationUs()
@@ -194,7 +195,7 @@
// Calculate duration of played samples if played at normal rate (i.e., 1.0).
int64_t MediaPlayer2AudioOutput::getPlayedOutDurationUs(int64_t nowUs) const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0 || mSampleRateHz == 0) {
+ if (mJAudioTrack == nullptr || mSampleRateHz == 0) {
return 0;
}
@@ -202,22 +203,18 @@
int64_t numFramesPlayedAtUs;
AudioTimestamp ts;
- status_t res = mTrack->getTimestamp(ts);
+ status_t res = mJAudioTrack->getTimestamp(ts);
+
if (res == OK) { // case 1: mixing audio tracks and offloaded tracks.
numFramesPlayed = ts.mPosition;
numFramesPlayedAtUs = ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
//ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
- } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
+ } else { // case 2: transitory state on start of a new track
+ // case 3: transitory at new track or audio fast tracks.
numFramesPlayed = 0;
numFramesPlayedAtUs = nowUs;
//ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
// numFramesPlayed, (long long)numFramesPlayedAtUs);
- } else { // case 3: transitory at new track or audio fast tracks.
- res = mTrack->getPosition(&numFramesPlayed);
- CHECK_EQ(res, (status_t)OK);
- numFramesPlayedAtUs = nowUs;
- numFramesPlayedAtUs += 1000LL * mTrack->latency() / 2; /* XXX */
- //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAtUs);
}
// CHECK_EQ(numFramesPlayed & (1 << 31), 0); // can't be negative until 12.4 hrs, test
@@ -243,57 +240,41 @@
status_t MediaPlayer2AudioOutput::getFramesWritten(uint32_t *frameswritten) const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == nullptr) {
return NO_INIT;
}
ExtendedTimestamp ets;
- status_t status = mTrack->getTimestamp(&ets);
+ status_t status = mJAudioTrack->getTimestamp(&ets);
if (status == OK || status == WOULD_BLOCK) {
*frameswritten = (uint32_t)ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT];
}
return status;
}
-status_t MediaPlayer2AudioOutput::setParameters(const String8& keyValuePairs) {
- Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
- return NO_INIT;
- }
- return mTrack->setParameters(keyValuePairs);
-}
-
-String8 MediaPlayer2AudioOutput::getParameters(const String8& keys) {
- Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
- return String8::empty();
- }
- return mTrack->getParameters(keys);
-}
-
void MediaPlayer2AudioOutput::setAudioAttributes(const audio_attributes_t * attributes) {
Mutex::Autolock lock(mLock);
- if (attributes == NULL) {
+ if (attributes == nullptr) {
free(mAttributes);
- mAttributes = NULL;
+ mAttributes = nullptr;
} else {
- if (mAttributes == NULL) {
+ if (mAttributes == nullptr) {
mAttributes = (audio_attributes_t *) calloc(1, sizeof(audio_attributes_t));
}
memcpy(mAttributes, attributes, sizeof(audio_attributes_t));
- mStreamType = audio_attributes_to_stream_type(attributes);
}
}
-void MediaPlayer2AudioOutput::setAudioStreamType(audio_stream_type_t streamType) {
+audio_stream_type_t MediaPlayer2AudioOutput::getAudioStreamType() const {
+ ALOGV("getAudioStreamType");
Mutex::Autolock lock(mLock);
- // do not allow direct stream type modification if attributes have been set
- if (mAttributes == NULL) {
- mStreamType = streamType;
+ if (mJAudioTrack == nullptr) {
+ return AUDIO_STREAM_DEFAULT;
}
+ return mJAudioTrack->getAudioStreamType();
}
void MediaPlayer2AudioOutput::close_l() {
- mTrack.clear();
+ mJAudioTrack.clear();
}
status_t MediaPlayer2AudioOutput::open(
@@ -302,7 +283,6 @@
AudioCallback cb, void *cookie,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo,
- bool doNotReconnect,
uint32_t suggestedFrameCount) {
ALOGV("open(%u, %d, 0x%x, 0x%x, %d 0x%x)", sampleRate, channelCount, channelMask,
format, mSessionId, flags);
@@ -310,7 +290,7 @@
// offloading is only supported in callback mode for now.
// offloadInfo must be present if offload flag is set
if (((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) != 0) &&
- ((cb == NULL) || (offloadInfo == NULL))) {
+ ((cb == nullptr) || (offloadInfo == nullptr))) {
return BAD_VALUE;
}
@@ -330,32 +310,23 @@
mCallback = cb;
mCallbackCookie = cookie;
- sp<AudioTrack> t;
- CallbackData *newcbd = NULL;
+ sp<JAudioTrack> jT;
+ CallbackData *newcbd = nullptr;
- ALOGV("creating new AudioTrack");
+ ALOGV("creating new JAudioTrack");
- if (mCallback != NULL) {
+ if (mCallback != nullptr) {
newcbd = new CallbackData(this);
- t = new AudioTrack(
- mStreamType,
- sampleRate,
- format,
- channelMask,
- frameCount,
- flags,
- CallbackWrapper,
- newcbd,
- 0, // notification frames
- mSessionId,
- AudioTrack::TRANSFER_CALLBACK,
- offloadInfo,
- mUid,
- mPid,
- mAttributes,
- doNotReconnect,
- 1.0f, // default value for maxRequiredSpeed
- mSelectedDeviceId);
+ jT = new JAudioTrack(
+ sampleRate,
+ format,
+ channelMask,
+ CallbackWrapper,
+ newcbd,
+ frameCount,
+ mSessionId,
+ mAttributes,
+ 1.0f); // default value for maxRequiredSpeed
} else {
// TODO: Due to buffer memory concerns, we use a max target playback speed
// based on mPlaybackRate at the time of open (instead of kMaxRequiredSpeed),
@@ -365,73 +336,62 @@
ALOGW_IF(targetSpeed != mPlaybackRate.mSpeed,
"track target speed:%f clamped from playback speed:%f",
targetSpeed, mPlaybackRate.mSpeed);
- t = new AudioTrack(
- mStreamType,
- sampleRate,
- format,
- channelMask,
- frameCount,
- flags,
- NULL, // callback
- NULL, // user data
- 0, // notification frames
- mSessionId,
- AudioTrack::TRANSFER_DEFAULT,
- NULL, // offload info
- mUid,
- mPid,
- mAttributes,
- doNotReconnect,
- targetSpeed,
- mSelectedDeviceId);
+ jT = new JAudioTrack(
+ sampleRate,
+ format,
+ channelMask,
+ nullptr,
+ nullptr,
+ frameCount,
+ mSessionId,
+ mAttributes,
+ targetSpeed);
}
- if ((t == 0) || (t->initCheck() != NO_ERROR)) {
+ if (jT == 0) {
ALOGE("Unable to create audio track");
delete newcbd;
// t goes out of scope, so reference count drops to zero
return NO_INIT;
- } else {
- // successful AudioTrack initialization implies a legacy stream type was generated
- // from the audio attributes
- mStreamType = t->streamType();
}
- CHECK((t != NULL) && ((mCallback == NULL) || (newcbd != NULL)));
+ CHECK((jT != nullptr) && ((mCallback == nullptr) || (newcbd != nullptr)));
mCallbackData = newcbd;
ALOGV("setVolume");
- t->setVolume(mVolume);
+ jT->setVolume(mVolume);
mSampleRateHz = sampleRate;
mFlags = flags;
mMsecsPerFrame = 1E3f / (mPlaybackRate.mSpeed * sampleRate);
- mFrameSize = t->frameSize();
- mTrack = t;
+ mFrameSize = jT->frameSize();
+ mJAudioTrack = jT;
return updateTrack_l();
}
status_t MediaPlayer2AudioOutput::updateTrack_l() {
- if (mTrack == NULL) {
+ if (mJAudioTrack == nullptr) {
return NO_ERROR;
}
status_t res = NO_ERROR;
// Note some output devices may give us a direct track even though we don't specify it.
// Example: Line application b/17459982.
- if ((mTrack->getFlags()
+ if ((mJAudioTrack->getFlags()
& (AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD | AUDIO_OUTPUT_FLAG_DIRECT)) == 0) {
- res = mTrack->setPlaybackRate(mPlaybackRate);
+ res = mJAudioTrack->setPlaybackRate(mPlaybackRate);
if (res == NO_ERROR) {
- mTrack->setAuxEffectSendLevel(mSendLevel);
- res = mTrack->attachAuxEffect(mAuxEffectId);
+ mJAudioTrack->setAuxEffectSendLevel(mSendLevel);
+ res = mJAudioTrack->attachAuxEffect(mAuxEffectId);
}
}
- mTrack->setOutputDevice(mSelectedDeviceId);
- if (mDeviceCallbackEnabled) {
- mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
+ if (mPreferredDevice != nullptr) {
+ mJAudioTrack->setPreferredDevice(mPreferredDevice->getJObject());
}
+
+ mJAudioTrack->registerRoutingDelegates(mRoutingDelegates);
+
ALOGV("updateTrack_l() DONE status %d", res);
return res;
}
@@ -439,13 +399,13 @@
status_t MediaPlayer2AudioOutput::start() {
ALOGV("start");
Mutex::Autolock lock(mLock);
- if (mCallbackData != NULL) {
+ if (mCallbackData != nullptr) {
mCallbackData->endTrackSwitch();
}
- if (mTrack != 0) {
- mTrack->setVolume(mVolume);
- mTrack->setAuxEffectSendLevel(mSendLevel);
- status_t status = mTrack->start();
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->setVolume(mVolume);
+ mJAudioTrack->setAuxEffectSendLevel(mSendLevel);
+ status_t status = mJAudioTrack->start();
return status;
}
return NO_INIT;
@@ -453,11 +413,11 @@
ssize_t MediaPlayer2AudioOutput::write(const void* buffer, size_t size, bool blocking) {
Mutex::Autolock lock(mLock);
- LOG_ALWAYS_FATAL_IF(mCallback != NULL, "Don't call write if supplying a callback.");
+ LOG_ALWAYS_FATAL_IF(mCallback != nullptr, "Don't call write if supplying a callback.");
//ALOGV("write(%p, %u)", buffer, size);
- if (mTrack != 0) {
- return mTrack->write(buffer, size, blocking);
+ if (mJAudioTrack != nullptr) {
+ return mJAudioTrack->write(buffer, size, blocking);
}
return NO_INIT;
}
@@ -465,34 +425,34 @@
void MediaPlayer2AudioOutput::stop() {
ALOGV("stop");
Mutex::Autolock lock(mLock);
- if (mTrack != 0) {
- mTrack->stop();
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->stop();
}
}
void MediaPlayer2AudioOutput::flush() {
ALOGV("flush");
Mutex::Autolock lock(mLock);
- if (mTrack != 0) {
- mTrack->flush();
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->flush();
}
}
void MediaPlayer2AudioOutput::pause() {
ALOGV("pause");
Mutex::Autolock lock(mLock);
- if (mTrack != 0) {
- mTrack->pause();
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->pause();
}
}
void MediaPlayer2AudioOutput::close() {
ALOGV("close");
- sp<AudioTrack> track;
+ sp<JAudioTrack> track;
{
Mutex::Autolock lock(mLock);
- track = mTrack;
- close_l(); // clears mTrack
+ track = mJAudioTrack;
+ close_l(); // clears mJAudioTrack
}
// destruction of the track occurs outside of mutex.
}
@@ -501,8 +461,8 @@
ALOGV("setVolume(%f)", volume);
Mutex::Autolock lock(mLock);
mVolume = volume;
- if (mTrack != 0) {
- mTrack->setVolume(volume);
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->setVolume(volume);
}
}
@@ -510,12 +470,12 @@
ALOGV("setPlaybackRate(%f %f %d %d)",
rate.mSpeed, rate.mPitch, rate.mFallbackMode, rate.mStretchMode);
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == 0) {
// remember rate so that we can set it when the track is opened
mPlaybackRate = rate;
return OK;
}
- status_t res = mTrack->setPlaybackRate(rate);
+ status_t res = mJAudioTrack->setPlaybackRate(rate);
if (res != NO_ERROR) {
return res;
}
@@ -531,10 +491,10 @@
status_t MediaPlayer2AudioOutput::getPlaybackRate(AudioPlaybackRate *rate) {
ALOGV("getPlaybackRate");
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == 0) {
return NO_INIT;
}
- *rate = mTrack->getPlaybackRate();
+ *rate = mJAudioTrack->getPlaybackRate();
return NO_ERROR;
}
@@ -542,8 +502,8 @@
ALOGV("setAuxEffectSendLevel(%f)", level);
Mutex::Autolock lock(mLock);
mSendLevel = level;
- if (mTrack != 0) {
- return mTrack->setAuxEffectSendLevel(level);
+ if (mJAudioTrack != nullptr) {
+ return mJAudioTrack->setAuxEffectSendLevel(level);
}
return NO_ERROR;
}
@@ -552,48 +512,72 @@
ALOGV("attachAuxEffect(%d)", effectId);
Mutex::Autolock lock(mLock);
mAuxEffectId = effectId;
- if (mTrack != 0) {
- return mTrack->attachAuxEffect(effectId);
+ if (mJAudioTrack != nullptr) {
+ return mJAudioTrack->attachAuxEffect(effectId);
}
return NO_ERROR;
}
-status_t MediaPlayer2AudioOutput::setOutputDevice(audio_port_handle_t deviceId) {
- ALOGV("setOutputDevice(%d)", deviceId);
+status_t MediaPlayer2AudioOutput::setPreferredDevice(jobject device) {
+ ALOGV("setPreferredDevice");
Mutex::Autolock lock(mLock);
- mSelectedDeviceId = deviceId;
- if (mTrack != 0) {
- return mTrack->setOutputDevice(deviceId);
+ status_t ret = NO_ERROR;
+ if (mJAudioTrack != nullptr) {
+ ret = mJAudioTrack->setPreferredDevice(device);
+ }
+ if (ret == NO_ERROR) {
+ mPreferredDevice = new JObjectHolder(device);
+ }
+ return ret;
+}
+
+jobject MediaPlayer2AudioOutput::getRoutedDevice() {
+ ALOGV("getRoutedDevice");
+ Mutex::Autolock lock(mLock);
+ if (mJAudioTrack != nullptr) {
+ return mJAudioTrack->getRoutedDevice();
+ }
+ return nullptr;
+}
+
+status_t MediaPlayer2AudioOutput::addAudioDeviceCallback(jobject jRoutingDelegate) {
+ ALOGV("addAudioDeviceCallback");
+ Mutex::Autolock lock(mLock);
+ jobject listener = JAudioTrack::getListener(jRoutingDelegate);
+ if (mJAudioTrack != nullptr &&
+ JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
+ jobject handler = JAudioTrack::getHandler(jRoutingDelegate);
+ jobject routingDelegate = JAudioTrack::addGlobalRef(jRoutingDelegate);
+ mRoutingDelegates.push_back(std::pair<jobject, jobject>(listener, routingDelegate));
+ return mJAudioTrack->addAudioDeviceCallback(routingDelegate, handler);
}
return NO_ERROR;
}
-status_t MediaPlayer2AudioOutput::getRoutedDeviceId(audio_port_handle_t* deviceId) {
- ALOGV("getRoutedDeviceId");
+status_t MediaPlayer2AudioOutput::removeAudioDeviceCallback(jobject listener) {
+ ALOGV("removeAudioDeviceCallback");
Mutex::Autolock lock(mLock);
- if (mTrack != 0) {
- mRoutedDeviceId = mTrack->getRoutedDeviceId();
- }
- *deviceId = mRoutedDeviceId;
- return NO_ERROR;
-}
-
-status_t MediaPlayer2AudioOutput::enableAudioDeviceCallback(bool enabled) {
- ALOGV("enableAudioDeviceCallback, %d", enabled);
- Mutex::Autolock lock(mLock);
- mDeviceCallbackEnabled = enabled;
- if (mTrack != 0) {
- status_t status;
- if (enabled) {
- status = mTrack->addAudioDeviceCallback(mDeviceCallback.promote());
- } else {
- status = mTrack->removeAudioDeviceCallback(mDeviceCallback.promote());
+ jobject routingDelegate = nullptr;
+ if (mJAudioTrack != nullptr &&
+ (routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
+ mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
+ JAudioTrack::eraseByKey(mRoutingDelegates, listener);
+ if (JAudioTrack::removeGlobalRef(routingDelegate) != NO_ERROR) {
+ return BAD_VALUE;
}
- return status;
}
return NO_ERROR;
}
+void MediaPlayer2AudioOutput::copyAudioDeviceCallback(
+ std::vector<jobject>& routingDelegateTarget) {
+ ALOGV("copyAudioDeviceCallback");
+ for (std::vector<std::pair<jobject, jobject>>::iterator it = mRoutingDelegates.begin();
+ it != mRoutingDelegates.end(); it++) {
+ routingDelegateTarget.push_back(it->second);
+ }
+}
+
// static
void MediaPlayer2AudioOutput::CallbackWrapper(
int event, void *cookie, void *info) {
@@ -602,21 +586,21 @@
// lock to ensure we aren't caught in the middle of a track switch.
data->lock();
MediaPlayer2AudioOutput *me = data->getOutput();
- AudioTrack::Buffer *buffer = (AudioTrack::Buffer *)info;
- if (me == NULL) {
+ JAudioTrack::Buffer *buffer = (JAudioTrack::Buffer *)info;
+ if (me == nullptr) {
// no output set, likely because the track was scheduled to be reused
// by another player, but the format turned out to be incompatible.
data->unlock();
- if (buffer != NULL) {
- buffer->size = 0;
+ if (buffer != nullptr) {
+ buffer->mSize = 0;
}
return;
}
switch(event) {
- case AudioTrack::EVENT_MORE_DATA: {
+ case JAudioTrack::EVENT_MORE_DATA: {
size_t actualSize = (*me->mCallback)(
- me, buffer->raw, buffer->size, me->mCallbackCookie,
+ me, buffer->mData, buffer->mSize, me->mCallbackCookie,
CB_EVENT_FILL_BUFFER);
// Log when no data is returned from the callback.
@@ -628,25 +612,25 @@
// This is a benign busy-wait, with the next data request generated 10 ms or more later;
// nevertheless for power reasons, we don't want to see too many of these.
- ALOGV_IF(actualSize == 0 && buffer->size > 0, "callbackwrapper: empty buffer returned");
+ ALOGV_IF(actualSize == 0 && buffer->mSize > 0, "callbackwrapper: empty buffer returned");
- buffer->size = actualSize;
+ buffer->mSize = actualSize;
} break;
- case AudioTrack::EVENT_STREAM_END:
+ case JAudioTrack::EVENT_STREAM_END:
// currently only occurs for offloaded callbacks
ALOGV("callbackwrapper: deliver EVENT_STREAM_END");
- (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+ (*me->mCallback)(me, nullptr /* buffer */, 0 /* size */,
me->mCallbackCookie, CB_EVENT_STREAM_END);
break;
- case AudioTrack::EVENT_NEW_IAUDIOTRACK :
+ case JAudioTrack::EVENT_NEW_IAUDIOTRACK :
ALOGV("callbackwrapper: deliver EVENT_TEAR_DOWN");
- (*me->mCallback)(me, NULL /* buffer */, 0 /* size */,
+ (*me->mCallback)(me, nullptr /* buffer */, 0 /* size */,
me->mCallbackCookie, CB_EVENT_TEAR_DOWN);
break;
- case AudioTrack::EVENT_UNDERRUN:
+ case JAudioTrack::EVENT_UNDERRUN:
// This occurs when there is no data available, typically
// when there is a failure to supply data to the AudioTrack. It can also
// occur in non-offloaded mode when the audio device comes out of standby.
@@ -666,29 +650,26 @@
data->unlock();
}
-audio_session_t MediaPlayer2AudioOutput::getSessionId() const
-{
+audio_session_t MediaPlayer2AudioOutput::getSessionId() const {
Mutex::Autolock lock(mLock);
return mSessionId;
}
-uint32_t MediaPlayer2AudioOutput::getSampleRate() const
-{
+uint32_t MediaPlayer2AudioOutput::getSampleRate() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == 0) {
return 0;
}
- return mTrack->getSampleRate();
+ return mJAudioTrack->getSampleRate();
}
-int64_t MediaPlayer2AudioOutput::getBufferDurationInUs() const
-{
+int64_t MediaPlayer2AudioOutput::getBufferDurationInUs() const {
Mutex::Autolock lock(mLock);
- if (mTrack == 0) {
+ if (mJAudioTrack == 0) {
return 0;
}
int64_t duration;
- if (mTrack->getBufferDurationInUs(&duration) != OK) {
+ if (mJAudioTrack->getBufferDurationInUs(&duration) != OK) {
return 0;
}
return duration;
diff --git a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
index 301825b..6122687 100644
--- a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
+++ b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_JAUDIOTRACK_H
#define ANDROID_JAUDIOTRACK_H
+#include <vector>
+#include <utility>
#include <jni.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioSystem.h>
@@ -29,7 +31,7 @@
namespace android {
-class JAudioTrack {
+class JAudioTrack : public RefBase {
public:
/* Events used by AudioTrack callback function (callback_t).
@@ -37,6 +39,8 @@
*/
enum event_type {
EVENT_MORE_DATA = 0, // Request to write more data to buffer.
+ EVENT_UNDERRUN = 1, // Buffer underrun occurred. This will not occur for
+ // static tracks.
EVENT_NEW_IAUDIOTRACK = 6, // IAudioTrack was re-created, either due to re-routing and
// voluntary invalidation by mediaserver, or mediaserver crash.
EVENT_STREAM_END = 7, // Sent after all the buffers queued in AF and HW are played
@@ -104,8 +108,7 @@
*
* TODO: Revive removed arguments after offload mode is supported.
*/
- JAudioTrack(audio_stream_type_t streamType,
- uint32_t sampleRate,
+ JAudioTrack(uint32_t sampleRate,
audio_format_t format,
audio_channel_mask_t channelMask,
callback_t cbf,
@@ -158,10 +161,10 @@
* Caution: calling this method too often may be inefficient;
* if you need a high resolution mapping between frame position and presentation time,
* consider implementing that at application level, based on the low resolution timestamps.
- * Returns true if timestamp is valid.
- * The timestamp parameter is undefined on return, if false is returned.
+ * Returns NO_ERROR if timestamp is valid.
+ * NO_INIT if finds error, and timestamp parameter will be undefined on return.
*/
- bool getTimestamp(AudioTimestamp& timestamp);
+ status_t getTimestamp(AudioTimestamp& timestamp);
// TODO: This doc is just copied from AudioTrack.h. Revise it after implemenation.
/* Return the extended timestamp, with additional timebase info and improved drain behavior.
@@ -324,37 +327,43 @@
audio_format_t format();
+ size_t frameSize();
+
/*
* Dumps the state of an audio track.
* Not a general-purpose API; intended only for use by media player service to dump its tracks.
*/
status_t dump(int fd, const Vector<String16>& args) const;
- /* Returns the ID of the audio device actually used by the output to which this AudioTrack is
- * attached. When the AudioTrack is inactive, it will return AUDIO_PORT_HANDLE_NONE.
+ /* Returns the AudioDeviceInfo used by the output to which this AudioTrack is
+ * attached.
*/
- audio_port_handle_t getRoutedDeviceId();
+ jobject getRoutedDevice();
/* Returns the ID of the audio session this AudioTrack belongs to. */
audio_session_t getAudioSessionId();
- /* Selects the audio device to use for output of this AudioTrack. A value of
- * AUDIO_PORT_HANDLE_NONE indicates default routing.
+ /* Sets the preferred audio device to use for output of this AudioTrack.
*
* Parameters:
- * The device ID of the selected device (as returned by the AudioDevicesManager API).
+ * Device: an AudioDeviceInfo object.
*
* Returned value:
* - NO_ERROR: successful operation
- * - BAD_VALUE: failed to find the valid output device with given device Id.
+ * - BAD_VALUE: failed to set the device
*/
- status_t setOutputDevice(audio_port_handle_t deviceId);
+ status_t setPreferredDevice(jobject device);
// TODO: Add AUDIO_OUTPUT_FLAG_DIRECT when it is possible to check.
// TODO: Add AUDIO_FLAG_HW_AV_SYNC when it is possible to check.
/* Returns the flags */
audio_output_flags_t getFlags() const { return mFlags; }
+ /* We don't keep stream type here,
+ * instead, we keep attributes and call getVolumeControlStream() to get stream type
+ */
+ audio_stream_type_t getAudioStreamType();
+
/* Obtain the pending duration in milliseconds for playback of pure PCM data remaining in
* AudioTrack.
*
@@ -369,33 +378,75 @@
* Replaces any previously installed callback.
*
* Parameters:
- *
- * callback: The callback interface
+ * Listener: the listener to receive notification of rerouting events.
+ * Handler: the handler to handler the rerouting events.
*
* Returns NO_ERROR if successful.
- * INVALID_OPERATION if the same callback is already installed.
- * NO_INIT or PREMISSION_DENIED if AudioFlinger service is not reachable
- * BAD_VALUE if the callback is NULL
+ * (TODO) INVALID_OPERATION if the same callback is already installed.
+ * (TODO) NO_INIT or PREMISSION_DENIED if AudioFlinger service is not reachable
+ * (TODO) BAD_VALUE if the callback is NULL
*/
- status_t addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+ status_t addAudioDeviceCallback(jobject listener, jobject rd);
/* Removes an AudioDeviceCallback.
*
* Parameters:
- *
- * callback: The callback interface
+ * Listener: the listener to receive notification of rerouting events.
*
* Returns NO_ERROR if successful.
- * INVALID_OPERATION if the callback is not installed
- * BAD_VALUE if the callback is NULL
+ * (TODO) INVALID_OPERATION if the callback is not installed
+ * (TODO) BAD_VALUE if the callback is NULL
*/
- status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
+ status_t removeAudioDeviceCallback(jobject listener);
+
+ /* Register all backed-up routing delegates.
+ *
+ * Parameters:
+ * routingDelegates: backed-up routing delegates
+ *
+ */
+ void registerRoutingDelegates(std::vector<std::pair<jobject, jobject>>& routingDelegates);
+
+ /* get listener from RoutingDelegate object
+ */
+ static jobject getListener(const jobject routingDelegateObj);
+
+ /* get handler from RoutingDelegate object
+ */
+ static jobject getHandler(const jobject routingDelegateObj);
+
+ /* convert local reference to global reference.
+ */
+ static jobject addGlobalRef(const jobject obj);
+
+ /* erase global reference.
+ *
+ * Returns NO_ERROR if succeeds
+ * BAD_VALUE if obj is NULL
+ */
+ static status_t removeGlobalRef(const jobject obj);
+
+ /*
+ * Parameters:
+ * map and key
+ *
+ * Returns value if key is in the map
+ * nullptr if key is not in the map
+ */
+ static jobject findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
+
+ /*
+ * Parameters:
+ * map and key
+ */
+ static void eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
private:
audio_output_flags_t mFlags;
jclass mAudioTrackCls;
jobject mAudioTrackObj;
+ jobject mAudioAttributesObj;
/* Creates a Java VolumeShaper.Configuration object from VolumeShaper::Configuration */
jobject createVolumeShaperConfigurationObj(
diff --git a/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h b/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h
new file mode 100644
index 0000000..93d8b40
--- /dev/null
+++ b/media/libmediaplayer2/include/mediaplayer2/JObjectHolder.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright 2018, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef JOBJECT_HOLDER_H_
+
+#define JOBJECT_HOLDER_H_
+
+#include "jni.h"
+#include <mediaplayer2/JavaVMHelper.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// Helper class for managing global reference of jobject.
+struct JObjectHolder : public RefBase {
+ JObjectHolder(jobject obj) {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ mJObject = reinterpret_cast<jobject>(env->NewGlobalRef(obj));
+ }
+
+ virtual ~JObjectHolder() {
+ JNIEnv *env = JavaVMHelper::getJNIEnv();
+ env->DeleteGlobalRef(mJObject);
+ }
+
+ jobject getJObject() { return mJObject; }
+
+private:
+ jobject mJObject;
+};
+
+} // namespace android
+
+#endif // JOBJECT_HOLDER_H_
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
index fe1005b..d13c54c 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
@@ -19,10 +19,16 @@
#define ANDROID_MEDIAPLAYER2AUDIOOUTPUT_H
#include <mediaplayer2/MediaPlayer2Interface.h>
+#include <mediaplayer2/JAudioTrack.h>
+#include <mediaplayer2/JObjectHolder.h>
+#include <vector>
+#include <utility>
#include <utils/String16.h>
#include <utils/Vector.h>
+#include "jni.h"
+
namespace android {
class AudioTrack;
@@ -36,11 +42,11 @@
uid_t uid,
int pid,
const audio_attributes_t * attr,
- const sp<AudioSystem::AudioDeviceCallback>& deviceCallback);
+ std::vector<jobject>& routingDelegatesBackup);
virtual ~MediaPlayer2AudioOutput();
virtual bool ready() const {
- return mTrack != 0;
+ return mJAudioTrack != nullptr;
}
virtual ssize_t bufferSize() const;
virtual ssize_t frameCount() const;
@@ -62,7 +68,6 @@
AudioCallback cb, void *cookie,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
const audio_offload_info_t *offloadInfo = NULL,
- bool doNotReconnect = false,
uint32_t suggestedFrameCount = 0);
virtual status_t start();
@@ -71,11 +76,8 @@
virtual void flush();
virtual void pause();
virtual void close();
- void setAudioStreamType(audio_stream_type_t streamType);
- virtual audio_stream_type_t getAudioStreamType() const {
- return mStreamType;
- }
void setAudioAttributes(const audio_attributes_t * attributes);
+ virtual audio_stream_type_t getAudioStreamType() const;
void setVolume(float volume);
virtual status_t setPlaybackRate(const AudioPlaybackRate& rate);
@@ -92,13 +94,12 @@
// TODO: return correct value.
//return mNextOutput == NULL;
}
- virtual status_t setParameters(const String8& keyValuePairs);
- virtual String8 getParameters(const String8& keys);
-
// AudioRouting
- virtual status_t setOutputDevice(audio_port_handle_t deviceId);
- virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
- virtual status_t enableAudioDeviceCallback(bool enabled);
+ virtual status_t setPreferredDevice(jobject device);
+ virtual jobject getRoutedDevice();
+ virtual status_t addAudioDeviceCallback(jobject routingDelegate);
+ virtual status_t removeAudioDeviceCallback(jobject listener);
+ virtual void copyAudioDeviceCallback(std::vector<jobject>& routingDelegateTarget);
private:
static void setMinBufferCount();
@@ -107,11 +108,10 @@
void close_l();
status_t updateTrack_l();
- sp<AudioTrack> mTrack;
+ sp<JAudioTrack> mJAudioTrack;
AudioCallback mCallback;
void * mCallbackCookie;
CallbackData * mCallbackData;
- audio_stream_type_t mStreamType;
audio_attributes_t * mAttributes;
float mVolume;
AudioPlaybackRate mPlaybackRate;
@@ -124,11 +124,9 @@
float mSendLevel;
int mAuxEffectId;
audio_output_flags_t mFlags;
- audio_port_handle_t mSelectedDeviceId;
- audio_port_handle_t mRoutedDeviceId;
- bool mDeviceCallbackEnabled;
- wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ sp<JObjectHolder> mPreferredDevice;
mutable Mutex mLock;
+ std::vector<std::pair<jobject, jobject>> mRoutingDelegates; // <listener, routingDelegate>
// static variables below not protected by mutex
static bool mIsOnEmulator;
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
index 846441e..07a7946 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Interface.h
@@ -23,6 +23,7 @@
#include <utils/Errors.h>
#include <utils/String8.h>
#include <utils/RefBase.h>
+#include <jni.h>
#include <media/AVSyncSettings.h>
#include <media/AudioResamplerPublic.h>
@@ -33,6 +34,7 @@
#include <media/stagefright/foundation/AHandler.h>
#include <mediaplayer2/MediaPlayer2Types.h>
+#include "jni.h"
#include "mediaplayer2.pb.h"
using android::media::MediaPlayer2Proto::PlayerMessage;
@@ -106,7 +108,6 @@
void *cookie = NULL,
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
const audio_offload_info_t *offloadInfo = NULL,
- bool doNotReconnect = false,
uint32_t suggestedFrameCount = 0) = 0;
virtual status_t start() = 0;
@@ -142,9 +143,10 @@
}
// AudioRouting
- virtual status_t setOutputDevice(audio_port_handle_t deviceId);
- virtual status_t getRoutedDeviceId(audio_port_handle_t* deviceId);
- virtual status_t enableAudioDeviceCallback(bool enabled);
+ virtual status_t setPreferredDevice(jobject device);
+ virtual jobject getRoutedDevice();
+ virtual status_t addAudioDeviceCallback(jobject routingDelegate);
+ virtual status_t removeAudioDeviceCallback(jobject listener);
};
MediaPlayer2Interface() : mListener(NULL) { }
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
index 10e07ea..2430289 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2Types.h
@@ -42,7 +42,6 @@
MEDIA2_SUBTITLE_DATA = 201,
MEDIA2_META_DATA = 202,
MEDIA2_DRM_INFO = 210,
- MEDIA2_AUDIO_ROUTING_CHANGED = 10000,
};
// Generic error codes for the media player framework. Errors are fatal, the
diff --git a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
index 4f73ad3..a646399 100644
--- a/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
+++ b/media/libmediaplayer2/include/mediaplayer2/mediaplayer2.h
@@ -25,6 +25,8 @@
#include <mediaplayer2/MediaPlayer2Interface.h>
#include <mediaplayer2/MediaPlayer2Types.h>
+#include <vector>
+#include <jni.h>
#include <utils/Errors.h>
#include <utils/Mutex.h>
#include <utils/RefBase.h>
@@ -32,6 +34,8 @@
#include <utils/Vector.h>
#include <system/audio-base.h>
+#include "jni.h"
+
namespace android {
struct ANativeWindowWrapper;
@@ -103,9 +107,10 @@
status_t prepareDrm(const uint8_t uuid[16], const Vector<uint8_t>& drmSessionId);
status_t releaseDrm();
// AudioRouting
- status_t setOutputDevice(audio_port_handle_t deviceId);
- audio_port_handle_t getRoutedDeviceId();
- status_t enableAudioDeviceCallback(bool enabled);
+ status_t setPreferredDevice(jobject device);
+ jobject getRoutedDevice();
+ status_t addAudioDeviceCallback(jobject routingDelegate);
+ status_t removeAudioDeviceCallback(jobject listener);
status_t dump(int fd, const Vector<String16>& args);
@@ -148,7 +153,7 @@
audio_session_t mAudioSessionId;
audio_attributes_t * mAudioAttributes;
float mSendLevel;
-
+ std::vector<jobject> mRoutingDelegates;
sp<ANativeWindowWrapper> mConnectedWindow;
};
diff --git a/media/libmediaplayer2/mediaplayer2.cpp b/media/libmediaplayer2/mediaplayer2.cpp
index 480a630..617da47 100644
--- a/media/libmediaplayer2/mediaplayer2.cpp
+++ b/media/libmediaplayer2/mediaplayer2.cpp
@@ -104,27 +104,6 @@
}
}
-class AudioDeviceUpdatedNotifier: public AudioSystem::AudioDeviceCallback {
-public:
- AudioDeviceUpdatedNotifier(const sp<MediaPlayer2Interface>& listener)
- : mListener(listener) { }
-
- ~AudioDeviceUpdatedNotifier() { }
-
- virtual void onAudioDeviceUpdate(audio_io_handle_t audioIo,
- audio_port_handle_t deviceId) override {
- sp<MediaPlayer2Interface> listener = mListener.promote();
- if (listener != NULL) {
- listener->sendEvent(0, MEDIA2_AUDIO_ROUTING_CHANGED, audioIo, deviceId);
- } else {
- ALOGW("listener for process %d death is gone", MEDIA2_AUDIO_ROUTING_CHANGED);
- }
- }
-
-private:
- wp<MediaPlayer2Interface> mListener;
-};
-
class proxyListener : public MediaPlayer2InterfaceListener {
public:
proxyListener(const wp<MediaPlayer2> &player)
@@ -433,9 +412,13 @@
clear_l();
+ if (mAudioOutput != NULL) {
+ mAudioOutput->copyAudioDeviceCallback(mRoutingDelegates);
+ }
+
player->setListener(new proxyListener(this));
mAudioOutput = new MediaPlayer2AudioOutput(mAudioSessionId, mUid,
- mPid, mAudioAttributes, new AudioDeviceUpdatedNotifier(player));
+ mPid, mAudioAttributes, mRoutingDelegates);
player->setAudioSink(mAudioOutput);
err = player->setDataSource(dsd);
@@ -614,8 +597,6 @@
if (err != OK) {
return err;
}
- } else if (mAudioOutput != 0) {
- mAudioOutput->setAudioStreamType(mStreamType);
}
mCurrentState = MEDIA_PLAYER2_PREPARING;
return mPlayer->prepareAsync();
@@ -1283,36 +1264,41 @@
return status;
}
-status_t MediaPlayer2::setOutputDevice(audio_port_handle_t deviceId) {
+status_t MediaPlayer2::setPreferredDevice(jobject device) {
Mutex::Autolock _l(mLock);
if (mAudioOutput == NULL) {
- ALOGV("setOutputDevice: audio sink not init");
+ ALOGV("setPreferredDevice: audio sink not init");
return NO_INIT;
}
- return mAudioOutput->setOutputDevice(deviceId);
+ return mAudioOutput->setPreferredDevice(device);
}
-audio_port_handle_t MediaPlayer2::getRoutedDeviceId() {
+jobject MediaPlayer2::getRoutedDevice() {
Mutex::Autolock _l(mLock);
if (mAudioOutput == NULL) {
- ALOGV("getRoutedDeviceId: audio sink not init");
- return AUDIO_PORT_HANDLE_NONE;
+ ALOGV("getRoutedDevice: audio sink not init");
+ return nullptr;
}
- audio_port_handle_t deviceId;
- status_t status = mAudioOutput->getRoutedDeviceId(&deviceId);
- if (status != NO_ERROR) {
- return AUDIO_PORT_HANDLE_NONE;
- }
- return deviceId;
+ return mAudioOutput->getRoutedDevice();
}
-status_t MediaPlayer2::enableAudioDeviceCallback(bool enabled) {
+status_t MediaPlayer2::addAudioDeviceCallback(jobject routingDelegate) {
+ Mutex::Autolock _l(mLock);
+ if (mAudioOutput == NULL) {
+ ALOGV("addAudioDeviceCallback: player not init");
+ mRoutingDelegates.push_back(routingDelegate);
+ return NO_INIT;
+ }
+ return mAudioOutput->addAudioDeviceCallback(routingDelegate);
+}
+
+status_t MediaPlayer2::removeAudioDeviceCallback(jobject listener) {
Mutex::Autolock _l(mLock);
if (mAudioOutput == NULL) {
ALOGV("addAudioDeviceCallback: player not init");
return NO_INIT;
}
- return mAudioOutput->enableAudioDeviceCallback(enabled);
+ return mAudioOutput->removeAudioDeviceCallback(listener);
}
status_t MediaPlayer2::dump(int fd, const Vector<String16>& args) {
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
index bc17d13..c37460b 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2.cpp
@@ -643,6 +643,7 @@
}
void NuPlayer2::onMessageReceived(const sp<AMessage> &msg) {
+
switch (msg->what()) {
case kWhatSetDataSource:
{
@@ -1717,7 +1718,7 @@
mRenderer = new Renderer(mAudioSink, mMediaClock, notify, flags);
mRendererLooper = new ALooper;
mRendererLooper->setName("NuPlayerRenderer");
- mRendererLooper->start(false, false, ANDROID_PRIORITY_AUDIO);
+ mRendererLooper->start(false, true, ANDROID_PRIORITY_AUDIO);
mRendererLooper->registerHandler(mRenderer);
status_t err = mRenderer->setPlaybackSettings(mPlaybackSettings);
diff --git a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
index 652cc89..7db78c1 100644
--- a/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
+++ b/media/libmediaplayer2/nuplayer2/NuPlayer2Renderer.cpp
@@ -1848,6 +1848,7 @@
bool isStreaming) {
ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
offloadOnly, offloadingAudio());
+
bool audioSinkChanged = false;
int32_t numChannels;
@@ -1989,13 +1990,6 @@
const uint32_t frameCount =
(unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
- // The doNotReconnect means AudioSink will signal back and let NuPlayer2 to re-construct
- // AudioSink. We don't want this when there's video because it will cause a video seek to
- // the previous I frame. But we do want this when there's only audio because it will give
- // NuPlayer2 a chance to switch from non-offload mode to offload mode.
- // So we only set doNotReconnect when there's no video.
- const bool doNotReconnect = !hasVideo;
-
// We should always be able to set our playback settings if the sink is closed.
LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
"onOpenAudioSink: can't set playback rate on closed sink");
@@ -2008,7 +2002,6 @@
mUseAudioCallback ? this : NULL,
(audio_output_flags_t)pcmFlags,
NULL,
- doNotReconnect,
frameCount);
if (err != OK) {
ALOGW("openAudioSink: non offloaded open failed status: %d", err);
diff --git a/media/libmediaplayerservice/nuplayer/GenericSource.cpp b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
index 8cd6eda..f3b69d6 100644
--- a/media/libmediaplayerservice/nuplayer/GenericSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/GenericSource.cpp
@@ -1547,6 +1547,16 @@
notifyBufferingUpdate(100);
}
+ if (mPreparing) {
+ notifyPreparedAndCleanup(finalStatus);
+ mPreparing = false;
+ } else if (mSentPauseOnBuffering) {
+ sendCacheStats();
+ mSentPauseOnBuffering = false;
+ sp<AMessage> notify = dupNotify();
+ notify->setInt32("what", kWhatResumeOnBufferingEnd);
+ notify->post();
+ }
return;
}
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 96993e9..3956520 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -1204,6 +1204,17 @@
msg->setBuffer("csd-0", buffer);
parseVp9ProfileLevelFromCsd(buffer, msg);
+ } else if (meta->findData(kKeyAlacMagicCookie, &type, &data, &size)) {
+ ALOGV("convertMetaDataToMessage found kKeyAlacMagicCookie of size %zu\n", size);
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == NULL || buffer->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-0", buffer);
}
// TODO expose "crypto-key"/kKeyCryptoKey through public api
@@ -1621,6 +1632,8 @@
if (msg->findBuffer("csd-1", &csd1)) {
meta->setData(kKeyVorbisBooks, 0, csd1->data(), csd1->size());
}
+ } else if (mime == MEDIA_MIMETYPE_AUDIO_ALAC) {
+ meta->setData(kKeyAlacMagicCookie, 0, csd0->data(), csd0->size());
}
} else if (mime == MEDIA_MIMETYPE_VIDEO_AVC && msg->findBuffer("csd-avc", &csd0)) {
meta->setData(kKeyAVCC, kTypeAVCC, csd0->data(), csd0->size());
@@ -1710,6 +1723,7 @@
{ MEDIA_MIMETYPE_AUDIO_EAC3, AUDIO_FORMAT_E_AC3},
{ MEDIA_MIMETYPE_AUDIO_AC4, AUDIO_FORMAT_AC4},
{ MEDIA_MIMETYPE_AUDIO_FLAC, AUDIO_FORMAT_FLAC},
+ { MEDIA_MIMETYPE_AUDIO_ALAC, AUDIO_FORMAT_ALAC },
{ 0, AUDIO_FORMAT_INVALID }
};
@@ -1800,7 +1814,7 @@
info.sample_rate = srate;
int32_t cmask = 0;
- if (!meta->findInt32(kKeyChannelMask, &cmask)) {
+ if (!meta->findInt32(kKeyChannelMask, &cmask) || cmask == CHANNEL_MASK_USE_CHANNEL_ORDER) {
ALOGV("track of type '%s' does not publish channel mask", mime);
// Try a channel count instead
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 28bb10a..f93ae65 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -52,6 +52,7 @@
const char *MEDIA_MIMETYPE_AUDIO_EAC3 = "audio/eac3";
const char *MEDIA_MIMETYPE_AUDIO_AC4 = "audio/ac4";
const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED = "audio/scrambled";
+const char *MEDIA_MIMETYPE_AUDIO_ALAC = "audio/alac";
const char *MEDIA_MIMETYPE_CONTAINER_MPEG4 = "video/mp4";
const char *MEDIA_MIMETYPE_CONTAINER_WAV = "audio/x-wav";
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index b165bcb..523378e 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -54,6 +54,7 @@
extern const char *MEDIA_MIMETYPE_AUDIO_EAC3;
extern const char *MEDIA_MIMETYPE_AUDIO_AC4;
extern const char *MEDIA_MIMETYPE_AUDIO_SCRAMBLED;
+extern const char *MEDIA_MIMETYPE_AUDIO_ALAC;
extern const char *MEDIA_MIMETYPE_CONTAINER_MPEG4;
extern const char *MEDIA_MIMETYPE_CONTAINER_WAV;
diff --git a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
index c9869c0..061bc5b 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoSurfaceView.java
@@ -21,6 +21,7 @@
import android.content.Context;
import android.graphics.Rect;
import android.media.MediaPlayer2;
+import android.media.VideoSize;
import android.util.AttributeSet;
import android.util.Log;
import android.view.SurfaceHolder;
@@ -149,8 +150,8 @@
// TODO: Investigate the way to move onMeasure() code into FrameLayout.
@Override
protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
- int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoWidth();
- int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoHeight();
+ int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoSize().getWidth();
+ int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoSize().getHeight();
if (DEBUG) {
Log.d(TAG, "onMeasure(" + MeasureSpec.toString(widthMeasureSpec) + ", "
+ MeasureSpec.toString(heightMeasureSpec) + ")");
diff --git a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
index 40fb046..c2c1ca6 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoTextureView.java
@@ -21,6 +21,7 @@
import android.content.Context;
import android.graphics.SurfaceTexture;
import android.media.MediaPlayer2;
+import android.media.VideoSize;
import android.util.AttributeSet;
import android.util.Log;
import android.view.Surface;
@@ -160,8 +161,8 @@
@Override
protected void onMeasure(int widthMeasureSpec, int heightMeasureSpec) {
- int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoWidth();
- int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoHeight();
+ int videoWidth = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoSize().getWidth();
+ int videoHeight = (mMediaPlayer == null) ? 0 : mMediaPlayer.getVideoSize().getHeight();
if (DEBUG) {
Log.d(TAG, "onMeasure(" + MeasureSpec.toString(widthMeasureSpec) + ", "
+ MeasureSpec.toString(heightMeasureSpec) + ")");
diff --git a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
index 802f86f..5eb5ba6 100644
--- a/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
+++ b/packages/MediaComponents/src/com/android/widget/VideoView2Impl.java
@@ -39,6 +39,7 @@
import android.media.SessionToken2;
import android.media.SubtitleData;
import android.media.TimedText;
+import android.media.VideoSize;
import android.media.session.MediaController;
import android.media.session.MediaController.PlaybackInfo;
import android.media.session.MediaSession;
@@ -1110,12 +1111,13 @@
@Override
public void onVideoSizeChanged(
- MediaPlayer2 mp, DataSourceDesc dsd, int width, int height) {
+ MediaPlayer2 mp, DataSourceDesc dsd, VideoSize size) {
if (DEBUG) {
- Log.d(TAG, "onVideoSizeChanged(): size: " + width + "/" + height);
+ Log.d(TAG, "onVideoSizeChanged(): size: " + size.getWidth() + "/"
+ + size.getHeight());
}
- mVideoWidth = mp.getVideoWidth();
- mVideoHeight = mp.getVideoHeight();
+ mVideoWidth = mp.getVideoSize().getWidth();
+ mVideoHeight = mp.getVideoSize().getHeight();
if (DEBUG) {
Log.d(TAG, "onVideoSizeChanged(): mVideoSize:" + mVideoWidth + "/"
+ mVideoHeight);
@@ -1193,8 +1195,8 @@
if (mMediaControlView != null) {
mMediaControlView.setEnabled(true);
}
- int videoWidth = mp.getVideoWidth();
- int videoHeight = mp.getVideoHeight();
+ int videoWidth = mp.getVideoSize().getWidth();
+ int videoHeight = mp.getVideoSize().getHeight();
// mSeekWhenPrepared may be changed after seekTo() call
long seekToPosition = mSeekWhenPrepared;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 7b1454d..18addb5 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -69,7 +69,7 @@
res = buildQuirks();
if (res != OK) return res;
- const Size MAX_PREVIEW_SIZE = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
+ Size maxPreviewSize = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
// Treat the H.264 max size as the max supported video size.
MediaProfiles *videoEncoderProfiles = MediaProfiles::getInstance();
Vector<video_encoder> encoders = videoEncoderProfiles->getVideoEncoders();
@@ -90,11 +90,16 @@
}
}
// This is just an upper bound and may not be an actually valid video size
- const Size VIDEO_SIZE_UPPER_BOUND = {maxVideoWidth, maxVideoHeight};
+ Size videoSizeUpperBound = {maxVideoWidth, maxVideoHeight};
- res = getFilteredSizes(MAX_PREVIEW_SIZE, &availablePreviewSizes);
+ if (fastInfo.supportsPreferredConfigs) {
+ maxPreviewSize = getMaxSize(getPreferredPreviewSizes());
+ videoSizeUpperBound = getMaxSize(getPreferredVideoSizes());
+ }
+
+ res = getFilteredSizes(maxPreviewSize, &availablePreviewSizes);
if (res != OK) return res;
- res = getFilteredSizes(VIDEO_SIZE_UPPER_BOUND, &availableVideoSizes);
+ res = getFilteredSizes(videoSizeUpperBound, &availableVideoSizes);
if (res != OK) return res;
// Select initial preview and video size that's under the initial bound and
@@ -296,9 +301,13 @@
Vector<Size> availableJpegSizes = getAvailableJpegSizes();
if (!availableJpegSizes.size()) return NO_INIT;
- // TODO: Pick maximum
pictureWidth = availableJpegSizes[0].width;
pictureHeight = availableJpegSizes[0].height;
+ if (fastInfo.supportsPreferredConfigs) {
+ Size suggestedJpegSize = getMaxSize(getPreferredJpegSizes());
+ pictureWidth = suggestedJpegSize.width;
+ pictureHeight = suggestedJpegSize.height;
+ }
pictureWidthLastSet = pictureWidth;
pictureHeightLastSet = pictureHeight;
pictureSizeOverriden = false;
@@ -1011,6 +1020,9 @@
arrayHeight = activeArraySize.data.i32[3];
} else return NO_INIT;
+ fastInfo.supportsPreferredConfigs =
+ info->exists(ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS);
+
// We'll set the target FPS range for still captures to be as wide
// as possible to give the HAL maximum latitude for exposure selection
camera_metadata_ro_entry_t availableFpsRanges =
@@ -1022,8 +1034,11 @@
// Get supported preview fps ranges, up to default maximum.
Vector<Size> supportedPreviewSizes;
Vector<FpsRange> supportedPreviewFpsRanges;
- const Size PREVIEW_SIZE_BOUND = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
- status_t res = getFilteredSizes(PREVIEW_SIZE_BOUND, &supportedPreviewSizes);
+ Size previewSizeBound = { MAX_PREVIEW_WIDTH, MAX_PREVIEW_HEIGHT };
+ if (fastInfo.supportsPreferredConfigs) {
+ previewSizeBound = getMaxSize(getPreferredPreviewSizes());
+ }
+ status_t res = getFilteredSizes(previewSizeBound, &supportedPreviewSizes);
if (res != OK) return res;
for (size_t i=0; i < availableFpsRanges.count; i += 2) {
if (!isFpsSupported(supportedPreviewSizes,
@@ -3107,6 +3122,67 @@
return jpegSizes;
}
+Vector<Parameters::StreamConfiguration> Parameters::getPreferredStreamConfigurations(
+ int32_t usecaseId) const {
+ const size_t STREAM_CONFIGURATION_SIZE = 5;
+ const size_t STREAM_WIDTH_OFFSET = 0;
+ const size_t STREAM_HEIGHT_OFFSET = 1;
+ const size_t STREAM_FORMAT_OFFSET = 2;
+ const size_t STREAM_IS_INPUT_OFFSET = 3;
+ const size_t STREAM_USECASE_BITMAP_OFFSET = 4;
+ Vector<StreamConfiguration> scs;
+
+ if (fastInfo.supportsPreferredConfigs) {
+ camera_metadata_ro_entry_t availableStreamConfigs = staticInfo(
+ ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS);
+ for (size_t i = 0; i < availableStreamConfigs.count; i+= STREAM_CONFIGURATION_SIZE) {
+ int32_t width = availableStreamConfigs.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = availableStreamConfigs.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t format = availableStreamConfigs.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t isInput = availableStreamConfigs.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ int32_t supportedUsecases =
+ availableStreamConfigs.data.i32[i + STREAM_USECASE_BITMAP_OFFSET];
+ if (supportedUsecases & (1 << usecaseId)) {
+ StreamConfiguration sc = {format, width, height, isInput};
+ scs.add(sc);
+ }
+ }
+ }
+
+ return scs;
+}
+
+Vector<Parameters::Size> Parameters::getPreferredFilteredSizes(int32_t usecaseId,
+ int32_t format) const {
+ Vector<Parameters::Size> sizes;
+ Vector<StreamConfiguration> scs = getPreferredStreamConfigurations(usecaseId);
+ for (const auto &it : scs) {
+ if (it.format == format) {
+ sizes.add({it.width, it.height});
+ }
+ }
+
+ return sizes;
+}
+
+Vector<Parameters::Size> Parameters::getPreferredJpegSizes() const {
+ return getPreferredFilteredSizes(
+ ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_SNAPSHOT,
+ HAL_PIXEL_FORMAT_BLOB);
+}
+
+Vector<Parameters::Size> Parameters::getPreferredPreviewSizes() const {
+ return getPreferredFilteredSizes(
+ ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_PREVIEW,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+}
+
+Vector<Parameters::Size> Parameters::getPreferredVideoSizes() const {
+ return getPreferredFilteredSizes(
+ ANDROID_SCALER_AVAILABLE_RECOMMENDED_STREAM_CONFIGURATIONS_RECORD,
+ HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED);
+}
+
Parameters::CropRegion Parameters::calculateCropRegion(bool previewOnly) const {
float zoomLeft, zoomTop, zoomWidth, zoomHeight;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index e008648..3a709c9 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -248,6 +248,7 @@
bool useFlexibleYuv;
Size maxJpegSize;
Size maxZslSize;
+ bool supportsPreferredConfigs;
} fastInfo;
// Quirks information; these are short-lived flags to enable workarounds for
@@ -418,6 +419,9 @@
// returns an empty Vector if device HAL version does support it
Vector<StreamConfiguration> getStreamConfigurations();
+ // Helper function to extract the suggested stream configurations
+ Vector<StreamConfiguration> getPreferredStreamConfigurations(int32_t usecaseId) const;
+
// Helper function to get minimum frame duration for a jpeg size
// return -1 if input jpeg size cannot be found in supported size list
int64_t getJpegStreamMinFrameDurationNs(Parameters::Size size);
@@ -439,6 +443,15 @@
// The maximum size is defined by comparing width first, when width ties comparing height.
Size getMaxSize(const Vector<Size>& sizes);
+ // Helper function to filter and sort suggested sizes
+ Vector<Parameters::Size> getPreferredFilteredSizes(int32_t usecaseId, int32_t format) const;
+ // Helper function to get the suggested jpeg sizes
+ Vector<Size> getPreferredJpegSizes() const;
+ // Helper function to get the suggested preview sizes
+ Vector<Size> getPreferredPreviewSizes() const;
+ // Helper function to get the suggested video sizes
+ Vector<Size> getPreferredVideoSizes() const;
+
int mDeviceVersion;
uint8_t mDefaultSceneMode;
};
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 53aee7e..7d41256 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -82,8 +82,6 @@
mLastTemplateId(-1)
{
ATRACE_CALL();
- camera3_callback_ops::notify = &sNotify;
- camera3_callback_ops::process_capture_result = &sProcessCaptureResult;
ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
}
@@ -218,8 +216,17 @@
if (sessionKeysEntry.count > 0) {
sessionParamKeys.insertArrayAt(sessionKeysEntry.data.i32, 0, sessionKeysEntry.count);
}
+
+ camera_metadata_entry bufMgrMode =
+ mDeviceInfo.find(ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION);
+ if (bufMgrMode.count > 0) {
+ mUseHalBufManager = (bufMgrMode.data.u8[0] ==
+ ANDROID_INFO_SUPPORTED_BUFFER_MANAGEMENT_VERSION_HIDL_DEVICE_3_5);
+ }
+
/** Start up request queue thread */
- mRequestThread = new RequestThread(this, mStatusTracker, mInterface, sessionParamKeys);
+ mRequestThread = new RequestThread(
+ this, mStatusTracker, mInterface, sessionParamKeys, mUseHalBufManager);
res = mRequestThread->run(String8::format("C3Dev-%s-ReqQueue", mId.string()).string());
if (res != OK) {
SET_ERR_L("Unable to start request queue thread: %s (%d)",
@@ -271,7 +278,6 @@
return res;
}
}
-
return OK;
}
@@ -919,6 +925,217 @@
return res;
}
+hardware::Return<void> Camera3Device::requestStreamBuffers(
+ const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+ requestStreamBuffers_cb _hidl_cb) {
+ using hardware::camera::device::V3_5::BufferRequestStatus;
+ using hardware::camera::device::V3_5::StreamBufferRet;
+ using hardware::camera::device::V3_5::StreamBufferRequestError;
+
+ std::lock_guard<std::mutex> lock(mRequestBufferInterfaceLock);
+
+ hardware::hidl_vec<StreamBufferRet> bufRets;
+ if (!mUseHalBufManager) {
+ ALOGE("%s: Camera %s does not support HAL buffer management",
+ __FUNCTION__, mId.string());
+ _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+ return hardware::Void();
+ }
+
+ SortedVector<int32_t> streamIds;
+ ssize_t sz = streamIds.setCapacity(bufReqs.size());
+ if (sz < 0 || static_cast<size_t>(sz) != bufReqs.size()) {
+ ALOGE("%s: failed to allocate memory for %zu buffer requests",
+ __FUNCTION__, bufReqs.size());
+ _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+ return hardware::Void();
+ }
+
+ // Check for repeated streamId
+ for (const auto& bufReq : bufReqs) {
+ if (streamIds.indexOf(bufReq.streamId) != NAME_NOT_FOUND) {
+ ALOGE("%s: Stream %d appears multiple times in buffer requests",
+ __FUNCTION__, bufReq.streamId);
+ _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+ return hardware::Void();
+ }
+ streamIds.add(bufReq.streamId);
+ }
+
+ // TODO: check we are not configuring streams. If so return FAILED_CONFIGURING
+ // Probably need to hook CameraDeviceClient::beginConfigure and figure something
+ // out for API1 client... maybe grab mLock and check mNeedConfig but then we will
+ // need to wait until mLock is released...
+ // _hidl_cb(BufferRequestStatus::FAILED_CONFIGURING, bufRets);
+ // return hardware::Void();
+
+ // TODO: here we start accessing mOutputStreams, might need mLock, but that
+ // might block incoming API calls. Not sure how bad is it.
+ if (bufReqs.size() > mOutputStreams.size()) {
+ ALOGE("%s: too many buffer requests (%zu > # of output streams %zu)",
+ __FUNCTION__, bufReqs.size(), mOutputStreams.size());
+ _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, bufRets);
+ return hardware::Void();
+ }
+
+ bufRets.resize(bufReqs.size());
+
+ bool allReqsSucceeds = true;
+ bool oneReqSucceeds = false;
+ for (size_t i = 0; i < bufReqs.size(); i++) {
+ const auto& bufReq = bufReqs[i];
+ auto& bufRet = bufRets[i];
+ int32_t streamId = bufReq.streamId;
+ sp<Camera3OutputStreamInterface> outputStream = mOutputStreams.get(streamId);
+ if (outputStream == nullptr) {
+ ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
+ hardware::hidl_vec<StreamBufferRet> emptyBufRets;
+ _hidl_cb(BufferRequestStatus::FAILED_ILLEGAL_ARGUMENTS, emptyBufRets);
+ return hardware::Void();
+ }
+
+ bufRet.streamId = streamId;
+ uint32_t numBuffersRequested = bufReq.numBuffersRequested;
+ size_t totalHandout = outputStream->getOutstandingBuffersCount() + numBuffersRequested;
+ if (totalHandout > outputStream->asHalStream()->max_buffers) {
+ // Not able to allocate enough buffer. Exit early for this stream
+ bufRet.val.error(StreamBufferRequestError::MAX_BUFFER_EXCEEDED);
+ allReqsSucceeds = false;
+ continue;
+ }
+
+ hardware::hidl_vec<StreamBuffer> tmpRetBuffers(numBuffersRequested);
+ bool currentReqSucceeds = true;
+ std::vector<camera3_stream_buffer_t> streamBuffers(numBuffersRequested);
+ size_t numAllocatedBuffers = 0;
+ size_t numPushedInflightBuffers = 0;
+ for (size_t b = 0; b < numBuffersRequested; b++) {
+ camera3_stream_buffer_t& sb = streamBuffers[b];
+ // Since this method can run concurrently with request thread
+ // We need to update the wait duration every time we call getBuffer
+ nsecs_t waitDuration = kBaseGetBufferWait + getExpectedInFlightDuration();
+ status_t res = outputStream->getBuffer(&sb, waitDuration);
+ if (res != OK) {
+ ALOGE("%s: Can't get output buffer for stream %d: %s (%d)",
+ __FUNCTION__, streamId, strerror(-res), res);
+ if (res == NO_INIT || res == DEAD_OBJECT) {
+ bufRet.val.error(StreamBufferRequestError::STREAM_DISCONNECTED);
+ } else if (res == TIMED_OUT || res == NO_MEMORY) {
+ bufRet.val.error(StreamBufferRequestError::NO_BUFFER_AVAILABLE);
+ } else {
+ bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+ }
+ currentReqSucceeds = false;
+ break;
+ }
+ numAllocatedBuffers++;
+
+ buffer_handle_t *buffer = sb.buffer;
+ auto pair = mInterface->getBufferId(*buffer, streamId);
+ bool isNewBuffer = pair.first;
+ uint64_t bufferId = pair.second;
+ StreamBuffer& hBuf = tmpRetBuffers[b];
+
+ hBuf.streamId = streamId;
+ hBuf.bufferId = bufferId;
+ hBuf.buffer = (isNewBuffer) ? *buffer : nullptr;
+ hBuf.status = BufferStatus::OK;
+ hBuf.releaseFence = nullptr;
+
+ native_handle_t *acquireFence = nullptr;
+ if (sb.acquire_fence != -1) {
+ acquireFence = native_handle_create(1,0);
+ acquireFence->data[0] = sb.acquire_fence;
+ }
+ hBuf.acquireFence.setTo(acquireFence, /*shouldOwn*/true);
+ hBuf.releaseFence = nullptr;
+
+ res = mInterface->pushInflightRequestBuffer(bufferId, buffer);
+ if (res != OK) {
+ ALOGE("%s: Can't register request buffers for stream %d: %s (%d)",
+ __FUNCTION__, streamId, strerror(-res), res);
+ bufRet.val.error(StreamBufferRequestError::UNKNOWN_ERROR);
+ currentReqSucceeds = false;
+ break;
+ }
+ numPushedInflightBuffers++;
+ }
+ if (currentReqSucceeds) {
+ bufRet.val.buffers(std::move(tmpRetBuffers));
+ oneReqSucceeds = true;
+ } else {
+ allReqsSucceeds = false;
+ for (size_t b = 0; b < numPushedInflightBuffers; b++) {
+ StreamBuffer& hBuf = tmpRetBuffers[b];
+ buffer_handle_t* buffer;
+ status_t res = mInterface->popInflightRequestBuffer(hBuf.bufferId, &buffer);
+ if (res != OK) {
+ SET_ERR("%s: popInflightRequestBuffer failed for stream %d: %s (%d)",
+ __FUNCTION__, streamId, strerror(-res), res);
+ }
+ }
+ returnOutputBuffers(streamBuffers.data(), numAllocatedBuffers, 0);
+ }
+ }
+ // End of mOutputStreams access
+
+ _hidl_cb(allReqsSucceeds ? BufferRequestStatus::OK :
+ oneReqSucceeds ? BufferRequestStatus::FAILED_PARTIAL :
+ BufferRequestStatus::FAILED_UNKNOWN,
+ bufRets);
+ return hardware::Void();
+}
+
+hardware::Return<void> Camera3Device::returnStreamBuffers(
+ const hardware::hidl_vec<hardware::camera::device::V3_2::StreamBuffer>& buffers) {
+ if (!mUseHalBufManager) {
+ ALOGE("%s: Camera %s does not support HAL buffer management",
+ __FUNCTION__, mId.string());
+ return hardware::Void();
+ }
+
+ for (const auto& buf : buffers) {
+ if (buf.bufferId == HalInterface::BUFFER_ID_NO_BUFFER) {
+ ALOGE("%s: cannot return a buffer without bufferId", __FUNCTION__);
+ continue;
+ }
+
+ buffer_handle_t* buffer;
+ status_t res = mInterface->popInflightRequestBuffer(buf.bufferId, &buffer);
+
+ if (res != OK) {
+ ALOGE("%s: cannot find in-flight buffer %" PRIu64 " for stream %d",
+ __FUNCTION__, buf.bufferId, buf.streamId);
+ continue;
+ }
+
+ camera3_stream_buffer_t streamBuffer;
+ streamBuffer.buffer = buffer;
+ streamBuffer.status = CAMERA3_BUFFER_STATUS_ERROR;
+ streamBuffer.acquire_fence = -1;
+ streamBuffer.release_fence = -1;
+
+ if (buf.releaseFence == nullptr) {
+ streamBuffer.release_fence = -1;
+ } else if (buf.releaseFence->numFds == 1) {
+ streamBuffer.release_fence = dup(buf.releaseFence->data[0]);
+ } else {
+ ALOGE("%s: Invalid release fence, fd count is %d, not 1",
+ __FUNCTION__, buf.releaseFence->numFds);
+ continue;
+ }
+
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(buf.streamId);
+ if (stream == nullptr) {
+ ALOGE("%s: Output stream id %d not found!", __FUNCTION__, buf.streamId);
+ continue;
+ }
+ streamBuffer.stream = stream->asHalStream();
+ returnOutputBuffers(&streamBuffer, /*size*/1, /*timestamp*/ 0);
+ }
+ return hardware::Void();
+}
+
hardware::Return<void> Camera3Device::processCaptureResult_3_4(
const hardware::hidl_vec<
hardware::camera::device::V3_4::CaptureResult>& results) {
@@ -1067,21 +1284,32 @@
auto& bDst = outputBuffers[i];
const StreamBuffer &bSrc = result.outputBuffers[i];
- ssize_t idx = mOutputStreams.indexOfKey(bSrc.streamId);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(bSrc.streamId);
+ if (stream == nullptr) {
ALOGE("%s: Frame %d: Buffer %zu: Invalid output stream id %d",
__FUNCTION__, result.frameNumber, i, bSrc.streamId);
return;
}
- bDst.stream = mOutputStreams.valueAt(idx)->asHalStream();
+ bDst.stream = stream->asHalStream();
buffer_handle_t *buffer;
- res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
+ if (mUseHalBufManager) {
+ if (bSrc.bufferId == HalInterface::BUFFER_ID_NO_BUFFER) {
+ ALOGE("%s: Frame %d: Buffer %zu: No bufferId for stream %d",
+ __FUNCTION__, result.frameNumber, i, bSrc.streamId);
+ return;
+ }
+ res = mInterface->popInflightRequestBuffer(bSrc.bufferId, &buffer);
+ } else {
+ res = mInterface->popInflightBuffer(result.frameNumber, bSrc.streamId, &buffer);
+ }
+
if (res != OK) {
ALOGE("%s: Frame %d: Buffer %zu: No in-flight buffer for stream %d",
__FUNCTION__, result.frameNumber, i, bSrc.streamId);
return;
}
+
bDst.buffer = buffer;
bDst.status = mapHidlBufferStatus(bSrc.status);
bDst.acquire_fence = -1;
@@ -1163,13 +1391,13 @@
m.type = CAMERA3_MSG_ERROR;
m.message.error.frame_number = msg.msg.error.frameNumber;
if (msg.msg.error.errorStreamId >= 0) {
- ssize_t idx = mOutputStreams.indexOfKey(msg.msg.error.errorStreamId);
- if (idx == NAME_NOT_FOUND) {
- ALOGE("%s: Frame %d: Invalid error stream id %d",
- __FUNCTION__, m.message.error.frame_number, msg.msg.error.errorStreamId);
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(msg.msg.error.errorStreamId);
+ if (stream == nullptr) {
+ ALOGE("%s: Frame %d: Invalid error stream id %d", __FUNCTION__,
+ m.message.error.frame_number, msg.msg.error.errorStreamId);
return;
}
- m.message.error.error_stream = mOutputStreams.valueAt(idx)->asHalStream();
+ m.message.error.error_stream = stream->asHalStream();
} else {
m.message.error.error_stream = nullptr;
}
@@ -1350,6 +1578,56 @@
return OK;
}
+status_t Camera3Device::StreamSet::add(
+ int streamId, sp<camera3::Camera3OutputStreamInterface> stream) {
+ if (stream == nullptr) {
+ ALOGE("%s: cannot add null stream", __FUNCTION__);
+ return BAD_VALUE;
+ }
+ std::lock_guard<std::mutex> lock(mLock);
+ return mData.add(streamId, stream);
+}
+
+ssize_t Camera3Device::StreamSet::remove(int streamId) {
+ std::lock_guard<std::mutex> lock(mLock);
+ return mData.removeItem(streamId);
+}
+
+sp<camera3::Camera3OutputStreamInterface>
+Camera3Device::StreamSet::get(int streamId) {
+ std::lock_guard<std::mutex> lock(mLock);
+ ssize_t idx = mData.indexOfKey(streamId);
+ if (idx == NAME_NOT_FOUND) {
+ return nullptr;
+ }
+ return mData.editValueAt(idx);
+}
+
+sp<camera3::Camera3OutputStreamInterface>
+Camera3Device::StreamSet::operator[] (size_t index) {
+ std::lock_guard<std::mutex> lock(mLock);
+ return mData.editValueAt(index);
+}
+
+size_t Camera3Device::StreamSet::size() const {
+ std::lock_guard<std::mutex> lock(mLock);
+ return mData.size();
+}
+
+void Camera3Device::StreamSet::clear() {
+ std::lock_guard<std::mutex> lock(mLock);
+ return mData.clear();
+}
+
+std::vector<int> Camera3Device::StreamSet::getStreamIds() {
+ std::lock_guard<std::mutex> lock(mLock);
+ std::vector<int> streamIds(mData.size());
+ for (size_t i = 0; i < mData.size(); i++) {
+ streamIds[i] = mData.keyAt(i);
+ }
+ return streamIds;
+}
+
status_t Camera3Device::createStream(sp<Surface> consumer,
uint32_t width, uint32_t height, int format,
android_dataspace dataSpace, camera3_stream_rotation_t rotation, int *id,
@@ -1533,20 +1811,20 @@
return INVALID_OPERATION;
}
- ssize_t idx = mOutputStreams.indexOfKey(id);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(id);
+ if (stream == nullptr) {
CLOGE("Stream %d is unknown", id);
- return idx;
+ return BAD_VALUE;
}
- streamInfo->width = mOutputStreams[idx]->getWidth();
- streamInfo->height = mOutputStreams[idx]->getHeight();
- streamInfo->format = mOutputStreams[idx]->getFormat();
- streamInfo->dataSpace = mOutputStreams[idx]->getDataSpace();
- streamInfo->formatOverridden = mOutputStreams[idx]->isFormatOverridden();
- streamInfo->originalFormat = mOutputStreams[idx]->getOriginalFormat();
- streamInfo->dataSpaceOverridden = mOutputStreams[idx]->isDataSpaceOverridden();
- streamInfo->originalDataSpace = mOutputStreams[idx]->getOriginalDataSpace();
+ streamInfo->width = stream->getWidth();
+ streamInfo->height = stream->getHeight();
+ streamInfo->format = stream->getFormat();
+ streamInfo->dataSpace = stream->getDataSpace();
+ streamInfo->formatOverridden = stream->isFormatOverridden();
+ streamInfo->originalFormat = stream->getOriginalFormat();
+ streamInfo->dataSpaceOverridden = stream->isDataSpaceOverridden();
+ streamInfo->originalDataSpace = stream->getOriginalDataSpace();
return OK;
}
@@ -1573,14 +1851,12 @@
return INVALID_OPERATION;
}
- ssize_t idx = mOutputStreams.indexOfKey(id);
- if (idx == NAME_NOT_FOUND) {
- CLOGE("Stream %d does not exist",
- id);
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.get(id);
+ if (stream == nullptr) {
+ CLOGE("Stream %d does not exist", id);
return BAD_VALUE;
}
-
- return mOutputStreams.editValueAt(idx)->setTransform(transform);
+ return stream->setTransform(transform);
}
status_t Camera3Device::deleteStream(int id) {
@@ -1605,21 +1881,21 @@
}
sp<Camera3StreamInterface> deletedStream;
- ssize_t outputStreamIdx = mOutputStreams.indexOfKey(id);
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(id);
if (mInputStream != NULL && id == mInputStream->getId()) {
deletedStream = mInputStream;
mInputStream.clear();
} else {
- if (outputStreamIdx == NAME_NOT_FOUND) {
+ if (stream == nullptr) {
CLOGE("Stream %d does not exist", id);
return BAD_VALUE;
}
}
// Delete output stream or the output part of a bi-directional stream.
- if (outputStreamIdx != NAME_NOT_FOUND) {
- deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
- mOutputStreams.removeItem(id);
+ if (stream != nullptr) {
+ deletedStream = stream;
+ mOutputStreams.remove(id);
}
// Free up the stream endpoint so that it can be used by some other stream
@@ -1863,6 +2139,12 @@
break;
}
+ // Notify HAL to start draining
+ if (!active && mUseHalBufManager) {
+ auto streamIds = mOutputStreams.getStreamIds();
+ mRequestThread->signalPipelineDrain(streamIds);
+ }
+
res = mStatusChanged.waitRelative(mLock, timeout);
if (res != OK) break;
@@ -2038,15 +2320,12 @@
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- sp<Camera3StreamInterface> stream;
- ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
- if (outputStreamIdx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
CLOGE("Stream %d does not exist", streamId);
return BAD_VALUE;
}
- stream = mOutputStreams.editValueAt(outputStreamIdx);
-
if (stream->isUnpreparable() || stream->hasOutstandingBuffers() ) {
CLOGE("Stream %d has already been a request target", streamId);
return BAD_VALUE;
@@ -2066,15 +2345,12 @@
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- sp<Camera3StreamInterface> stream;
- ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
- if (outputStreamIdx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
CLOGE("Stream %d does not exist", streamId);
return BAD_VALUE;
}
- stream = mOutputStreams.editValueAt(outputStreamIdx);
-
if (stream->hasOutstandingBuffers() || mRequestThread->isStreamPending(stream)) {
CLOGE("Stream %d is a target of a in-progress request", streamId);
return BAD_VALUE;
@@ -2090,14 +2366,11 @@
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- sp<Camera3StreamInterface> stream;
- ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
- if (outputStreamIdx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
CLOGE("Stream %d does not exist", streamId);
return BAD_VALUE;
}
-
- stream = mOutputStreams.editValueAt(outputStreamIdx);
stream->addBufferListener(listener);
return OK;
@@ -2156,12 +2429,11 @@
return BAD_VALUE;
}
- ssize_t idx = mOutputStreams.indexOfKey(streamId);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
CLOGE("Stream %d is unknown", streamId);
- return idx;
+ return BAD_VALUE;
}
- sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
status_t res = stream->setConsumers(consumers);
if (res != OK) {
CLOGE("Stream %d set consumer failed (error %d %s) ", streamId, res, strerror(-res));
@@ -2206,10 +2478,10 @@
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- ssize_t idx = mOutputStreams.indexOfKey(streamId);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
CLOGE("Stream %d is unknown", streamId);
- return idx;
+ return BAD_VALUE;
}
for (const auto &it : removedSurfaceIds) {
@@ -2219,7 +2491,6 @@
}
}
- sp<Camera3OutputStreamInterface> stream = mOutputStreams[idx];
status_t res = stream->updateStream(newSurfaces, outputInfo, removedSurfaceIds, outputMap);
if (res != OK) {
CLOGE("Stream %d failed to update stream (error %d %s) ",
@@ -2238,13 +2509,11 @@
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
- int idx = mOutputStreams.indexOfKey(streamId);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
ALOGE("%s: Stream %d is not found.", __FUNCTION__, streamId);
return BAD_VALUE;
}
-
- sp<Camera3OutputStreamInterface> stream = mOutputStreams.editValueAt(idx);
return stream->dropBuffers(dropping);
}
@@ -2298,15 +2567,12 @@
}
for (size_t i = 0; i < streams.count; i++) {
- int idx = mOutputStreams.indexOfKey(streams.data.i32[i]);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3OutputStreamInterface> stream = mOutputStreams.get(streams.data.i32[i]);
+ if (stream == nullptr) {
CLOGE("Request references unknown stream %d",
- streams.data.u8[i]);
+ streams.data.i32[i]);
return NULL;
}
- sp<Camera3OutputStreamInterface> stream =
- mOutputStreams.editValueAt(idx);
-
// It is illegal to include a deferred consumer output stream into a request
auto iter = surfaceMap.find(streams.data.i32[i]);
if (iter != surfaceMap.end()) {
@@ -2367,7 +2633,7 @@
}
for (size_t i = 0; i < mOutputStreams.size(); i++) {
- sp<Camera3OutputStreamInterface> outputStream = mOutputStreams.editValueAt(i);
+ sp<Camera3OutputStreamInterface> outputStream = mOutputStreams[i];
if (outputStream->isConfiguring()) {
res = outputStream->cancelConfiguration();
if (res != OK) {
@@ -2502,7 +2768,7 @@
}
camera3_stream_t *outputStream;
- outputStream = mOutputStreams.editValueAt(i)->startConfiguration();
+ outputStream = mOutputStreams[i]->startConfiguration();
if (outputStream == NULL) {
CLOGE("Can't start output stream configuration");
cancelStreamsConfigurationLocked();
@@ -2560,8 +2826,7 @@
}
for (size_t i = 0; i < mOutputStreams.size(); i++) {
- sp<Camera3OutputStreamInterface> outputStream =
- mOutputStreams.editValueAt(i);
+ sp<Camera3OutputStreamInterface> outputStream = mOutputStreams[i];
if (outputStream->isConfiguring() && !outputStream->isConsumerConfigurationDeferred()) {
res = outputStream->finishConfiguration();
if (res != OK) {
@@ -2668,15 +2933,12 @@
// Ok, have a dummy stream and there's at least one other output stream,
// so remove the dummy
- sp<Camera3StreamInterface> deletedStream;
- ssize_t outputStreamIdx = mOutputStreams.indexOfKey(mDummyStreamId);
- if (outputStreamIdx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> deletedStream = mOutputStreams.get(mDummyStreamId);
+ if (deletedStream == nullptr) {
SET_ERR_L("Dummy stream %d does not appear to exist", mDummyStreamId);
return INVALID_OPERATION;
}
-
- deletedStream = mOutputStreams.editValueAt(outputStreamIdx);
- mOutputStreams.removeItemsAt(outputStreamIdx);
+ mOutputStreams.remove(mDummyStreamId);
// Free up the stream endpoint so that it can be used by some other stream
res = deletedStream->disconnect();
@@ -2943,12 +3205,12 @@
frameNumber, streamId, strerror(-res), res);
}
} else {
- ssize_t idx = mOutputStreams.indexOfKey(streamId);
- if (idx == NAME_NOT_FOUND) {
+ sp<Camera3StreamInterface> stream = mOutputStreams.get(streamId);
+ if (stream == nullptr) {
ALOGE("%s: Output stream id %d not found!", __FUNCTION__, streamId);
continue;
}
- streamBuffer.stream = mOutputStreams.valueAt(idx)->asHalStream();
+ streamBuffer.stream = stream->asHalStream();
returnOutputBuffers(&streamBuffer, /*size*/1, /*timestamp*/ 0);
}
}
@@ -3475,6 +3737,10 @@
mRequestMetadataQueue(queue) {
// Check with hardware service manager if we can downcast these interfaces
// Somewhat expensive, so cache the results at startup
+ auto castResult_3_5 = device::V3_5::ICameraDeviceSession::castFrom(mHidlSession);
+ if (castResult_3_5.isOk()) {
+ mHidlSession_3_5 = castResult_3_5;
+ }
auto castResult_3_4 = device::V3_4::ICameraDeviceSession::castFrom(mHidlSession);
if (castResult_3_4.isOk()) {
mHidlSession_3_4 = castResult_3_4;
@@ -3651,26 +3917,49 @@
// Invoke configureStreams
device::V3_3::HalStreamConfiguration finalConfiguration;
+ device::V3_4::HalStreamConfiguration finalConfiguration3_4;
common::V1_0::Status status;
- // See if we have v3.4 or v3.3 HAL
- if (mHidlSession_3_4 != nullptr) {
- // We do; use v3.4 for the call
- ALOGV("%s: v3.4 device found", __FUNCTION__);
- device::V3_4::HalStreamConfiguration finalConfiguration3_4;
- auto err = mHidlSession_3_4->configureStreams_3_4(requestedConfiguration3_4,
- [&status, &finalConfiguration3_4]
+ auto configStream34Cb = [&status, &finalConfiguration3_4]
(common::V1_0::Status s, const device::V3_4::HalStreamConfiguration& halConfiguration) {
finalConfiguration3_4 = halConfiguration;
status = s;
- });
- if (!err.isOk()) {
- ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
- return DEAD_OBJECT;
+ };
+
+ auto postprocConfigStream34 = [&finalConfiguration, &finalConfiguration3_4]
+ (hardware::Return<void>& err) -> status_t {
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ return DEAD_OBJECT;
+ }
+ finalConfiguration.streams.resize(finalConfiguration3_4.streams.size());
+ for (size_t i = 0; i < finalConfiguration3_4.streams.size(); i++) {
+ finalConfiguration.streams[i] = finalConfiguration3_4.streams[i].v3_3;
+ }
+ return OK;
+ };
+
+ // See if we have v3.4 or v3.3 HAL
+ if (mHidlSession_3_5 != nullptr) {
+ ALOGV("%s: v3.5 device found", __FUNCTION__);
+ device::V3_5::StreamConfiguration requestedConfiguration3_5;
+ requestedConfiguration3_5.v3_4 = requestedConfiguration3_4;
+ requestedConfiguration3_5.streamConfigCounter = mNextStreamConfigCounter++;
+ auto err = mHidlSession_3_5->configureStreams_3_5(
+ requestedConfiguration3_5, configStream34Cb);
+ res = postprocConfigStream34(err);
+ if (res != OK) {
+ return res;
}
- finalConfiguration.streams.resize(finalConfiguration3_4.streams.size());
- for (size_t i = 0; i < finalConfiguration3_4.streams.size(); i++) {
- finalConfiguration.streams[i] = finalConfiguration3_4.streams[i].v3_3;
+ } else if (mHidlSession_3_4 != nullptr) {
+ // We do; use v3.4 for the call
+ ALOGV("%s: v3.4 device found", __FUNCTION__);
+ device::V3_4::HalStreamConfiguration finalConfiguration3_4;
+ auto err = mHidlSession_3_4->configureStreams_3_4(
+ requestedConfiguration3_4, configStream34Cb);
+ res = postprocConfigStream34(err);
+ if (res != OK) {
+ return res;
}
} else if (mHidlSession_3_3 != nullptr) {
// We do; use v3.3 for the call
@@ -4041,6 +4330,20 @@
return res;
}
+void Camera3Device::HalInterface::signalPipelineDrain(const std::vector<int>& streamIds) {
+ ATRACE_NAME("CameraHal::signalPipelineDrain");
+ if (!valid() || mHidlSession_3_5 == nullptr) {
+ ALOGE("%s called on invalid camera!", __FUNCTION__);
+ return;
+ }
+
+ auto err = mHidlSession_3_5->signalStreamFlush(streamIds, mNextStreamConfigCounter);
+ if (!err.isOk()) {
+ ALOGE("%s: Transaction error: %s", __FUNCTION__, err.description().c_str());
+ return;
+ }
+}
+
void Camera3Device::HalInterface::getInflightBufferKeys(
std::vector<std::pair<int32_t, int32_t>>* out) {
std::lock_guard<std::mutex> lock(mInflightLock);
@@ -4081,6 +4384,33 @@
return OK;
}
+status_t Camera3Device::HalInterface::pushInflightRequestBuffer(
+ uint64_t bufferId, buffer_handle_t* buf) {
+ std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
+ auto pair = mRequestedBuffers.insert({bufferId, buf});
+ if (!pair.second) {
+ ALOGE("%s: bufId %" PRIu64 " is already inflight!",
+ __FUNCTION__, bufferId);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+// Find and pop a buffer_handle_t based on bufferId
+status_t Camera3Device::HalInterface::popInflightRequestBuffer(
+ uint64_t bufferId, /*out*/ buffer_handle_t **buffer) {
+ std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
+ auto it = mRequestedBuffers.find(bufferId);
+ if (it == mRequestedBuffers.end()) {
+ ALOGE("%s: bufId %" PRIu64 " is not inflight!",
+ __FUNCTION__, bufferId);
+ return BAD_VALUE;
+ }
+ *buffer = it->second;
+ mRequestedBuffers.erase(it);
+ return OK;
+}
+
std::pair<bool, uint64_t> Camera3Device::HalInterface::getBufferId(
const buffer_handle_t& buf, int streamId) {
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
@@ -4129,7 +4459,8 @@
Camera3Device::RequestThread::RequestThread(wp<Camera3Device> parent,
sp<StatusTracker> statusTracker,
- sp<HalInterface> interface, const Vector<int32_t>& sessionParamKeys) :
+ sp<HalInterface> interface, const Vector<int32_t>& sessionParamKeys,
+ bool useHalBufManager) :
Thread(/*canCallJava*/false),
mParent(parent),
mStatusTracker(statusTracker),
@@ -4139,6 +4470,7 @@
mReconfigured(false),
mDoPause(false),
mPaused(true),
+ mNotifyPipelineDrain(false),
mFrameNumber(0),
mLatestRequestId(NAME_NOT_FOUND),
mCurrentAfTriggerId(0),
@@ -4149,7 +4481,8 @@
mConstrainedMode(false),
mRequestLatency(kRequestLatencyBinSize),
mSessionParamKeys(sessionParamKeys),
- mLatestSessionParams(sessionParamKeys.size()) {
+ mLatestSessionParams(sessionParamKeys.size()),
+ mUseHalBufManager(useHalBufManager) {
mStatusId = statusTracker->addComponent();
}
@@ -4928,16 +5261,27 @@
}
}
- res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
- waitDuration,
- captureRequest->mOutputSurfaces[outputStream->getId()]);
- if (res != OK) {
- // Can't get output buffer from gralloc queue - this could be due to
- // abandoned queue or other consumer misbehavior, so not a fatal
- // error
- ALOGE("RequestThread: Can't get output buffer, skipping request:"
- " %s (%d)", strerror(-res), res);
- return TIMED_OUT;
+ if (mUseHalBufManager) {
+ // HAL will request buffer through requestStreamBuffer API
+ camera3_stream_buffer_t& buffer = outputBuffers->editItemAt(j);
+ buffer.stream = outputStream->asHalStream();
+ buffer.buffer = nullptr;
+ buffer.status = CAMERA3_BUFFER_STATUS_OK;
+ buffer.acquire_fence = -1;
+ buffer.release_fence = -1;
+ } else {
+ res = outputStream->getBuffer(&outputBuffers->editItemAt(j),
+ waitDuration,
+ captureRequest->mOutputSurfaces[outputStream->getId()]);
+ if (res != OK) {
+ // Can't get output buffer from gralloc queue - this could be due to
+ // abandoned queue or other consumer misbehavior, so not a fatal
+ // error
+ ALOGE("RequestThread: Can't get output buffer, skipping request:"
+ " %s (%d)", strerror(-res), res);
+
+ return TIMED_OUT;
+ }
}
String8 physicalCameraId = outputStream->getPhysicalCameraId();
@@ -5078,6 +5422,21 @@
return false;
}
+void Camera3Device::RequestThread::signalPipelineDrain(const std::vector<int>& streamIds) {
+ if (!mUseHalBufManager) {
+ ALOGE("%s called for camera device not supporting HAL buffer management", __FUNCTION__);
+ return;
+ }
+
+ Mutex::Autolock pl(mPauseLock);
+ if (mPaused) {
+ return mInterface->signalPipelineDrain(streamIds);
+ }
+ // If request thread is still busy, wait until paused then notify HAL
+ mNotifyPipelineDrain = true;
+ mStreamIdsToBeDrained = streamIds;
+}
+
nsecs_t Camera3Device::getExpectedInFlightDuration() {
ATRACE_CALL();
Mutex::Autolock al(mInFlightLock);
@@ -5255,6 +5614,11 @@
if (statusTracker != 0) {
statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
}
+ if (mNotifyPipelineDrain) {
+ mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
+ mNotifyPipelineDrain = false;
+ mStreamIdsToBeDrained.clear();
+ }
}
// Stop waiting for now and let thread management happen
return NULL;
@@ -5339,6 +5703,11 @@
if (statusTracker != 0) {
statusTracker->markComponentIdle(mStatusId, Fence::NO_FENCE);
}
+ if (mNotifyPipelineDrain) {
+ mInterface->signalPipelineDrain(mStreamIdsToBeDrained);
+ mNotifyPipelineDrain = false;
+ mStreamIdsToBeDrained.clear();
+ }
}
res = mDoPauseSignal.waitRelative(mPauseLock, kRequestTimeout);
@@ -5794,23 +6163,4 @@
return true;
}
-/**
- * Static callback forwarding methods from HAL to instance
- */
-
-void Camera3Device::sProcessCaptureResult(const camera3_callback_ops *cb,
- const camera3_capture_result *result) {
- Camera3Device *d =
- const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb));
-
- d->processCaptureResult(result);
-}
-
-void Camera3Device::sNotify(const camera3_callback_ops *cb,
- const camera3_notify_msg *msg) {
- Camera3Device *d =
- const_cast<Camera3Device*>(static_cast<const Camera3Device*>(cb));
- d->notify(msg);
-}
-
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 5e749b6..5c0f570 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -33,10 +33,11 @@
#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.3/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
#include <fmq/MessageQueue.h>
-#include <hardware/camera3.h>
#include <camera/CaptureResult.h>
@@ -50,20 +51,6 @@
using android::camera3::OutputStreamInfo;
-/**
- * Function pointer types with C calling convention to
- * use for HAL callback functions.
- */
-extern "C" {
- typedef void (callbacks_process_capture_result_t)(
- const struct camera3_callback_ops *,
- const camera3_capture_result_t *);
-
- typedef void (callbacks_notify_t)(
- const struct camera3_callback_ops *,
- const camera3_notify_msg_t *);
-}
-
namespace android {
namespace camera3 {
@@ -80,8 +67,7 @@
*/
class Camera3Device :
public CameraDeviceBase,
- virtual public hardware::camera::device::V3_4::ICameraDeviceCallback,
- private camera3_callback_ops {
+ virtual public hardware::camera::device::V3_5::ICameraDeviceCallback {
public:
explicit Camera3Device(const String8& id);
@@ -299,14 +285,27 @@
status_t dump(int fd);
status_t close();
+ void signalPipelineDrain(const std::vector<int>& streamIds);
+
+ // method to extract buffer's unique ID
+ // return pair of (newlySeenBuffer?, bufferId)
+ std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
+
// Find a buffer_handle_t based on frame number and stream ID
status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
/*out*/ buffer_handle_t **buffer);
+ // Register a bufId/buffer_handle_t to inflight request buffer
+ status_t pushInflightRequestBuffer(uint64_t bufferId, buffer_handle_t* buf);
+
+ // Find a buffer_handle_t based on bufferId
+ status_t popInflightRequestBuffer(uint64_t bufferId, /*out*/ buffer_handle_t **buffer);
+
// Get a vector of (frameNumber, streamId) pair of currently inflight
// buffers
void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
+ static const uint64_t BUFFER_ID_NO_BUFFER = 0;
private:
// Always valid
sp<hardware::camera::device::V3_2::ICameraDeviceSession> mHidlSession;
@@ -314,6 +313,8 @@
sp<hardware::camera::device::V3_3::ICameraDeviceSession> mHidlSession_3_3;
// Valid if ICameraDeviceSession is @3.4 or newer
sp<hardware::camera::device::V3_4::ICameraDeviceSession> mHidlSession_3_4;
+ // Valid if ICameraDeviceSession is @3.5 or newer
+ sp<hardware::camera::device::V3_5::ICameraDeviceSession> mHidlSession_3_5;
std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
@@ -365,19 +366,16 @@
// stream ID -> per stream buffer ID map
std::unordered_map<int, BufferIdMap> mBufferIdMaps;
uint64_t mNextBufferId = 1; // 0 means no buffer
- static const uint64_t BUFFER_ID_NO_BUFFER = 0;
-
- // method to extract buffer's unique ID
- // TODO: we should switch to use gralloc mapper's getBackingStore API
- // once we ran in binderized gralloc mode, but before that is ready,
- // we need to rely on the conventional buffer queue behavior where
- // buffer_handle_t's FD won't change.
- // return pair of (newlySeenBuffer?, bufferId)
- std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
virtual void onBufferFreed(int streamId, const native_handle_t* handle) override;
std::vector<std::pair<int, uint64_t>> mFreedBuffers;
+
+    // Buffers given to HAL through the requestStreamBuffers API
+ std::mutex mRequestedBuffersLock;
+ std::unordered_map<uint64_t, buffer_handle_t*> mRequestedBuffers;
+
+ uint32_t mNextStreamConfigCounter = 1;
};
sp<HalInterface> mInterface;
@@ -412,9 +410,22 @@
// Tracking cause of fatal errors when in STATUS_ERROR
String8 mErrorCause;
- // Mapping of stream IDs to stream instances
- typedef KeyedVector<int, sp<camera3::Camera3OutputStreamInterface> >
- StreamSet;
+ // Synchronized mapping of stream IDs to stream instances
+ class StreamSet {
+ public:
+ status_t add(int streamId, sp<camera3::Camera3OutputStreamInterface>);
+ ssize_t remove(int streamId);
+ sp<camera3::Camera3OutputStreamInterface> get(int streamId);
+ // get by (underlying) vector index
+ sp<camera3::Camera3OutputStreamInterface> operator[] (size_t index);
+ size_t size() const;
+ std::vector<int> getStreamIds();
+ void clear();
+
+ private:
+ mutable std::mutex mLock;
+ KeyedVector<int, sp<camera3::Camera3OutputStreamInterface>> mData;
+ };
StreamSet mOutputStreams;
sp<camera3::Camera3Stream> mInputStream;
@@ -483,8 +494,9 @@
/**
- * Implementation of android::hardware::camera::device::V3_4::ICameraDeviceCallback
+ * Implementation of android::hardware::camera::device::V3_5::ICameraDeviceCallback
*/
+
hardware::Return<void> processCaptureResult_3_4(
const hardware::hidl_vec<
hardware::camera::device::V3_4::CaptureResult>& results) override;
@@ -495,6 +507,15 @@
const hardware::hidl_vec<
hardware::camera::device::V3_2::NotifyMsg>& msgs) override;
+ hardware::Return<void> requestStreamBuffers(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_5::BufferRequest>& bufReqs,
+ requestStreamBuffers_cb _hidl_cb) override;
+
+ hardware::Return<void> returnStreamBuffers(
+ const hardware::hidl_vec<
+ hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
+
// Handle one capture result. Assume that mProcessCaptureResultLock is held.
void processOneCaptureResultLocked(
const hardware::camera::device::V3_2::CaptureResult& result,
@@ -702,7 +723,9 @@
RequestThread(wp<Camera3Device> parent,
sp<camera3::StatusTracker> statusTracker,
- sp<HalInterface> interface, const Vector<int32_t>& sessionParamKeys);
+ sp<HalInterface> interface,
+ const Vector<int32_t>& sessionParamKeys,
+ bool useHalBufManager);
~RequestThread();
void setNotificationListener(wp<NotificationListener> listener);
@@ -790,6 +813,8 @@
mRequestLatency.dump(fd, name);
}
+ void signalPipelineDrain(const std::vector<int>& streamIds);
+
protected:
virtual bool threadLoop();
@@ -899,12 +924,13 @@
bool mReconfigured;
- // Used by waitIfPaused, waitForNextRequest, and waitUntilPaused
+ // Used by waitIfPaused, waitForNextRequest, waitUntilPaused, and signalPipelineDrain
Mutex mPauseLock;
bool mDoPause;
Condition mDoPauseSignal;
bool mPaused;
- Condition mPausedSignal;
+ bool mNotifyPipelineDrain;
+ std::vector<int> mStreamIdsToBeDrained;
sp<CaptureRequest> mPrevRequest;
int32_t mPrevTriggers;
@@ -937,6 +963,8 @@
Vector<int32_t> mSessionParamKeys;
CameraMetadata mLatestSessionParams;
+
+ const bool mUseHalBufManager;
};
sp<RequestThread> mRequestThread;
@@ -1020,7 +1048,7 @@
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
bool hasAppCallback, nsecs_t maxDuration,
- const std::set<String8>& physicalCameraIdSet, bool isStillCapture,
+ const std::set<String8>& physicalCameraIdSet, bool isStillCapture,
bool isZslCapture) :
shutterTimestamp(0),
sensorTimestamp(0),
@@ -1217,16 +1245,16 @@
// Cached last requested template id
int mLastTemplateId;
- /**
- * Static callback forwarding methods from HAL to instance
- */
- static callbacks_process_capture_result_t sProcessCaptureResult;
-
- static callbacks_notify_t sNotify;
-
// Synchronizes access to status tracker between inflight updates and disconnect.
// b/79972865
Mutex mTrackerLock;
+
+    // Whether the HAL requests buffers through the requestStreamBuffers API
+ bool mUseHalBufManager = false;
+
+ // Lock to ensure requestStreamBuffers() callbacks are serialized
+ std::mutex mRequestBufferInterfaceLock;
+
}; // class Camera3Device
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 3c1e43d..18b8c4d 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -116,7 +116,7 @@
return mTotalBufferCount;
}
-size_t Camera3IOStreamBase::getHandoutOutputBufferCountLocked() {
+size_t Camera3IOStreamBase::getHandoutOutputBufferCountLocked() const {
return mHandoutOutputBufferCount;
}
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 0a31d44..48e9bbf 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -82,7 +82,7 @@
virtual size_t getBufferCountLocked();
- virtual size_t getHandoutOutputBufferCountLocked();
+ virtual size_t getHandoutOutputBufferCountLocked() const;
virtual size_t getHandoutInputBufferCountLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
index fb3ce4c..1c13950 100644
--- a/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3SharedOutputStream.cpp
@@ -138,6 +138,10 @@
return res;
}
+ // TODO: need to refactor this to support requestStreamBuffers API
+ // Need to wait until processCaptureResult to decide the source buffer
+ // to attach to output...
+
// Attach the buffer to the splitter output queues. This could block if
// the output queue doesn't have any empty slot. So unlock during the course
// of attachBufferToOutputs.
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index ee989e1..0a30a97 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -795,6 +795,12 @@
return hasOutstandingBuffersLocked();
}
+size_t Camera3Stream::getOutstandingBuffersCount() const {
+ ATRACE_CALL();
+ Mutex::Autolock l(mLock);
+ return getHandoutOutputBufferCountLocked();
+}
+
status_t Camera3Stream::setStatusTracker(sp<StatusTracker> statusTracker) {
Mutex::Autolock l(mLock);
sp<StatusTracker> oldTracker = mStatusTracker.promote();
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 1c67fb2..e29c3e0 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -360,6 +360,11 @@
*/
bool hasOutstandingBuffers() const;
+ /**
+ * Get number of buffers currently handed out to HAL
+ */
+ size_t getOutstandingBuffersCount() const;
+
enum {
TIMEOUT_NEVER = -1
};
@@ -495,7 +500,7 @@
virtual size_t getBufferCountLocked() = 0;
// Get handout output buffer count.
- virtual size_t getHandoutOutputBufferCountLocked() = 0;
+ virtual size_t getHandoutOutputBufferCountLocked() const = 0;
// Get handout input buffer count.
virtual size_t getHandoutInputBufferCountLocked() = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 5758ac8..866b722 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -289,6 +289,11 @@
*/
virtual bool hasOutstandingBuffers() const = 0;
+ /**
+ * Get number of buffers currently handed out to HAL
+ */
+ virtual size_t getOutstandingBuffersCount() const = 0;
+
enum {
TIMEOUT_NEVER = -1
};
diff --git a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
index d70e27b..e6c676c 100644
--- a/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
+++ b/services/mediaextractor/seccomp_policy/mediaextractor-arm64.policy
@@ -40,4 +40,7 @@
pread64: 1
mremap: 1
+# Required by Sanitizers
+sched_yield: 1
+
@include /system/etc/seccomp_policy/crash_dump.arm64.policy