Merge "Improve audio logs and dumpsys media.audio_flinger" into oc-dev
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index bf9904c..c6c35ef 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -240,6 +240,14 @@
c->releaseRecordingFrameHandle(handle);
}
+void Camera::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*> handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ sp <::android::hardware::ICamera> c = mCamera;
+ if (c == 0) return;
+ c->releaseRecordingFrameHandleBatch(handles);
+}
+
// get preview state
bool Camera::previewEnabled()
{
@@ -418,6 +426,37 @@
}
}
+void Camera::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles)
+{
+ // If recording proxy listener is registered, forward the frame and return.
+ // The other listener (mListener) is ignored because the receiver needs to
+ // call releaseRecordingFrameHandle.
+ sp<ICameraRecordingProxyListener> proxylistener;
+ {
+ Mutex::Autolock _l(mLock);
+ proxylistener = mRecordingProxyListener;
+ }
+ if (proxylistener != NULL) {
+ proxylistener->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return;
+ }
+
+ sp<CameraListener> listener;
+ {
+ Mutex::Autolock _l(mLock);
+ listener = mListener;
+ }
+
+ if (listener != NULL) {
+ listener->postRecordingFrameHandleTimestampBatch(timestamps, handles);
+ } else {
+ ALOGW("No listener was set. Drop a batch of recording frames.");
+ releaseRecordingFrameHandleBatch(handles);
+ }
+}
+
sp<ICameraRecordingProxy> Camera::getRecordingProxy() {
ALOGV("getProxy");
return new RecordingProxy(this);
@@ -448,6 +487,12 @@
mCamera->releaseRecordingFrameHandle(handle);
}
+void Camera::RecordingProxy::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("RecordingProxy::releaseRecordingFrameHandleBatch");
+ mCamera->releaseRecordingFrameHandleBatch(handles);
+}
+
Camera::RecordingProxy::RecordingProxy(const sp<Camera>& camera)
{
mCamera = camera;
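
The batch callback added above follows the same dispatch shape as the single-frame path: snapshot the listener pointer while holding mLock, drop the lock, then either forward the batch or release the handles so they are not leaked. The following is a minimal standalone sketch of that snapshot-then-dispatch pattern using only the standard library; the Frame type, BatchListener alias, and delete-based release are hypothetical stand-ins for native_handle_t and releaseRecordingFrameHandleBatch, not the camera API itself.

```cpp
#include <cstdio>
#include <functional>
#include <mutex>
#include <vector>

// Hypothetical stand-ins for native_handle_t and its release path.
struct Frame { int fd; };
using BatchListener = std::function<void(const std::vector<int64_t>&,
                                         const std::vector<Frame*>&)>;

class Dispatcher {
public:
    void setListener(BatchListener listener) {
        std::lock_guard<std::mutex> lock(mLock);
        mListener = std::move(listener);
    }

    // Mirrors the shape of Camera::recordingFrameHandleCallbackTimestampBatch:
    // copy the listener under the lock, invoke it outside the lock, and
    // release the frames ourselves if no listener is installed.
    void onBatch(const std::vector<int64_t>& timestamps,
                 const std::vector<Frame*>& frames) {
        BatchListener listener;
        {
            std::lock_guard<std::mutex> lock(mLock);
            listener = mListener;
        }
        if (listener) {
            listener(timestamps, frames);
        } else {
            std::printf("No listener set; dropping %zu frames\n", frames.size());
            for (Frame* f : frames) delete f;   // stand-in for releasing the batch
        }
    }

private:
    std::mutex mLock;
    BatchListener mListener;
};

int main() {
    Dispatcher d;
    d.onBatch({1000}, {new Frame{3}});          // no listener: frames are released
    d.setListener([](const std::vector<int64_t>&, const std::vector<Frame*>& frames) {
        std::printf("got %zu frames\n", frames.size());
        for (Frame* f : frames) delete f;       // consumer releases the batch
    });
    d.onBatch({2000, 3000}, {new Frame{4}, new Frame{5}});
    return 0;
}
```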
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 373b94e..e143e05 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -170,7 +170,7 @@
}
status_t CameraMetadata::checkType(uint32_t tag, uint8_t expectedType) {
- int tagType = get_camera_metadata_tag_type(tag);
+ int tagType = get_local_camera_metadata_tag_type(tag, mBuffer);
if ( CC_UNLIKELY(tagType == -1)) {
ALOGE("Update metadata entry: Unknown tag %d", tag);
return INVALID_OPERATION;
@@ -178,7 +178,7 @@
if ( CC_UNLIKELY(tagType != expectedType) ) {
ALOGE("Mismatched tag type when updating entry %s (%d) of type %s; "
"got type %s data instead ",
- get_camera_metadata_tag_name(tag), tag,
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag,
camera_metadata_type_names[tagType],
camera_metadata_type_names[expectedType]);
return INVALID_OPERATION;
@@ -297,7 +297,7 @@
ALOGE("%s: CameraMetadata is locked", __FUNCTION__);
return INVALID_OPERATION;
}
- int type = get_camera_metadata_tag_type(tag);
+ int type = get_local_camera_metadata_tag_type(tag, mBuffer);
if (type == -1) {
ALOGE("%s: Tag %d not found", __FUNCTION__, tag);
return BAD_VALUE;
@@ -332,8 +332,9 @@
if (res != OK) {
ALOGE("%s: Unable to update metadata entry %s.%s (%x): %s (%d)",
- __FUNCTION__, get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ __FUNCTION__, get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer), tag,
+ strerror(-res), res);
}
IF_ALOGV() {
@@ -392,16 +393,18 @@
} else if (res != OK) {
ALOGE("%s: Error looking for entry %s.%s (%x): %s %d",
__FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer),
+ tag, strerror(-res), res);
return res;
}
res = delete_camera_metadata_entry(mBuffer, entry.index);
if (res != OK) {
ALOGE("%s: Error deleting entry %s.%s (%x): %s %d",
__FUNCTION__,
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag), tag, strerror(-res), res);
+ get_local_camera_metadata_section_name(tag, mBuffer),
+ get_local_camera_metadata_tag_name(tag, mBuffer),
+ tag, strerror(-res), res);
}
return res;
}
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 2bf956d..f0945c7 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -55,6 +55,7 @@
SET_VIDEO_BUFFER_MODE,
SET_VIDEO_BUFFER_TARGET,
RELEASE_RECORDING_FRAME_HANDLE,
+ RELEASE_RECORDING_FRAME_HANDLE_BATCH,
};
class BpCamera: public BpInterface<ICamera>
@@ -172,6 +173,24 @@
native_handle_delete(handle);
}
+ void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
+ uint32_t n = handles.size();
+ data.writeUint32(n);
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+ // Close the native handle because camera received a dup copy.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
+
status_t setVideoBufferMode(int32_t videoBufferMode)
{
ALOGV("setVideoBufferMode: %d", videoBufferMode);
@@ -378,6 +397,19 @@
releaseRecordingFrameHandle(data.readNativeHandle());
return NO_ERROR;
} break;
+ case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+ ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+ CHECK_INTERFACE(ICamera, data, reply);
+ // releaseRecordingFrameHandleBatch will be responsible for closing the native handles.
+ uint32_t n = data.readUint32();
+ std::vector<native_handle_t*> handles;
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ handles.push_back(data.readNativeHandle());
+ }
+ releaseRecordingFrameHandleBatch(handles);
+ return NO_ERROR;
+ } break;
case SET_VIDEO_BUFFER_MODE: {
ALOGV("SET_VIDEO_BUFFER_MODE");
CHECK_INTERFACE(ICamera, data, reply);
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 1b6fac4..7e6297c 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -32,6 +32,7 @@
DATA_CALLBACK,
DATA_CALLBACK_TIMESTAMP,
RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+ RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH,
};
class BpCameraClient: public BpInterface<ICameraClient>
@@ -91,6 +92,29 @@
remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP, data, &reply,
IBinder::FLAG_ONEWAY);
}
+
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraClient::getInterfaceDescriptor());
+ uint32_t n = timestamps.size();
+ if (n != handles.size()) {
+ ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+ __FUNCTION__, timestamps.size(), handles.size());
+ return;
+ }
+ data.writeUint32(n);
+ for (auto ts : timestamps) {
+ data.writeInt64(ts);
+ }
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+ IBinder::FLAG_ONEWAY);
+ }
};
IMPLEMENT_META_INTERFACE(CameraClient, "android.hardware.ICameraClient");
@@ -154,6 +178,41 @@
recordingFrameHandleCallbackTimestamp(timestamp, handle);
return NO_ERROR;
} break;
+ case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+ ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+ CHECK_INTERFACE(ICameraClient, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.resize(n);
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ res = data.readInt64(&timestamps[i]);
+ if (res != OK) {
+ ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+ __FUNCTION__, i, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ }
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+
+ // The native handle will be freed in BpCamera::releaseRecordingFrameHandleBatch.
+ recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
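
Both sides of the new _BATCH transactions use the same count-prefixed layout: a uint32 element count, then the timestamps, then the native handles, with every read checked before use. Below is a minimal standalone sketch of that layout under simplifying assumptions: a plain byte vector stands in for Parcel and int64 payloads stand in for native handles; none of these names are part of the Binder API.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <optional>
#include <vector>

// Toy stand-in for Parcel: count-prefixed serialization of a timestamp batch.
static void writeBatch(std::vector<uint8_t>& out, const std::vector<int64_t>& timestamps) {
    uint32_t n = static_cast<uint32_t>(timestamps.size());
    out.insert(out.end(), reinterpret_cast<uint8_t*>(&n),
               reinterpret_cast<uint8_t*>(&n) + sizeof(n));
    for (int64_t ts : timestamps) {
        out.insert(out.end(), reinterpret_cast<uint8_t*>(&ts),
                   reinterpret_cast<uint8_t*>(&ts) + sizeof(ts));
    }
}

// Read the count first, then each element, failing fast on a short buffer --
// the same shape as the RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH handler.
static std::optional<std::vector<int64_t>> readBatch(const std::vector<uint8_t>& in) {
    size_t pos = 0;
    uint32_t n = 0;
    if (in.size() < sizeof(n)) return std::nullopt;
    std::memcpy(&n, in.data(), sizeof(n));
    pos += sizeof(n);

    std::vector<int64_t> timestamps;
    timestamps.reserve(n);
    for (uint32_t i = 0; i < n; i++) {
        int64_t ts = 0;
        if (in.size() - pos < sizeof(ts)) return std::nullopt;  // short read: reject batch
        std::memcpy(&ts, in.data() + pos, sizeof(ts));
        pos += sizeof(ts);
        timestamps.push_back(ts);
    }
    return timestamps;
}

int main() {
    std::vector<uint8_t> buf;
    writeBatch(buf, {100, 200, 300});
    if (auto batch = readBatch(buf)) {
        std::printf("read %zu timestamps\n", batch->size());
    }
    return 0;
}
```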
diff --git a/camera/ICameraRecordingProxy.cpp b/camera/ICameraRecordingProxy.cpp
index c9f8b5c..bd6af75 100644
--- a/camera/ICameraRecordingProxy.cpp
+++ b/camera/ICameraRecordingProxy.cpp
@@ -32,6 +32,7 @@
STOP_RECORDING,
RELEASE_RECORDING_FRAME,
RELEASE_RECORDING_FRAME_HANDLE,
+ RELEASE_RECORDING_FRAME_HANDLE_BATCH,
};
@@ -82,6 +83,24 @@
native_handle_close(handle);
native_handle_delete(handle);
}
+
+ void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ ALOGV("releaseRecordingFrameHandleBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraRecordingProxy::getInterfaceDescriptor());
+ uint32_t n = handles.size();
+ data.writeUint32(n);
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RELEASE_RECORDING_FRAME_HANDLE_BATCH, data, &reply);
+
+ // Close the native handle because camera received a dup copy.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxy, "android.hardware.ICameraRecordingProxy");
@@ -121,6 +140,31 @@
releaseRecordingFrameHandle(data.readNativeHandle());
return NO_ERROR;
} break;
+ case RELEASE_RECORDING_FRAME_HANDLE_BATCH: {
+ ALOGV("RELEASE_RECORDING_FRAME_HANDLE_BATCH");
+ CHECK_INTERFACE(ICameraRecordingProxy, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<native_handle_t*> handles;
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+
+ // releaseRecordingFrameHandleBatch will be responsible for closing the native handles.
+ releaseRecordingFrameHandleBatch(handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/camera/ICameraRecordingProxyListener.cpp b/camera/ICameraRecordingProxyListener.cpp
index 8529d3e..c954241 100644
--- a/camera/ICameraRecordingProxyListener.cpp
+++ b/camera/ICameraRecordingProxyListener.cpp
@@ -28,6 +28,7 @@
enum {
DATA_CALLBACK_TIMESTAMP = IBinder::FIRST_CALL_TRANSACTION,
RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP,
+ RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH
};
class BpCameraRecordingProxyListener: public BpInterface<ICameraRecordingProxyListener>
@@ -62,6 +63,36 @@
native_handle_close(handle);
native_handle_delete(handle);
}
+
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ Parcel data, reply;
+ data.writeInterfaceToken(ICameraRecordingProxyListener::getInterfaceDescriptor());
+
+ uint32_t n = timestamps.size();
+ if (n != handles.size()) {
+ ALOGE("%s: size of timestamps(%zu) and handles(%zu) mismatch!",
+ __FUNCTION__, timestamps.size(), handles.size());
+ return;
+ }
+ data.writeUint32(n);
+ for (auto ts : timestamps) {
+ data.writeInt64(ts);
+ }
+ for (auto& handle : handles) {
+ data.writeNativeHandle(handle);
+ }
+ remote()->transact(RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH, data, &reply,
+ IBinder::FLAG_ONEWAY);
+
+ // The native handle is dupped in ICameraClient so we need to free it here.
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
};
IMPLEMENT_META_INTERFACE(CameraRecordingProxyListener, "android.hardware.ICameraRecordingProxyListener");
@@ -101,6 +132,41 @@
recordingFrameHandleCallbackTimestamp(timestamp, handle);
return NO_ERROR;
} break;
+ case RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH: {
+ ALOGV("RECORDING_FRAME_HANDLE_CALLBACK_TIMESTAMP_BATCH");
+ CHECK_INTERFACE(ICameraRecordingProxyListener, data, reply);
+ uint32_t n = 0;
+ status_t res = data.readUint32(&n);
+ if (res != OK) {
+ ALOGE("%s: Failed to read batch size: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.resize(n);
+ handles.reserve(n);
+ for (uint32_t i = 0; i < n; i++) {
+ res = data.readInt64(&timestamps[i]);
+ if (res != OK) {
+ ALOGE("%s: Failed to read timestamp[%d]: %s (%d)",
+ __FUNCTION__, i, strerror(-res), res);
+ return BAD_VALUE;
+ }
+ }
+ for (uint32_t i = 0; i < n; i++) {
+ native_handle_t* handle = data.readNativeHandle();
+ if (handle == nullptr) {
+ ALOGE("%s: Received a null native handle at handles[%d]",
+ __FUNCTION__, i);
+ return BAD_VALUE;
+ }
+ handles.push_back(handle);
+ }
+ // The native handle will be freed in
+ // BpCameraRecordingProxy::releaseRecordingFrameHandleBatch.
+ recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ return NO_ERROR;
+ } break;
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index ed09b60..4c28789 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -29,6 +29,7 @@
#include <stdio.h>
#include <string.h>
+#include <inttypes.h>
namespace android {
@@ -40,11 +41,22 @@
static const char* vendor_tag_descriptor_get_tag_name(const vendor_tag_ops_t* v, uint32_t tag);
static int vendor_tag_descriptor_get_tag_type(const vendor_tag_ops_t* v, uint32_t tag);
+static int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id);
+static void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+ metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+ metadata_vendor_id_t id);
+static const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+ metadata_vendor_id_t id);
+static int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+ metadata_vendor_id_t id);
+
} /* extern "C" */
static Mutex sLock;
static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
+static sp<VendorTagDescriptorCache> sGlobalVendorTagDescriptorCache;
namespace hardware {
namespace camera2 {
@@ -333,11 +345,166 @@
}
+status_t VendorTagDescriptorCache::writeToParcel(Parcel* parcel) const {
+ status_t res = OK;
+ if (parcel == NULL) {
+ ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ if ((res = parcel->writeInt32(mVendorMap.size())) != OK) {
+ return res;
+ }
+
+ for (const auto &iter : mVendorMap) {
+ if ((res = parcel->writeUint64(iter.first)) != OK) break;
+ if ((res = parcel->writeParcelable(*iter.second)) != OK) break;
+ }
+
+ return res;
+}
+
+
+status_t VendorTagDescriptorCache::readFromParcel(const Parcel* parcel) {
+ status_t res = OK;
+ if (parcel == NULL) {
+ ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ int32_t vendorCount = 0;
+ if ((res = parcel->readInt32(&vendorCount)) != OK) {
+ ALOGE("%s: could not read vendor count from parcel", __FUNCTION__);
+ return res;
+ }
+
+ if (vendorCount < 0 || vendorCount > INT32_MAX) {
+ ALOGE("%s: vendor count %d from is invalid.", __FUNCTION__, vendorCount);
+ return BAD_VALUE;
+ }
+
+ metadata_vendor_id_t id;
+ for (int32_t i = 0; i < vendorCount; i++) {
+ if ((res = parcel->readUint64(&id)) != OK) {
+ ALOGE("%s: could not read vendor id from parcel for index %d",
+ __FUNCTION__, i);
+ break;
+ }
+ sp<android::VendorTagDescriptor> desc = new android::VendorTagDescriptor();
+ if ((res = parcel->readParcelable(desc.get())) != OK) {
+ ALOGE("%s: could not read vendor tag descriptor from parcel for index %d rc = %d",
+ __FUNCTION__, i, res);
+ break;
+ }
+
+ if ((res = addVendorDescriptor(id, desc)) != OK) {
+ ALOGE("%s: failed to add vendor tag descriptor for index: %d ",
+ __FUNCTION__, i);
+ break;
+ }
+ }
+
+ return res;
+}
+
+int VendorTagDescriptorCache::getTagCount(metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagCount();
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::getTagArray(uint32_t* tagArray,
+ metadata_vendor_id_t id) const {
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ desc->second->getTagArray(tagArray);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+}
+
+const char* VendorTagDescriptorCache::getSectionName(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ const char *ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getSectionName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+const char* VendorTagDescriptorCache::getTagName(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ const char *ret = nullptr;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagName(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+int VendorTagDescriptorCache::getTagType(uint32_t tag,
+ metadata_vendor_id_t id) const {
+ int ret = 0;
+ auto desc = mVendorMap.find(id);
+ if (desc != mVendorMap.end()) {
+ ret = desc->second->getTagType(tag);
+ } else {
+ ALOGE("%s: Vendor descriptor id is missing!", __func__);
+ }
+
+ return ret;
+}
+
+void VendorTagDescriptorCache::dump(int fd, int verbosity,
+ int indentation) const {
+ for (const auto &desc : mVendorMap) {
+ dprintf(fd, "%*sDumping vendor tag descriptors for vendor with"
+ " id %" PRIu64 " \n", indentation, "", desc.first);
+ desc.second->dump(fd, verbosity, indentation);
+ }
+}
+
+int32_t VendorTagDescriptorCache::addVendorDescriptor(metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> desc) {
+ auto entry = mVendorMap.find(id);
+ if (entry != mVendorMap.end()) {
+ ALOGE("%s: Vendor descriptor with same id already present!", __func__);
+ return BAD_VALUE;
+ }
+
+ mVendorMap.emplace(id, desc);
+ return NO_ERROR;
+}
+
+int32_t VendorTagDescriptorCache::getVendorTagDescriptor(
+ metadata_vendor_id_t id, sp<android::VendorTagDescriptor> *desc /*out*/) {
+ auto entry = mVendorMap.find(id);
+ if (entry == mVendorMap.end()) {
+ return NAME_NOT_FOUND;
+ }
+
+ *desc = entry->second;
+
+ return NO_ERROR;
+}
+
} // namespace params
} // namespace camera2
} // namespace hardware
-
status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
/*out*/
sp<VendorTagDescriptor>& descriptor) {
@@ -451,6 +618,39 @@
return sGlobalVendorTagDescriptor;
}
+status_t VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+ const sp<VendorTagDescriptorCache>& cache) {
+ status_t res = OK;
+ Mutex::Autolock al(sLock);
+ sGlobalVendorTagDescriptorCache = cache;
+
+ struct vendor_tag_cache_ops* opsPtr = NULL;
+ if (cache != NULL) {
+ opsPtr = &(cache->mVendorCacheOps);
+ opsPtr->get_tag_count = vendor_tag_descriptor_cache_get_tag_count;
+ opsPtr->get_all_tags = vendor_tag_descriptor_cache_get_all_tags;
+ opsPtr->get_section_name = vendor_tag_descriptor_cache_get_section_name;
+ opsPtr->get_tag_name = vendor_tag_descriptor_cache_get_tag_name;
+ opsPtr->get_tag_type = vendor_tag_descriptor_cache_get_tag_type;
+ }
+ if((res = set_camera_metadata_vendor_cache_ops(opsPtr)) != OK) {
+ ALOGE("%s: Could not set vendor tag cache, received error %s (%d)."
+ , __FUNCTION__, strerror(-res), res);
+ }
+ return res;
+}
+
+void VendorTagDescriptorCache::clearGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ set_camera_metadata_vendor_cache_ops(NULL);
+ sGlobalVendorTagDescriptorCache.clear();
+}
+
+sp<VendorTagDescriptorCache> VendorTagDescriptorCache::getGlobalVendorTagCache() {
+ Mutex::Autolock al(sLock);
+ return sGlobalVendorTagDescriptorCache;
+}
+
extern "C" {
int vendor_tag_descriptor_get_tag_count(const vendor_tag_ops_t* /*v*/) {
@@ -498,5 +698,53 @@
return sGlobalVendorTagDescriptor->getTagType(tag);
}
+int vendor_tag_descriptor_cache_get_tag_count(metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_COUNT_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagCount(id);
+}
+
+void vendor_tag_descriptor_cache_get_all_tags(uint32_t* tagArray,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ }
+ sGlobalVendorTagDescriptorCache->getTagArray(tagArray, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_section_name(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_SECTION_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getSectionName(tag, id);
+}
+
+const char* vendor_tag_descriptor_cache_get_tag_name(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagName(tag, id);
+}
+
+int vendor_tag_descriptor_cache_get_tag_type(uint32_t tag,
+ metadata_vendor_id_t id) {
+ Mutex::Autolock al(sLock);
+ if (sGlobalVendorTagDescriptorCache == NULL) {
+ ALOGE("%s: Vendor tag descriptor cache not initialized.", __FUNCTION__);
+ return VENDOR_TAG_NAME_ERR;
+ }
+ return sGlobalVendorTagDescriptorCache->getTagType(tag, id);
+}
+
} /* extern "C" */
} /* namespace android */
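
VendorTagDescriptorCache is essentially a map from metadata_vendor_id_t to a per-provider descriptor; every query (tag count, section/tag names, types) first looks up the provider by id and then delegates to it. The following standalone sketch shows that lookup-then-delegate shape; the Descriptor type, field names, and uint64_t id are hypothetical simplifications of VendorTagDescriptor and metadata_vendor_id_t.

```cpp
#include <cstdint>
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

// Hypothetical per-provider descriptor standing in for VendorTagDescriptor.
struct Descriptor {
    std::unordered_map<uint32_t, std::string> tagNames;
    const char* tagName(uint32_t tag) const {
        auto it = tagNames.find(tag);
        return it == tagNames.end() ? nullptr : it->second.c_str();
    }
};

// Lookup-then-delegate cache keyed by vendor id, mirroring the shape of
// VendorTagDescriptorCache::getTagName / getTagType.
class DescriptorCache {
public:
    bool addVendorDescriptor(uint64_t id, std::shared_ptr<Descriptor> desc) {
        return mVendorMap.emplace(id, std::move(desc)).second;  // reject duplicate ids
    }
    const char* getTagName(uint32_t tag, uint64_t id) const {
        auto it = mVendorMap.find(id);
        if (it == mVendorMap.end()) {
            std::fprintf(stderr, "Vendor descriptor id %llu is missing\n",
                         static_cast<unsigned long long>(id));
            return nullptr;
        }
        return it->second->tagName(tag);
    }
private:
    std::unordered_map<uint64_t, std::shared_ptr<Descriptor>> mVendorMap;
};

int main() {
    auto desc = std::make_shared<Descriptor>();
    desc->tagNames[0x80000000u] = "exampleVendorTag";
    DescriptorCache cache;
    cache.addVendorDescriptor(42, desc);
    std::printf("tag name: %s\n", cache.getTagName(0x80000000u, 42));
    return 0;
}
```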
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 99c479c..9c0f28b 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -21,6 +21,7 @@
import android.hardware.camera2.ICameraDeviceUser;
import android.hardware.camera2.ICameraDeviceCallbacks;
import android.hardware.camera2.params.VendorTagDescriptor;
+import android.hardware.camera2.params.VendorTagDescriptorCache;
import android.hardware.camera2.impl.CameraMetadataNative;
import android.hardware.ICameraServiceListener;
import android.hardware.CameraInfo;
@@ -130,6 +131,14 @@
VendorTagDescriptor getCameraVendorTagDescriptor();
/**
+ * Retrieve the vendor tag descriptor cache which can have multiple vendor
+ * providers.
+ * Intended to be used by the native code of CameraMetadataNative to correctly
+ * interpret camera metadata with vendor tags.
+ */
+ VendorTagDescriptorCache getCameraVendorTagCache();
+
+ /**
* Read the legacy camera1 parameters into a String
*/
String getLegacyParameters(int cameraId);
diff --git a/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
new file mode 100644
index 0000000..d212207
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptorCache.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable VendorTagDescriptorCache cpp_header "camera/VendorTagDescriptor.h";
diff --git a/camera/include/camera/Camera.h b/camera/include/camera/Camera.h
index 57dc228..430aa1c 100644
--- a/camera/include/camera/Camera.h
+++ b/camera/include/camera/Camera.h
@@ -44,6 +44,9 @@
camera_frame_metadata_t *metadata) = 0;
virtual void postDataTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr) = 0;
virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle) = 0;
+ virtual void postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
class Camera;
@@ -118,6 +121,10 @@
// release a recording frame handle
void releaseRecordingFrameHandle(native_handle_t *handle);
+ // release a batch of recording frame handles
+ void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*> handles);
+
// autoFocus - status returned from callback
status_t autoFocus();
@@ -166,6 +173,10 @@
camera_frame_metadata_t *metadata);
virtual void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr);
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp, native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles);
+
class RecordingProxy : public BnCameraRecordingProxy
{
@@ -177,6 +188,8 @@
virtual void stopRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
private:
sp<Camera> mCamera;
diff --git a/camera/include/camera/ICameraRecordingProxy.h b/camera/include/camera/ICameraRecordingProxy.h
index cb6824a..02af2f3 100644
--- a/camera/include/camera/ICameraRecordingProxy.h
+++ b/camera/include/camera/ICameraRecordingProxy.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_H
+#include <vector>
#include <binder/IInterface.h>
#include <cutils/native_handle.h>
#include <utils/RefBase.h>
@@ -85,6 +86,8 @@
virtual void stopRecording() = 0;
virtual void releaseRecordingFrame(const sp<IMemory>& mem) = 0;
virtual void releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/include/camera/ICameraRecordingProxyListener.h b/camera/include/camera/ICameraRecordingProxyListener.h
index 1fee5b9..da03c56 100644
--- a/camera/include/camera/ICameraRecordingProxyListener.h
+++ b/camera/include/camera/ICameraRecordingProxyListener.h
@@ -17,6 +17,7 @@
#ifndef ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
#define ANDROID_HARDWARE_ICAMERA_RECORDING_PROXY_LISTENER_H
+#include <vector>
#include <binder/IInterface.h>
#include <cutils/native_handle.h>
#include <stdint.h>
@@ -38,6 +39,10 @@
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
native_handle_t* handle) = 0;
+
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/include/camera/VendorTagDescriptor.h b/camera/include/camera/VendorTagDescriptor.h
index adfc8c7..904fba2 100644
--- a/camera/include/camera/VendorTagDescriptor.h
+++ b/camera/include/camera/VendorTagDescriptor.h
@@ -22,7 +22,7 @@
#include <utils/String8.h>
#include <utils/RefBase.h>
#include <system/camera_vendor_tags.h>
-
+#include <unordered_map>
#include <stdint.h>
namespace android {
@@ -166,8 +166,84 @@
};
-} /* namespace android */
+namespace hardware {
+namespace camera2 {
+namespace params {
+class VendorTagDescriptorCache : public Parcelable {
+ public:
+
+ VendorTagDescriptorCache() {};
+
+ int32_t addVendorDescriptor(metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> desc);
+
+ int32_t getVendorTagDescriptor(
+ metadata_vendor_id_t id,
+ sp<android::VendorTagDescriptor> *desc /*out*/);
+
+ // Parcelable interface
+ status_t writeToParcel(Parcel* parcel) const override;
+ status_t readFromParcel(const Parcel* parcel) override;
+
+ // Returns the number of vendor tags defined.
+ int getTagCount(metadata_vendor_id_t id) const;
+
+ // Returns an array containing the id's of vendor tags defined.
+ void getTagArray(uint32_t* tagArray, metadata_vendor_id_t id) const;
+
+ // Returns the section name string for a given vendor tag id.
+ const char* getSectionName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag name string for a given vendor tag id.
+ const char* getTagName(uint32_t tag, metadata_vendor_id_t id) const;
+
+ // Returns the tag type for a given vendor tag id.
+ int getTagType(uint32_t tag, metadata_vendor_id_t id) const;
+
+ /**
+ * Dump the currently configured vendor tags to a file descriptor.
+ */
+ void dump(int fd, int verbosity, int indentation) const;
+
+ protected:
+ std::unordered_map<metadata_vendor_id_t, sp<android::VendorTagDescriptor>> mVendorMap;
+ struct vendor_tag_cache_ops mVendorCacheOps;
+};
+
+} /* namespace params */
+} /* namespace camera2 */
+} /* namespace hardware */
+
+class VendorTagDescriptorCache :
+ public ::android::hardware::camera2::params::VendorTagDescriptorCache,
+ public LightRefBase<VendorTagDescriptorCache> {
+ public:
+
+ /**
+ * Sets the global vendor tag descriptor cache to use for this process.
+ * Camera metadata operations that access vendor tags will use the
+ * vendor tag definitions set this way.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t setAsGlobalVendorTagCache(
+ const sp<VendorTagDescriptorCache>& cache);
+
+ /**
+ * Returns the global vendor tag cache used by this process.
+ * This will contain NULL if no vendor tags are defined.
+ */
+ static sp<VendorTagDescriptorCache> getGlobalVendorTagCache();
+
+ /**
+ * Clears the global vendor tag cache used by this process.
+ */
+ static void clearGlobalVendorTagCache();
+
+};
+
+} /* namespace android */
#define VENDOR_TAG_DESCRIPTOR_H
#endif /* VENDOR_TAG_DESCRIPTOR_H */
diff --git a/camera/include/camera/android/hardware/ICamera.h b/camera/include/camera/android/hardware/ICamera.h
index 315669e..80823d6 100644
--- a/camera/include/camera/android/hardware/ICamera.h
+++ b/camera/include/camera/android/hardware/ICamera.h
@@ -101,6 +101,11 @@
// ICameraClient::recordingFrameHandleCallbackTimestamp.
virtual void releaseRecordingFrameHandle(native_handle_t *handle) = 0;
+ // Release a batch of recording frame handles that was received via
+ // ICameraClient::recordingFrameHandleCallbackTimestampBatch
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) = 0;
+
// auto focus
virtual status_t autoFocus() = 0;
diff --git a/camera/include/camera/android/hardware/ICameraClient.h b/camera/include/camera/android/hardware/ICameraClient.h
index f6ee311..8e46d17 100644
--- a/camera/include/camera/android/hardware/ICameraClient.h
+++ b/camera/include/camera/android/hardware/ICameraClient.h
@@ -41,6 +41,13 @@
// ICamera::releaseRecordingFrameHandle to release the frame handle.
virtual void recordingFrameHandleCallbackTimestamp(nsecs_t timestamp,
native_handle_t* handle) = 0;
+
+ // Invoked to send a batch of recording frame handles with timestamp. Call
+ // ICamera::releaseRecordingFrameHandleBatch to release the frame handles.
+ // Size of timestamps and handles must match
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) = 0;
};
// ----------------------------------------------------------------------------
diff --git a/camera/ndk/Android.bp b/camera/ndk/Android.bp
index c5fc646..ade0d72 100644
--- a/camera/ndk/Android.bp
+++ b/camera/ndk/Android.bp
@@ -17,7 +17,7 @@
// frameworks/av/include.
ndk_library {
- name: "libcamera2ndk.ndk",
+ name: "libcamera2ndk",
symbol_file: "libcamera2ndk.map.txt",
first_version: "24",
unversioned_until: "current",
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index ba2100c..3f64bcc 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -131,10 +131,36 @@
binder::Status ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc.get());
if (ret.isOk()) {
- status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
- if (err != OK) {
- ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
- __FUNCTION__, strerror(-err), err);
+ if (0 < desc->getTagCount()) {
+ status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+ if (err != OK) {
+ ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
+ __FUNCTION__, strerror(-err), err);
+ }
+ } else {
+ sp<VendorTagDescriptorCache> cache =
+ new VendorTagDescriptorCache();
+ binder::Status res =
+ mCameraService->getCameraVendorTagCache(
+ /*out*/cache.get());
+ if (res.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_DISCONNECTED) {
+ // No camera module available, not an error on devices with no cameras
+ VendorTagDescriptorCache::clearGlobalVendorTagCache();
+ } else if (res.isOk()) {
+ status_t err =
+ VendorTagDescriptorCache::setAsGlobalVendorTagCache(
+ cache);
+ if (err != OK) {
+ ALOGE("%s: Failed to set vendor tag cache,"
+ "received error %s (%d)", __FUNCTION__,
+ strerror(-err), err);
+ }
+ } else {
+ VendorTagDescriptorCache::clearGlobalVendorTagCache();
+ ALOGE("%s: Failed to setup vendor tag cache: %s",
+ __FUNCTION__, res.toString8().string());
+ }
}
} else if (ret.serviceSpecificErrorCode() ==
hardware::ICameraService::ERROR_DEPRECATED_HAL) {
diff --git a/camera/tests/CameraZSLTests.cpp b/camera/tests/CameraZSLTests.cpp
index 6c91fdc..ecca354 100644
--- a/camera/tests/CameraZSLTests.cpp
+++ b/camera/tests/CameraZSLTests.cpp
@@ -51,6 +51,9 @@
const sp<IMemory>&) override {};
void recordingFrameHandleCallbackTimestamp(nsecs_t,
native_handle_t*) override {};
+ void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<nsecs_t>&,
+ const std::vector<native_handle_t*>&) override {};
status_t waitForPreviewStart();
status_t waitForEvent(Mutex &mutex, Condition &condition, bool &flag);
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index 4d8dd3c..80aad2f 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -908,9 +908,7 @@
if (listComponents) {
sp<IOMX> omx;
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
using namespace ::android::hardware::media::omx::V1_0;
sp<IOmx> tOmx = IOmx::getService();
diff --git a/drm/libmediadrm/Android.mk b/drm/libmediadrm/Android.mk
index 590622e..5b56501 100644
--- a/drm/libmediadrm/Android.mk
+++ b/drm/libmediadrm/Android.mk
@@ -18,6 +18,7 @@
LOCAL_SRC_FILES += \
CasImpl.cpp \
DescramblerImpl.cpp \
+ DrmPluginPath.cpp \
DrmSessionManager.cpp \
ICrypto.cpp \
IDrm.cpp \
diff --git a/drm/libmediadrm/CasImpl.cpp b/drm/libmediadrm/CasImpl.cpp
index de15244..fcedd6b 100644
--- a/drm/libmediadrm/CasImpl.cpp
+++ b/drm/libmediadrm/CasImpl.cpp
@@ -49,13 +49,24 @@
return result;
}
+struct CasImpl::PluginHolder : public RefBase {
+public:
+ explicit PluginHolder(CasPlugin *plugin) : mPlugin(plugin) {}
+ ~PluginHolder() { if (mPlugin != NULL) delete mPlugin; }
+ CasPlugin* get() { return mPlugin; }
+
+private:
+ CasPlugin *mPlugin;
+ DISALLOW_EVIL_CONSTRUCTORS(PluginHolder);
+};
+
CasImpl::CasImpl(const sp<ICasListener> &listener)
- : mPlugin(NULL), mListener(listener) {
- ALOGV("CTOR: mPlugin=%p", mPlugin);
+ : mPluginHolder(NULL), mListener(listener) {
+ ALOGV("CTOR");
}
CasImpl::~CasImpl() {
- ALOGV("DTOR: mPlugin=%p", mPlugin);
+ ALOGV("DTOR");
release();
}
@@ -76,7 +87,7 @@
void CasImpl::init(const sp<SharedLibrary>& library, CasPlugin *plugin) {
mLibrary = library;
- mPlugin = plugin;
+ mPluginHolder = new PluginHolder(plugin);
}
void CasImpl::onEvent(
@@ -95,13 +106,20 @@
Status CasImpl::setPrivateData(const CasData& pvtData) {
ALOGV("setPrivateData");
- return getBinderStatus(mPlugin->setPrivateData(pvtData));
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->setPrivateData(pvtData));
}
Status CasImpl::openSession(int32_t program_number, CasSessionId* sessionId) {
ALOGV("openSession: program_number=%d", program_number);
-
- status_t err = mPlugin->openSession(program_number, sessionId);
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ status_t err = holder->get()->openSession(program_number, sessionId);
ALOGV("openSession: session opened for program_number=%d, sessionId=%s",
program_number, sessionIdToString(*sessionId).string());
@@ -115,8 +133,11 @@
CasSessionId* sessionId) {
ALOGV("openSession: program_number=%d, elementary_PID=%d",
program_number, elementary_PID);
-
- status_t err = mPlugin->openSession(
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ status_t err = holder->get()->openSession(
program_number, elementary_PID, sessionId);
ALOGV("openSession: session opened for "
@@ -131,69 +152,92 @@
const CasSessionId &sessionId, const CasData& pvtData) {
ALOGV("setSessionPrivateData: sessionId=%s",
sessionIdToString(sessionId).string());
-
- return getBinderStatus(mPlugin->setSessionPrivateData(sessionId, pvtData));
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->setSessionPrivateData(sessionId, pvtData));
}
Status CasImpl::closeSession(const CasSessionId &sessionId) {
ALOGV("closeSession: sessionId=%s",
sessionIdToString(sessionId).string());
-
- return getBinderStatus(mPlugin->closeSession(sessionId));
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
+ return getBinderStatus(holder->get()->closeSession(sessionId));
}
Status CasImpl::processEcm(const CasSessionId &sessionId, const ParcelableCasData& ecm) {
ALOGV("processEcm: sessionId=%s",
sessionIdToString(sessionId).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
- return getBinderStatus(mPlugin->processEcm(sessionId, ecm));
+ return getBinderStatus(holder->get()->processEcm(sessionId, ecm));
}
Status CasImpl::processEmm(const ParcelableCasData& emm) {
ALOGV("processEmm");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
- return getBinderStatus(mPlugin->processEmm(emm));
+ return getBinderStatus(holder->get()->processEmm(emm));
}
Status CasImpl::sendEvent(
int32_t event, int32_t arg, const ::std::unique_ptr<CasData> &eventData) {
ALOGV("sendEvent");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
status_t err;
if (eventData == nullptr) {
- err = mPlugin->sendEvent(event, arg, CasData());
+ err = holder->get()->sendEvent(event, arg, CasData());
} else {
- err = mPlugin->sendEvent(event, arg, *eventData);
+ err = holder->get()->sendEvent(event, arg, *eventData);
}
return getBinderStatus(err);
}
Status CasImpl::provision(const String16& provisionString) {
ALOGV("provision: provisionString=%s", String8(provisionString).string());
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
- return getBinderStatus(mPlugin->provision(String8(provisionString)));
+ return getBinderStatus(holder->get()->provision(String8(provisionString)));
}
Status CasImpl::refreshEntitlements(
int32_t refreshType, const ::std::unique_ptr<CasData> &refreshData) {
ALOGV("refreshEntitlements");
+ sp<PluginHolder> holder = mPluginHolder;
+ if (holder == NULL) {
+ return getBinderStatus(INVALID_OPERATION);
+ }
status_t err;
if (refreshData == nullptr) {
- err = mPlugin->refreshEntitlements(refreshType, CasData());
+ err = holder->get()->refreshEntitlements(refreshType, CasData());
} else {
- err = mPlugin->refreshEntitlements(refreshType, *refreshData);
+ err = holder->get()->refreshEntitlements(refreshType, *refreshData);
}
return getBinderStatus(err);
}
Status CasImpl::release() {
- ALOGV("release: mPlugin=%p", mPlugin);
-
- if (mPlugin != NULL) {
- delete mPlugin;
- mPlugin = NULL;
- }
+ ALOGV("release: plugin=%p",
+ mPluginHolder == NULL ? mPluginHolder->get() : NULL);
+ mPluginHolder.clear();
return Status::ok();
}
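
The PluginHolder change replaces a raw CasPlugin* with a ref-counted holder: every entry point takes a local strong reference first, so a concurrent release() can only drop the plugin after in-flight calls finish with their snapshot. Below is a standalone sketch of that snapshot-before-use pattern using std::shared_ptr and a hypothetical Plugin type; the real CasImpl uses sp<PluginHolder> rather than shared_ptr, and the mutex here is only to keep the standalone sketch well defined.

```cpp
#include <cstdio>
#include <memory>
#include <mutex>

// Hypothetical plugin standing in for CasPlugin.
struct Plugin {
    int setPrivateData(int v) { std::printf("setPrivateData(%d)\n", v); return 0; }
};

class PluginOwner {
public:
    void init(Plugin* plugin) {
        std::lock_guard<std::mutex> lock(mLock);
        mPlugin.reset(plugin);          // holder now owns the plugin
    }

    // Mirrors CasImpl::setPrivateData: grab a local strong reference, then
    // call through it; a concurrent release() cannot free the plugin while
    // this snapshot is alive.
    int setPrivateData(int v) {
        std::shared_ptr<Plugin> holder = snapshot();
        if (!holder) return -1;         // INVALID_OPERATION analogue
        return holder->setPrivateData(v);
    }

    // Mirrors CasImpl::release(): drop our reference; the plugin is deleted
    // once the last outstanding snapshot goes away.
    void release() {
        std::lock_guard<std::mutex> lock(mLock);
        mPlugin.reset();
    }

private:
    std::shared_ptr<Plugin> snapshot() {
        std::lock_guard<std::mutex> lock(mLock);
        return mPlugin;
    }
    std::mutex mLock;
    std::shared_ptr<Plugin> mPlugin;
};

int main() {
    PluginOwner owner;
    owner.init(new Plugin());
    owner.setPrivateData(7);
    owner.release();
    owner.setPrivateData(8);            // rejected: no plugin installed
    return 0;
}
```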
diff --git a/drm/libmediadrm/Crypto.cpp b/drm/libmediadrm/Crypto.cpp
index d93dad6..a5d7346 100644
--- a/drm/libmediadrm/Crypto.cpp
+++ b/drm/libmediadrm/Crypto.cpp
@@ -22,6 +22,7 @@
#include <binder/IMemory.h>
#include <media/Crypto.h>
+#include <media/DrmPluginPath.h>
#include <media/hardware/CryptoAPI.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AString.h>
@@ -102,7 +103,7 @@
}
// no luck, have to search
- String8 dirPath("/vendor/lib/mediadrm");
+ String8 dirPath(getDrmPluginPath());
String8 pluginPath;
DIR* pDir = opendir(dirPath.string());
diff --git a/drm/libmediadrm/Drm.cpp b/drm/libmediadrm/Drm.cpp
index e3176e3..1004eb8 100644
--- a/drm/libmediadrm/Drm.cpp
+++ b/drm/libmediadrm/Drm.cpp
@@ -21,6 +21,7 @@
#include <dirent.h>
#include <dlfcn.h>
+#include <media/DrmPluginPath.h>
#include <media/DrmSessionClientInterface.h>
#include <media/DrmSessionManager.h>
#include <media/Drm.h>
@@ -220,7 +221,7 @@
}
// no luck, have to search
- String8 dirPath("/vendor/lib/mediadrm");
+ String8 dirPath(getDrmPluginPath());
DIR* pDir = opendir(dirPath.string());
if (pDir == NULL) {
diff --git a/drm/libmediadrm/DrmPluginPath.cpp b/drm/libmediadrm/DrmPluginPath.cpp
new file mode 100644
index 0000000..c760825
--- /dev/null
+++ b/drm/libmediadrm/DrmPluginPath.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DrmPluginPath"
+#include <utils/Log.h>
+
+#include <cutils/properties.h>
+#include <media/DrmPluginPath.h>
+
+namespace android {
+
+const char* getDrmPluginPath() {
+ char value[PROPERTY_VALUE_MAX];
+ if (property_get("drm.64bit.enabled", value, NULL) == 0) {
+ return "/vendor/lib/mediadrm";
+ } else {
+ return "/vendor/lib64/mediadrm";
+ }
+}
+
+} // namespace android
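
getDrmPluginPath() centralizes the 32-bit vs 64-bit plugin directory choice; Crypto.cpp and Drm.cpp then scan the returned directory with opendir/readdir when searching for a plugin library. The sketch below is a standalone approximation of that scan; pickPluginDir() is a hypothetical stand-in for the drm.64bit.enabled property check, and the ".so" filter is a simplification of the real search loop.

```cpp
#include <cstdio>
#include <cstring>
#include <dirent.h>
#include <string>
#include <vector>

// Hypothetical stand-in for getDrmPluginPath(): the real code keys the
// choice off the drm.64bit.enabled system property.
static const char* pickPluginDir(bool use64bit) {
    return use64bit ? "/vendor/lib64/mediadrm" : "/vendor/lib/mediadrm";
}

// Collect candidate plugin libraries (*.so) from the chosen directory,
// the same shape as the directory search in Crypto.cpp and Drm.cpp.
static std::vector<std::string> findPluginLibs(const char* dirPath) {
    std::vector<std::string> libs;
    DIR* dir = opendir(dirPath);
    if (dir == nullptr) {
        std::fprintf(stderr, "could not open %s\n", dirPath);
        return libs;
    }
    struct dirent* entry;
    while ((entry = readdir(dir)) != nullptr) {
        const char* name = entry->d_name;
        size_t len = std::strlen(name);
        if (len > 3 && std::strcmp(name + len - 3, ".so") == 0) {
            libs.push_back(std::string(dirPath) + "/" + name);
        }
    }
    closedir(dir);
    return libs;
}

int main() {
    for (const std::string& lib : findPluginLibs(pickPluginDir(/*use64bit=*/false))) {
        std::printf("candidate plugin: %s\n", lib.c_str());
    }
    return 0;
}
```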
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
index 221b74b..f4c3577 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyCasPlugin.cpp
@@ -84,7 +84,7 @@
///////////////////////////////////////////////////////////////////////////////
ClearKeyCasPlugin::ClearKeyCasPlugin(
uint64_t appData, CasPluginCallback callback)
- : mAppData(appData), mCallback(callback) {
+ : mCallback(callback), mAppData(appData) {
ALOGV("CTOR");
}
@@ -93,7 +93,7 @@
ClearKeySessionLibrary::get()->destroyPlugin(this);
}
-status_t ClearKeyCasPlugin::setPrivateData(const CasData &data) {
+status_t ClearKeyCasPlugin::setPrivateData(const CasData &/*data*/) {
ALOGV("setPrivateData");
return OK;
@@ -142,7 +142,7 @@
}
status_t ClearKeyCasPlugin::setSessionPrivateData(
- const CasSessionId &sessionId, const CasData &data) {
+ const CasSessionId &sessionId, const CasData & /*data*/) {
ALOGV("setSessionPrivateData: sessionId=%s",
sessionIdToString(sessionId).string());
sp<ClearKeyCasSession> session =
@@ -167,7 +167,7 @@
return session->updateECM(mKeyFetcher.get(), (void*)ecm.data(), ecm.size());
}
-status_t ClearKeyCasPlugin::processEmm(const CasEmm& emm) {
+status_t ClearKeyCasPlugin::processEmm(const CasEmm& /*emm*/) {
ALOGV("processEmm");
Mutex::Autolock lock(mKeyFetcherLock);
@@ -212,8 +212,8 @@
}
status_t ClearKeyCasPlugin::refreshEntitlements(
- int32_t refreshType, const CasData &refreshData) {
- ALOGV("refreshEntitlements");
+ int32_t refreshType, const CasData &/*refreshData*/) {
+ ALOGV("refreshEntitlements: refreshType=%d", refreshType);
Mutex::Autolock lock(mKeyFetcherLock);
return OK;
@@ -344,7 +344,7 @@
AES_BLOCK_SIZE * 8, &mKeyInfo[keyIndex].contentKey);
mKeyInfo[keyIndex].valid = (result == 0);
if (!mKeyInfo[keyIndex].valid) {
- ALOGE("updateECM: failed to set key %d, key_id=%d",
+ ALOGE("updateECM: failed to set key %zu, key_id=%d",
keyIndex, keys[keyIndex].key_id);
}
}
@@ -356,6 +356,10 @@
bool secure, DescramblerPlugin::ScramblingControl scramblingControl,
size_t numSubSamples, const DescramblerPlugin::SubSample *subSamples,
const void *srcPtr, void *dstPtr, AString * /* errorDetailMsg */) {
+ if (secure) {
+ return ERROR_DRM_CANNOT_HANDLE;
+ }
+
AES_KEY contentKey;
if (scramblingControl != DescramblerPlugin::kScrambling_Unscrambled) {
diff --git a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
index 210bab3..cb69f91 100644
--- a/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
+++ b/drm/mediacas/plugins/clearkey/ClearKeyFetcher.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "ClearKeyFetcher"
#include <algorithm>
+#include <inttypes.h>
#include <string>
#include "ClearKeyFetcher.h"
@@ -70,7 +71,7 @@
bool same_parity = (((container.descriptor(0).id() & 0x01) ^
(container.descriptor(1).id() & 0x01)) == 0);
if (same_parity) {
- ALOGW("asset_id=%llu: malformed Ecm, "
+ ALOGW("asset_id=%" PRIu64 ": malformed Ecm, "
"content keys have same parity, id0=%d, id1=%d",
container.descriptor(0).ecm().asset_id(),
container.descriptor(0).id(),
@@ -88,7 +89,7 @@
// asset_id change. If it sends an EcmContainer with 2 Ecms with different
// asset_ids (old and new) then it might be best to prefetch the Emm.
if ((asset_.id() != 0) && (*asset_id != asset_.id())) {
- ALOGW("Asset_id change from %llu to %llu", asset_.id(), *asset_id);
+ ALOGW("Asset_id change from %" PRIu64 " to %" PRIu64, asset_.id(), *asset_id);
asset_.Clear();
}
diff --git a/drm/mediacas/plugins/clearkey/ecm_generator.cpp b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
index f1aa973..7d29659 100644
--- a/drm/mediacas/plugins/clearkey/ecm_generator.cpp
+++ b/drm/mediacas/plugins/clearkey/ecm_generator.cpp
@@ -80,7 +80,7 @@
CHECK(default_fields);
if (ecm->size() < kTotalEcmSize) {
- ALOGE("Short ECM: expected_length=%zu, actual_length=%zu",
+ ALOGE("Short ECM: expected_length=%d, actual_length=%zu",
kTotalEcmSize, ecm->size());
return BAD_VALUE;
}
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
index 8cc5ee9..5fdac5c 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.cpp
@@ -37,6 +37,9 @@
status_t DrmPlugin::closeSession(const Vector<uint8_t>& sessionId) {
sp<Session> session = mSessionLibrary->findSession(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
if (session.get()) {
mSessionLibrary->destroySession(session);
return android::OK;
@@ -54,6 +57,9 @@
String8& defaultUrl,
DrmPlugin::KeyRequestType *keyRequestType) {
UNUSED(optionalParameters);
+ if (scope.size() == 0) {
+ return android::BAD_VALUE;
+ }
if (keyType != kKeyType_Streaming) {
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -70,6 +76,9 @@
const Vector<uint8_t>& scope,
const Vector<uint8_t>& response,
Vector<uint8_t>& keySetId) {
+ if (scope.size() == 0 || response.size() == 0) {
+ return android::BAD_VALUE;
+ }
sp<Session> session = mSessionLibrary->findSession(scope);
if (!session.get()) {
return android::ERROR_DRM_SESSION_NOT_OPENED;
diff --git a/drm/mediadrm/plugins/clearkey/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
index c4d934e..58421b9 100644
--- a/drm/mediadrm/plugins/clearkey/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/DrmPlugin.h
@@ -63,22 +63,28 @@
Vector<uint8_t>& keySetId);
virtual status_t removeKeys(const Vector<uint8_t>& sessionId) {
- UNUSED(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t restoreKeys(
const Vector<uint8_t>& sessionId,
const Vector<uint8_t>& keySetId) {
- UNUSED(sessionId);
- UNUSED(keySetId);
+ if (sessionId.size() == 0 || keySetId.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t queryKeyStatus(
const Vector<uint8_t>& sessionId,
KeyedVector<String8, String8>& infoMap) const {
- UNUSED(sessionId);
+ if (sessionId.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(infoMap);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -99,9 +105,12 @@
const Vector<uint8_t>& response,
Vector<uint8_t>& certificate,
Vector<uint8_t>& wrappedKey) {
- UNUSED(response);
UNUSED(certificate);
UNUSED(wrappedKey);
+ if (response.size() == 0) {
+ // empty response
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -111,13 +120,18 @@
}
virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) {
- UNUSED(ssid);
+ if (ssid.size() == 0) {
+ return android::BAD_VALUE;
+ }
+
UNUSED(secureStop);
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t releaseSecureStops(const Vector<uint8_t>& ssRelease) {
- UNUSED(ssRelease);
+ if (ssRelease.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -151,15 +165,17 @@
virtual status_t setCipherAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
- UNUSED(sessionId);
- UNUSED(algorithm);
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
virtual status_t setMacAlgorithm(
const Vector<uint8_t>& sessionId, const String8& algorithm) {
- UNUSED(sessionId);
- UNUSED(algorithm);
+ if (sessionId.size() == 0 || algorithm.size() == 0) {
+ return android::BAD_VALUE;
+ }
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -169,10 +185,10 @@
const Vector<uint8_t>& input,
const Vector<uint8_t>& iv,
Vector<uint8_t>& output) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(input);
- UNUSED(iv);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(output);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -183,10 +199,10 @@
const Vector<uint8_t>& input,
const Vector<uint8_t>& iv,
Vector<uint8_t>& output) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(input);
- UNUSED(iv);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ input.size() == 0 || iv.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(output);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -196,9 +212,10 @@
const Vector<uint8_t>& keyId,
const Vector<uint8_t>& message,
Vector<uint8_t>& signature) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(message);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(signature);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -208,10 +225,10 @@
const Vector<uint8_t>& keyId,
const Vector<uint8_t>& message,
const Vector<uint8_t>& signature, bool& match) {
- UNUSED(sessionId);
- UNUSED(keyId);
- UNUSED(message);
- UNUSED(signature);
+ if (sessionId.size() == 0 || keyId.size() == 0 ||
+ message.size() == 0 || signature.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(match);
return android::ERROR_DRM_CANNOT_HANDLE;
}
@@ -222,10 +239,10 @@
const Vector<uint8_t>& message,
const Vector<uint8_t>& wrappedKey,
Vector<uint8_t>& signature) {
- UNUSED(sessionId);
- UNUSED(algorithm);
- UNUSED(message);
- UNUSED(wrappedKey);
+ if (sessionId.size() == 0 || algorithm.size() == 0 ||
+ message.size() == 0 || wrappedKey.size() == 0) {
+ return android::BAD_VALUE;
+ }
UNUSED(signature);
return android::ERROR_DRM_CANNOT_HANDLE;
}
diff --git a/include/media/CasImpl.h b/include/media/CasImpl.h
index 80c901e..3c07092 100644
--- a/include/media/CasImpl.h
+++ b/include/media/CasImpl.h
@@ -84,8 +84,9 @@
virtual Status release() override;
private:
+ struct PluginHolder;
sp<SharedLibrary> mLibrary;
- CasPlugin *mPlugin;
+ sp<PluginHolder> mPluginHolder;
sp<ICasListener> mListener;
DISALLOW_EVIL_CONSTRUCTORS(CasImpl);
diff --git a/include/media/DrmPluginPath.h b/include/media/DrmPluginPath.h
new file mode 120000
index 0000000..06b12cf
--- /dev/null
+++ b/include/media/DrmPluginPath.h
@@ -0,0 +1 @@
+../../media/libmedia/include/DrmPluginPath.h
\ No newline at end of file
diff --git a/include/media/IMediaAnalyticsService.h b/include/media/IMediaAnalyticsService.h
index 97915e4..a596d60 120000
--- a/include/media/IMediaAnalyticsService.h
+++ b/include/media/IMediaAnalyticsService.h
@@ -1 +1 @@
-../../media/libmedia/include/IMediaAnalyticsService.h
\ No newline at end of file
+../../media/libmediametrics/include/IMediaAnalyticsService.h
\ No newline at end of file
diff --git a/include/media/MediaAnalyticsItem.h b/include/media/MediaAnalyticsItem.h
index 71957a5..e8124e0 120000
--- a/include/media/MediaAnalyticsItem.h
+++ b/include/media/MediaAnalyticsItem.h
@@ -1 +1 @@
-../../media/libmedia/include/MediaAnalyticsItem.h
\ No newline at end of file
+../../media/libmediametrics/include/MediaAnalyticsItem.h
\ No newline at end of file
diff --git a/include/ndk/NdkMediaDrm.h b/include/ndk/NdkMediaDrm.h
index 9dd6283..cba4380 100644
--- a/include/ndk/NdkMediaDrm.h
+++ b/include/ndk/NdkMediaDrm.h
@@ -159,8 +159,7 @@
* to obtain or release keys used to decrypt encrypted content.
* AMediaDrm_getKeyRequest is used to obtain an opaque key request byte array that
* is delivered to the license server. The opaque key request byte array is
- * returned in KeyRequest.data. The recommended URL to deliver the key request to
- * is returned in KeyRequest.defaultUrl.
+ * returned in KeyRequest.data.
*
* After the app has received the key request response from the server,
* it should deliver to the response to the DRM engine plugin using the method
diff --git a/media/libaaudio/Android.bp b/media/libaaudio/Android.bp
index e41d62b..f539ba9 100644
--- a/media/libaaudio/Android.bp
+++ b/media/libaaudio/Android.bp
@@ -21,7 +21,7 @@
}
ndk_library {
- name: "libaaudio.ndk",
+ name: "libaaudio",
symbol_file: "libaaudio.map.txt",
first_version: "26",
unversioned_until: "current",
diff --git a/media/libaaudio/examples/input_monitor/Android.mk b/media/libaaudio/examples/input_monitor/Android.mk
new file mode 100644
index 0000000..b56328b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/Android.mk
@@ -0,0 +1,6 @@
+# include $(call all-subdir-makefiles)
+
+# Just include static/ for now.
+LOCAL_PATH := $(call my-dir)
+#include $(LOCAL_PATH)/jni/Android.mk
+include $(LOCAL_PATH)/static/Android.mk
diff --git a/media/libaaudio/examples/input_monitor/README.md b/media/libaaudio/examples/input_monitor/README.md
new file mode 100644
index 0000000..3e54ef0
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/README.md
@@ -0,0 +1 @@
+Monitor input level and print value.
diff --git a/media/libaaudio/examples/input_monitor/jni/Android.mk b/media/libaaudio/examples/input_monitor/jni/Android.mk
new file mode 100644
index 0000000..51a5a85
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+ libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/liboboe/include
+
+LOCAL_SRC_FILES:= frameworks/av/media/liboboe/src/write_sine_threaded.cpp
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia libtinyalsa \
+ libbinder libcutils libutils
+LOCAL_STATIC_LIBRARIES := libsndfile
+LOCAL_MODULE := write_sine_threaded_ndk
+LOCAL_SHARED_LIBRARIES += liboboe_prebuilt
+include $(BUILD_EXECUTABLE)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := liboboe_prebuilt
+LOCAL_SRC_FILES := liboboe.so
+LOCAL_EXPORT_C_INCLUDES := $(LOCAL_PATH)/include
+include $(PREBUILT_SHARED_LIBRARY)
diff --git a/media/libaaudio/examples/input_monitor/jni/Application.mk b/media/libaaudio/examples/input_monitor/jni/Application.mk
new file mode 100644
index 0000000..e74475c
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/jni/Application.mk
@@ -0,0 +1,3 @@
+# TODO remove this when we support other architectures
+APP_ABI := arm64-v8a
+APP_CPPFLAGS += -std=c++11
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
new file mode 100644
index 0000000..545496f
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor.cpp
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <new>
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define SAMPLE_RATE 48000
+#define NUM_SECONDS 10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+
+#define DECAY_FACTOR 0.999
+#define MIN_FRAMES_TO_READ 48 /* arbitrary, 1 msec at 48000 Hz */
+
+static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
+ const char *modeText = "unknown";
+ switch (mode) {
+ case AAUDIO_SHARING_MODE_EXCLUSIVE:
+ modeText = "EXCLUSIVE";
+ break;
+ case AAUDIO_SHARING_MODE_SHARED:
+ modeText = "SHARED";
+ break;
+ default:
+ break;
+ }
+ return modeText;
+}
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+
+ aaudio_result_t result;
+
+ int actualSamplesPerFrame;
+ int actualSampleRate;
+ const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
+ aaudio_audio_format_t actualDataFormat;
+
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
+ aaudio_sharing_mode_t actualSharingMode;
+
+ AAudioStreamBuilder *aaudioBuilder = nullptr;
+ AAudioStream *aaudioStream = nullptr;
+ aaudio_stream_state_t state;
+ int32_t framesPerBurst = 0;
+ int32_t framesPerRead = 0;
+ int32_t framesToRecord = 0;
+ int32_t framesLeft = 0;
+ int32_t xRunCount = 0;
+ int16_t *data = nullptr;
+ float peakLevel = 0.0;
+ int loopCounter = 0;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+
+ printf("%s - Monitor input level using AAudio\n", argv[0]);
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&aaudioBuilder);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ // Request stream properties.
+ AAudioStreamBuilder_setDirection(aaudioBuilder, AAUDIO_DIRECTION_INPUT);
+ AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
+ AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
+
+ // Create an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
+ if (result != AAUDIO_OK) {
+ goto finish;
+ }
+
+ actualSamplesPerFrame = AAudioStream_getSamplesPerFrame(aaudioStream);
+ printf("SamplesPerFrame = %d\n", actualSamplesPerFrame);
+ actualSampleRate = AAudioStream_getSampleRate(aaudioStream);
+ printf("SamplesPerFrame = %d\n", actualSampleRate);
+
+ actualSharingMode = AAudioStream_getSharingMode(aaudioStream);
+ printf("SharingMode: requested = %s, actual = %s\n",
+ getSharingModeText(requestedSharingMode),
+ getSharingModeText(actualSharingMode));
+
+ // This is the number of frames that are written in one chunk by a DMA controller
+ // or a DSP.
+ framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
+ printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+
+ // Some DMA might use very short bursts of 16 frames. We don't need to read such small
+ // buffers. But it helps to use a multiple of the burst size for predictable scheduling.
+ framesPerRead = framesPerBurst;
+ while (framesPerRead < MIN_FRAMES_TO_READ) {
+ framesPerRead *= 2;
+ }
+ printf("DataFormat: framesPerRead = %d\n",framesPerRead);
+
+ actualDataFormat = AAudioStream_getFormat(aaudioStream);
+ printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
+ // TODO handle other data formats
+ assert(actualDataFormat == AAUDIO_FORMAT_PCM_I16);
+
+ // Allocate a buffer for the audio data.
+ data = new(std::nothrow) int16_t[framesPerRead * actualSamplesPerFrame];
+ if (data == nullptr) {
+ fprintf(stderr, "ERROR - could not allocate data buffer\n");
+ result = AAUDIO_ERROR_NO_MEMORY;
+ goto finish;
+ }
+
+ // Start the stream.
+ printf("call AAudioStream_requestStart()\n");
+ result = AAudioStream_requestStart(aaudioStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d\n", result);
+ goto finish;
+ }
+
+ state = AAudioStream_getState(aaudioStream);
+ printf("after start, state = %s\n", AAudio_convertStreamStateToText(state));
+
+ // Record for a while.
+ framesToRecord = actualSampleRate * NUM_SECONDS;
+ framesLeft = framesToRecord;
+ while (framesLeft > 0) {
+ // Read audio data from the stream.
+ int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
+ int minFrames = (framesToRecord < framesPerRead) ? framesToRecord : framesPerRead;
+ int actual = AAudioStream_read(aaudioStream, data, minFrames, timeoutNanos);
+ if (actual < 0) {
+ fprintf(stderr, "ERROR - AAudioStream_read() returned %zd\n", actual);
+ goto finish;
+ } else if (actual == 0) {
+ fprintf(stderr, "WARNING - AAudioStream_read() returned %zd\n", actual);
+ goto finish;
+ }
+ framesLeft -= actual;
+
+ // Peak follower.
+ for (int frameIndex = 0; frameIndex < actual; frameIndex++) {
+ float sample = data[frameIndex * actualSamplesPerFrame] * (1.0/32768);
+ peakLevel *= DECAY_FACTOR;
+ if (sample > peakLevel) {
+ peakLevel = sample;
+ }
+ }
+
+ // Display the level as stars, e.g. "******".
+ if ((loopCounter++ % 10) == 0) {
+ printf("%5.3f ", peakLevel);
+ int numStars = (int)(peakLevel * 50);
+ for (int i = 0; i < numStars; i++) {
+ printf("*");
+ }
+ printf("\n");
+ }
+ }
+
+ xRunCount = AAudioStream_getXRunCount(aaudioStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+
+finish:
+ delete[] data;
+ AAudioStream_close(aaudioStream);
+ AAudioStreamBuilder_delete(aaudioBuilder);
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return (result != AAUDIO_OK) ? EXIT_FAILURE : EXIT_SUCCESS;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
new file mode 100644
index 0000000..8d40d94
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/src/input_monitor_callback.cpp
@@ -0,0 +1,284 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Record input using AAudio and display the peak amplitudes.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
+
+#define NUM_SECONDS 10
+#define NANOS_PER_MICROSECOND ((int64_t)1000)
+#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+
+/**
+ * Simple wrapper for AAudio that opens a default input stream and then calls
+ * a callback function to process the captured audio.
+ */
+class SimpleAAudioPlayer {
+public:
+ SimpleAAudioPlayer() {}
+ ~SimpleAAudioPlayer() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSampleRate(mStream);
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getSamplesPerFrame() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSamplesPerFrame(mStream);
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(AAudioStream_dataCallback proc, void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ AAudioStreamBuilder_setDirection(mBuilder, AAUDIO_DIRECTION_INPUT);
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ AAudioStreamBuilder_setDataCallback(mBuilder, proc, userContext);
+ AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_I16);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStreamBuilder_openStream() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ goto finish1;
+ }
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ return result;
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ // Assume format is PCM_I16. TODO use floats.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+ const int numFrames = 32; // arbitrary
+ int16_t zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+typedef struct PeakTrackerData {
+ float peakLevel;
+} PeakTrackerData_t;
+
+#define DECAY_FACTOR 0.999
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ PeakTrackerData_t *data = (PeakTrackerData_t *) userData;
+ // printf("MyCallbackProc(): frameCount = %d\n", numFrames);
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+ float sample;
+ // This code assumes mono or stereo; only the first channel is read.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame] * (1.0/32768);
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Peak follower
+ for (int frameIndex = 0; frameIndex < numFrames; frameIndex++) {
+ sample = audioBuffer[frameIndex * samplesPerFrame];
+ data->peakLevel *= DECAY_FACTOR;
+ if (sample > data->peakLevel) {
+ data->peakLevel = sample;
+ }
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+void displayPeakLevel(float peakLevel) {
+ printf("%5.3f ", peakLevel);
+ const int maxStars = 50; // arbitrary, fits on one line
+ int numStars = (int) (peakLevel * maxStars);
+ for (int i = 0; i < numStars; i++) {
+ printf("*");
+ }
+ printf("\n");
+}
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+ SimpleAAudioPlayer player;
+ PeakTrackerData_t myData = {0.0};
+ aaudio_result_t result;
+ const int displayRateHz = 20; // arbitrary
+ const int loopsNeeded = NUM_SECONDS * displayRateHz;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ printf("%s - Display audio input using an AAudio callback\n", argv[0]);
+
+ player.setSharingMode(SHARING_MODE);
+
+ result = player.open(MyDataCallbackProc, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ goto error;
+ }
+ printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+ printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+ for (int i = 0; i < loopsNeeded; i++)
+ {
+ const struct timespec request = { .tv_sec = 0,
+ .tv_nsec = NANOS_PER_SECOND / displayRateHz };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+ displayPeakLevel(myData.peakLevel);
+ }
+ printf("Woke up now.\n");
+
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ result = player.close();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ printf("SUCCESS\n");
+ return EXIT_SUCCESS;
+error:
+ player.close();
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/input_monitor/static/Android.mk b/media/libaaudio/examples/input_monitor/static/Android.mk
new file mode 100644
index 0000000..e83f179
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/Android.mk
@@ -0,0 +1,35 @@
+LOCAL_PATH := $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := examples
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include
+
+# TODO reorganize folders to avoid using ../
+LOCAL_SRC_FILES:= ../src/input_monitor.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog libtinyalsa
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor
+include $(BUILD_EXECUTABLE)
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/input_monitor_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := input_monitor_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/examples/input_monitor/static/README.md b/media/libaaudio/examples/input_monitor/static/README.md
new file mode 100644
index 0000000..6e26d7b
--- /dev/null
+++ b/media/libaaudio/examples/input_monitor/static/README.md
@@ -0,0 +1,2 @@
+Makefile for building simple command line examples.
+They link with AAudio as a static library.
diff --git a/media/libaaudio/examples/write_sine/src/SineGenerator.h b/media/libaaudio/examples/write_sine/src/SineGenerator.h
index ade7527..64b772d 100644
--- a/media/libaaudio/examples/write_sine/src/SineGenerator.h
+++ b/media/libaaudio/examples/write_sine/src/SineGenerator.h
@@ -79,7 +79,7 @@
}
}
- double mAmplitude = 0.01;
+ double mAmplitude = 0.05; // unitless scaler
double mPhase = 0.0;
double mPhaseIncrement = 440 * M_PI * 2 / 48000;
double mFrameRate = 48000;
diff --git a/media/libaaudio/examples/write_sine/src/write_sine.cpp b/media/libaaudio/examples/write_sine/src/write_sine.cpp
index 511fe94..d8e5ec1 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine.cpp
@@ -19,7 +19,6 @@
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
-#include <aaudio/AAudioDefinitions.h>
#include <aaudio/AAudio.h>
#include "SineGenerator.h"
@@ -27,6 +26,7 @@
#define NUM_SECONDS 10
#define NANOS_PER_MICROSECOND ((int64_t)1000)
#define NANOS_PER_MILLISECOND (NANOS_PER_MICROSECOND * 1000)
+#define NANOS_PER_SECOND (NANOS_PER_MILLISECOND * 1000)
static const char *getSharingModeText(aaudio_sharing_mode_t mode) {
const char *modeText = "unknown";
@@ -43,6 +43,16 @@
return modeText;
}
+// TODO move to a common utility library
+static int64_t getNanoseconds(clockid_t clockId = CLOCK_MONOTONIC) {
+ struct timespec time;
+ int result = clock_gettime(clockId, &time);
+ if (result < 0) {
+ return -errno;
+ }
+ return (time.tv_sec * NANOS_PER_SECOND) + time.tv_nsec;
+}
+
int main(int argc, char **argv)
{
(void)argc; // unused
@@ -56,13 +66,16 @@
const aaudio_audio_format_t requestedDataFormat = AAUDIO_FORMAT_PCM_I16;
aaudio_audio_format_t actualDataFormat = AAUDIO_FORMAT_PCM_I16;
- const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ //const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_EXCLUSIVE;
+ const aaudio_sharing_mode_t requestedSharingMode = AAUDIO_SHARING_MODE_SHARED;
aaudio_sharing_mode_t actualSharingMode = AAUDIO_SHARING_MODE_SHARED;
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
aaudio_stream_state_t state = AAUDIO_STREAM_STATE_UNINITIALIZED;
int32_t framesPerBurst = 0;
+ int32_t framesPerWrite = 0;
+ int32_t bufferCapacity = 0;
int32_t framesToPlay = 0;
int32_t framesLeft = 0;
int32_t xRunCount = 0;
@@ -89,7 +102,6 @@
AAudioStreamBuilder_setFormat(aaudioBuilder, requestedDataFormat);
AAudioStreamBuilder_setSharingMode(aaudioBuilder, requestedSharingMode);
-
// Create an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(aaudioBuilder, &aaudioStream);
if (result != AAUDIO_OK) {
@@ -118,21 +130,25 @@
// This is the number of frames that are read in one chunk by a DMA controller
// or a DSP or a mixer.
framesPerBurst = AAudioStream_getFramesPerBurst(aaudioStream);
- printf("DataFormat: original framesPerBurst = %d\n",framesPerBurst);
+ printf("DataFormat: framesPerBurst = %d\n",framesPerBurst);
+ bufferCapacity = AAudioStream_getBufferCapacityInFrames(aaudioStream);
+ printf("DataFormat: bufferCapacity = %d, remainder = %d\n",
+ bufferCapacity, bufferCapacity % framesPerBurst);
// Some DMA might use very short bursts of 16 frames. We don't need to write such small
// buffers. But it helps to use a multiple of the burst size for predictable scheduling.
- while (framesPerBurst < 48) {
- framesPerBurst *= 2;
+ framesPerWrite = framesPerBurst;
+ while (framesPerWrite < 48) {
+ framesPerWrite *= 2;
}
- printf("DataFormat: final framesPerBurst = %d\n",framesPerBurst);
+ printf("DataFormat: framesPerWrite = %d\n",framesPerWrite);
actualDataFormat = AAudioStream_getFormat(aaudioStream);
printf("DataFormat: requested = %d, actual = %d\n", requestedDataFormat, actualDataFormat);
// TODO handle other data formats
// Allocate a buffer for the audio data.
- data = new int16_t[framesPerBurst * actualSamplesPerFrame];
+ data = new int16_t[framesPerWrite * actualSamplesPerFrame];
if (data == nullptr) {
fprintf(stderr, "ERROR - could not allocate data buffer\n");
result = AAUDIO_ERROR_NO_MEMORY;
@@ -155,14 +171,14 @@
framesLeft = framesToPlay;
while (framesLeft > 0) {
// Render sine waves to left and right channels.
- sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerBurst);
+ sineOsc1.render(&data[0], actualSamplesPerFrame, framesPerWrite);
if (actualSamplesPerFrame > 1) {
- sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerBurst);
+ sineOsc2.render(&data[1], actualSamplesPerFrame, framesPerWrite);
}
// Write audio data to the stream.
int64_t timeoutNanos = 100 * NANOS_PER_MILLISECOND;
- int minFrames = (framesToPlay < framesPerBurst) ? framesToPlay : framesPerBurst;
+ int minFrames = (framesToPlay < framesPerWrite) ? framesToPlay : framesPerWrite;
int actual = AAudioStream_write(aaudioStream, data, minFrames, timeoutNanos);
if (actual < 0) {
fprintf(stderr, "ERROR - AAudioStream_write() returned %zd\n", actual);
@@ -172,6 +188,26 @@
goto finish;
}
framesLeft -= actual;
+
+ // Use timestamp to estimate latency.
+ {
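+ // The estimate below extrapolates from the last known (frame, time) pair:
+ // currentFrame = presentationFrame + elapsedTime * sampleRate, and the
+ // frames still queued in the pipeline are framesWritten - currentFrame.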
+ int64_t presentationFrame;
+ int64_t presentationTime;
+ result = AAudioStream_getTimestamp(aaudioStream,
+ CLOCK_MONOTONIC,
+ &presentationFrame,
+ &presentationTime
+ );
+ if (result == AAUDIO_OK) {
+ int64_t elapsedNanos = getNanoseconds() - presentationTime;
+ int64_t elapsedFrames = actualSampleRate * elapsedNanos / NANOS_PER_SECOND;
+ int64_t currentFrame = presentationFrame + elapsedFrames;
+ int64_t framesWritten = AAudioStream_getFramesWritten(aaudioStream);
+ int64_t estimatedLatencyFrames = framesWritten - currentFrame;
+ int64_t estimatedLatencyMillis = estimatedLatencyFrames * 1000 / actualSampleRate;
+ printf("estimatedLatencyMillis %d\n", (int)estimatedLatencyMillis);
+ }
+ }
}
xRunCount = AAudioStream_getXRunCount(aaudioStream);
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
new file mode 100644
index 0000000..9414236
--- /dev/null
+++ b/media/libaaudio/examples/write_sine/src/write_sine_callback.cpp
@@ -0,0 +1,320 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Play sine waves using an AAudio callback.
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sched.h>
+#include <stdio.h>
+#include <math.h>
+#include <time.h>
+#include <aaudio/AAudio.h>
+#include "SineGenerator.h"
+
+#define NUM_SECONDS 5
+
+//#define SHARING_MODE AAUDIO_SHARING_MODE_EXCLUSIVE
+#define SHARING_MODE AAUDIO_SHARING_MODE_SHARED
+
+#define CALLBACK_SIZE_FRAMES 128
+
+// TODO refactor common code into a single SimpleAAudio class
+/**
+ * Simple wrapper for AAudio that opens a default stream and then calls
+ * a callback function to fill the output buffers.
+ */
+class SimpleAAudioPlayer {
+public:
+ SimpleAAudioPlayer() {}
+ ~SimpleAAudioPlayer() {
+ close();
+ };
+
+ /**
+ * Call this before calling open().
+ * @param requestedSharingMode
+ */
+ void setSharingMode(aaudio_sharing_mode_t requestedSharingMode) {
+ mRequestedSharingMode = requestedSharingMode;
+ }
+
+ /**
+ * Also known as "sample rate"
+ * Only call this after open() has been called.
+ */
+ int32_t getFramesPerSecond() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSampleRate(mStream);
+ }
+
+ /**
+ * Only call this after open() has been called.
+ */
+ int32_t getSamplesPerFrame() {
+ if (mStream == nullptr) {
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+ return AAudioStream_getSamplesPerFrame(mStream);
+ }
+
+ /**
+ * Open a stream
+ */
+ aaudio_result_t open(AAudioStream_dataCallback dataProc, void *userContext) {
+ aaudio_result_t result = AAUDIO_OK;
+
+ // Use an AAudioStreamBuilder to contain requested parameters.
+ result = AAudio_createStreamBuilder(&mBuilder);
+ if (result != AAUDIO_OK) return result;
+
+ AAudioStreamBuilder_setSharingMode(mBuilder, mRequestedSharingMode);
+ AAudioStreamBuilder_setDataCallback(mBuilder, dataProc, userContext);
+ AAudioStreamBuilder_setFormat(mBuilder, AAUDIO_FORMAT_PCM_FLOAT);
+ AAudioStreamBuilder_setFramesPerDataCallback(mBuilder, CALLBACK_SIZE_FRAMES);
+ // AAudioStreamBuilder_setBufferCapacityInFrames(mBuilder, CALLBACK_SIZE_FRAMES * 4);
+
+ // Open an AAudioStream using the Builder.
+ result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
+ if (result != AAUDIO_OK) goto finish1;
+
+ printf("AAudioStream_getFramesPerBurst() = %d\n",
+ AAudioStream_getFramesPerBurst(mStream));
+ printf("AAudioStream_getBufferSizeInFrames() = %d\n",
+ AAudioStream_getBufferSizeInFrames(mStream));
+ printf("AAudioStream_getBufferCapacityInFrames() = %d\n",
+ AAudioStream_getBufferCapacityInFrames(mStream));
+ return result;
+
+ finish1:
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ return result;
+ }
+
+ aaudio_result_t close() {
+ if (mStream != nullptr) {
+ printf("call AAudioStream_close(%p)\n", mStream); fflush(stdout);
+ AAudioStream_close(mStream);
+ mStream = nullptr;
+ AAudioStreamBuilder_delete(mBuilder);
+ mBuilder = nullptr;
+ }
+ return AAUDIO_OK;
+ }
+
+ // Write zero data to fill up the buffer and prevent underruns.
+ aaudio_result_t prime() {
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(mStream);
+ const int numFrames = 32;
+ float zeros[numFrames * samplesPerFrame];
+ memset(zeros, 0, sizeof(zeros));
+ aaudio_result_t result = numFrames;
+ while (result == numFrames) {
+ result = AAudioStream_write(mStream, zeros, numFrames, 0);
+ }
+ return result;
+ }
+
+ // Start the stream. AAudio will start calling your callback function.
+ aaudio_result_t start() {
+ aaudio_result_t result = AAudioStream_requestStart(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStart() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ return result;
+ }
+
+ // Stop the stream. AAudio will stop calling your callback function.
+ aaudio_result_t stop() {
+ aaudio_result_t result = AAudioStream_requestStop(mStream);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_requestStop() returned %d %s\n",
+ result, AAudio_convertResultToText(result));
+ }
+ int32_t xRunCount = AAudioStream_getXRunCount(mStream);
+ printf("AAudioStream_getXRunCount %d\n", xRunCount);
+ return result;
+ }
+
+ AAudioStream *getStream() const {
+ return mStream;
+ }
+
+private:
+ AAudioStreamBuilder *mBuilder = nullptr;
+ AAudioStream *mStream = nullptr;
+ aaudio_sharing_mode_t mRequestedSharingMode = SHARING_MODE;
+};
+
+// Application data that gets passed to the callback.
+#define MAX_FRAME_COUNT_RECORDS 256
+typedef struct SineThreadedData_s {
+ SineGenerator sineOsc1;
+ SineGenerator sineOsc2;
+ // TODO remove these variables; they are only used for testing.
+ int32_t numFrameCounts;
+ int32_t frameCounts[MAX_FRAME_COUNT_RECORDS];
+ int scheduler;
+ bool schedulerChecked;
+} SineThreadedData_t;
+
+// Callback function that fills the audio output buffer.
+aaudio_data_callback_result_t MyDataCallbackProc(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames
+ ) {
+
+ SineThreadedData_t *sineData = (SineThreadedData_t *) userData;
+
+ if (sineData->numFrameCounts < MAX_FRAME_COUNT_RECORDS) {
+ sineData->frameCounts[sineData->numFrameCounts++] = numFrames;
+ }
+
+ if (!sineData->schedulerChecked) {
+ sineData->scheduler = sched_getscheduler(gettid());
+ sineData->schedulerChecked = true;
+ }
+
+ int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
+ // This code only plays on the first one or two channels.
+ // TODO Support arbitrary number of channels.
+ switch (AAudioStream_getFormat(stream)) {
+ case AAUDIO_FORMAT_PCM_I16: {
+ int16_t *audioBuffer = (int16_t *) audioData;
+ // Render sine waves as shorts to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ case AAUDIO_FORMAT_PCM_FLOAT: {
+ float *audioBuffer = (float *) audioData;
+ // Render sine waves as floats to first channel.
+ sineData->sineOsc1.render(&audioBuffer[0], samplesPerFrame, numFrames);
+ // Render sine waves to second channel if there is one.
+ if (samplesPerFrame > 1) {
+ sineData->sineOsc2.render(&audioBuffer[1], samplesPerFrame, numFrames);
+ }
+ }
+ break;
+ default:
+ return AAUDIO_CALLBACK_RESULT_STOP;
+ }
+
+ return AAUDIO_CALLBACK_RESULT_CONTINUE;
+}
+
+int main(int argc, char **argv)
+{
+ (void)argc; // unused
+ SimpleAAudioPlayer player;
+ SineThreadedData_t myData;
+ aaudio_result_t result;
+
+ // Make printf print immediately so that debug info is not stuck
+ // in a buffer if we hang or crash.
+ setvbuf(stdout, nullptr, _IONBF, (size_t) 0);
+ printf("%s - Play a sine sweep using an AAudio callback\n", argv[0]);
+
+ player.setSharingMode(SHARING_MODE);
+
+ myData.numFrameCounts = 0;
+ myData.schedulerChecked = false;
+
+ result = player.open(MyDataCallbackProc, &myData);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.open() returned %d\n", result);
+ goto error;
+ }
+ printf("player.getFramesPerSecond() = %d\n", player.getFramesPerSecond());
+ printf("player.getSamplesPerFrame() = %d\n", player.getSamplesPerFrame());
+ myData.sineOsc1.setup(440.0, 48000);
+ myData.sineOsc1.setSweep(300.0, 600.0, 5.0);
+ myData.sineOsc2.setup(660.0, 48000);
+ myData.sineOsc2.setSweep(350.0, 900.0, 7.0);
+
+#if 0
+ result = player.prime(); // FIXME crashes AudioTrack.cpp
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.prime() returned %d\n", result);
+ goto error;
+ }
+#endif
+
+ result = player.start();
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - player.start() returned %d\n", result);
+ goto error;
+ }
+
+ printf("Sleep for %d seconds while audio plays in a callback thread.\n", NUM_SECONDS);
+ for (int second = 0; second < NUM_SECONDS; second++)
+ {
+ const struct timespec request = { .tv_sec = 1, .tv_nsec = 0 };
+ (void) clock_nanosleep(CLOCK_MONOTONIC, 0 /*flags*/, &request, NULL /*remain*/);
+
+ aaudio_stream_state_t state;
+ result = AAudioStream_waitForStateChange(player.getStream(),
+ AAUDIO_STREAM_STATE_CLOSED,
+ &state,
+ 0);
+ if (result != AAUDIO_OK) {
+ fprintf(stderr, "ERROR - AAudioStream_waitForStateChange() returned %d\n", result);
+ goto error;
+ }
+ if (state != AAUDIO_STREAM_STATE_STARTING && state != AAUDIO_STREAM_STATE_STARTED) {
+ printf("Stream state is %d %s!\n", state, AAudio_convertStreamStateToText(state));
+ break;
+ }
+ }
+ printf("Woke up now.\n");
+
+ result = player.stop();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+ result = player.close();
+ if (result != AAUDIO_OK) {
+ goto error;
+ }
+
+ // Report data gathered in the callback.
+ for (int i = 0; i < myData.numFrameCounts; i++) {
+ printf("numFrames[%4d] = %4d\n", i, myData.frameCounts[i]);
+ }
+ if (myData.schedulerChecked) {
+ printf("scheduler = 0x%08x, SCHED_FIFO = 0x%08X\n",
+ myData.scheduler,
+ SCHED_FIFO);
+ }
+
+ printf("SUCCESS\n");
+ return EXIT_SUCCESS;
+error:
+ player.close();
+ printf("exiting - AAudio result = %d = %s\n", result, AAudio_convertResultToText(result));
+ return EXIT_FAILURE;
+}
+
diff --git a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
index 40e5016..8065c48 100644
--- a/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
+++ b/media/libaaudio/examples/write_sine/src/write_sine_threaded.cpp
@@ -22,7 +22,6 @@
#include <stdio.h>
#include <math.h>
#include <time.h>
-#include <aaudio/AAudioDefinitions.h>
#include <aaudio/AAudio.h>
#include "SineGenerator.h"
@@ -49,7 +48,7 @@
class SimpleAAudioPlayer {
public:
SimpleAAudioPlayer() {}
- virtual ~SimpleAAudioPlayer() {
+ ~SimpleAAudioPlayer() {
close();
};
@@ -83,7 +82,7 @@
// Open an AAudioStream using the Builder.
result = AAudioStreamBuilder_openStream(mBuilder, &mStream);
- if (result != AAUDIO_OK) goto finish1;
+ if (result != AAUDIO_OK) goto error;
// Check to see what kind of stream we actually got.
mFramesPerSecond = AAudioStream_getSampleRate(mStream);
@@ -126,7 +125,7 @@
}
return result;
- finish1:
+ error:
AAudioStreamBuilder_delete(mBuilder);
mBuilder = nullptr;
return result;
diff --git a/media/libaaudio/examples/write_sine/static/Android.mk b/media/libaaudio/examples/write_sine/static/Android.mk
index 139b70a..aeccb4a 100644
--- a/media/libaaudio/examples/write_sine/static/Android.mk
+++ b/media/libaaudio/examples/write_sine/static/Android.mk
@@ -17,6 +17,8 @@
LOCAL_MODULE := write_sine
include $(BUILD_EXECUTABLE)
+
+
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := tests
LOCAL_C_INCLUDES := \
@@ -27,8 +29,26 @@
LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
libbinder libcutils libutils \
- libaudioclient liblog libtinyalsa
+ libaudioclient liblog
LOCAL_STATIC_LIBRARIES := libaaudio
LOCAL_MODULE := write_sine_threaded
include $(BUILD_EXECUTABLE)
+
+
+
+include $(CLEAR_VARS)
+LOCAL_MODULE_TAGS := tests
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include
+
+LOCAL_SRC_FILES:= ../src/write_sine_callback.cpp
+
+LOCAL_SHARED_LIBRARIES := libaudioutils libmedia \
+ libbinder libcutils libutils \
+ libaudioclient liblog
+LOCAL_STATIC_LIBRARIES := libaaudio
+
+LOCAL_MODULE := write_sine_callback
+include $(BUILD_EXECUTABLE)
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 921248a..25ad5f8 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -15,7 +15,16 @@
*/
/**
- * This is the 'C' ABI for AAudio.
+ * @addtogroup Audio
+ * @{
+ */
+
+/**
+ * @file AAudio.h
+ */
+
+/**
+ * This is the 'C' API for AAudio.
*/
#ifndef AAUDIO_AAUDIO_H
#define AAUDIO_AAUDIO_H
@@ -80,7 +89,8 @@
* Request an audio device identified by an ID.
* On Android, for example, the ID could be obtained from the Java AudioManager.
*
- * By default, the primary device will be used.
+ * The default, if you do not call this function, is AAUDIO_DEVICE_UNSPECIFIED,
+ * in which case the primary device will be used.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param deviceId device identifier or AAUDIO_DEVICE_UNSPECIFIED
@@ -89,52 +99,71 @@
int32_t deviceId);
/**
- * Request a sample rate in Hz.
+ * Request a sample rate in Hertz.
+ *
* The stream may be opened with a different sample rate.
* So the application should query for the actual rate after the stream is opened.
*
* Technically, this should be called the "frame rate" or "frames per second",
* because it refers to the number of complete frames transferred per second.
- * But it is traditionally called "sample rate". Se we use that term.
+ * But it is traditionally called "sample rate". So we use that term.
*
- * Default is AAUDIO_UNSPECIFIED.
-
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
*/
AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
int32_t sampleRate);
/**
* Request a number of samples per frame.
+ *
* The stream may be opened with a different value.
* So the application should query for the actual value after the stream is opened.
*
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
*
* Note, this quantity is sometimes referred to as "channel count".
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param samplesPerFrame Number of samples in one frame, i.e. the number of channels.
*/
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
int32_t samplesPerFrame);
/**
* Request a sample data format, for example AAUDIO_FORMAT_PCM_I16.
- * The application should query for the actual format after the stream is opened.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * The stream may be opened with a different value.
+ * So the application should query for the actual value after the stream is opened.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param format Most common formats are AAUDIO_FORMAT_PCM_FLOAT and AAUDIO_FORMAT_PCM_I16.
*/
AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
aaudio_audio_format_t format);
/**
* Request a mode for sharing the device.
+ *
+ * The default, if you do not call this function, is AAUDIO_SHARING_MODE_SHARED.
+ *
* The requested sharing mode may not be available.
- * So the application should query for the actual mode after the stream is opened.
+ * The application can query for the actual mode after the stream is opened.
*
* @param builder reference provided by AAudio_createStreamBuilder()
- * @param sharingMode AAUDIO_SHARING_MODE_LEGACY or AAUDIO_SHARING_MODE_EXCLUSIVE
+ * @param sharingMode AAUDIO_SHARING_MODE_SHARED or AAUDIO_SHARING_MODE_EXCLUSIVE
*/
AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
aaudio_sharing_mode_t sharingMode);
/**
- * Request the direction for a stream. The default is AAUDIO_DIRECTION_OUTPUT.
+ * Request the direction for a stream.
+ *
+ * The default, if you do not call this function, is AAUDIO_DIRECTION_OUTPUT.
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param direction AAUDIO_DIRECTION_OUTPUT or AAUDIO_DIRECTION_INPUT
@@ -143,16 +172,162 @@
aaudio_direction_t direction);
/**
- * Set the requested maximum buffer capacity in frames.
+ * Set the requested buffer capacity in frames.
* The final AAudioStream capacity may differ, but will probably be at least this big.
*
- * Default is AAUDIO_UNSPECIFIED.
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
*
* @param builder reference provided by AAudio_createStreamBuilder()
- * @param frames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
+ * @param numFrames the desired buffer capacity in frames or AAUDIO_UNSPECIFIED
*/
AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
- int32_t frames);
+ int32_t numFrames);
+/**
+ * Return one of these values from the data callback function.
+ */
+enum {
+
+ /**
+ * Continue calling the callback.
+ */
+ AAUDIO_CALLBACK_RESULT_CONTINUE = 0,
+
+ /**
+ * Stop calling the callback.
+ *
+ * The application will still need to call AAudioStream_requestPause()
+ * or AAudioStream_requestStop().
+ */
+ AAUDIO_CALLBACK_RESULT_STOP,
+
+};
+typedef int32_t aaudio_data_callback_result_t;
+
+/**
+ * Prototype for the data function that is passed to AAudioStreamBuilder_setDataCallback().
+ *
+ * For an output stream, this function should render and write numFrames of data
+ * in the stream's current data format to the audioData buffer.
+ *
+ * For an input stream, this function should read and process numFrames of data
+ * from the audioData buffer.
+ *
+ * Note that this callback function should be considered a "real-time" function.
+ * It must not do anything that could cause an unbounded delay because that can cause the
+ * audio to glitch or pop.
+ *
+ * These are things the function should NOT do:
+ * <ul>
+ * <li>allocate memory using, for example, malloc() or new</li>
+ * <li>any file operations such as opening, closing, reading or writing</li>
+ * <li>any network operations such as streaming</li>
+ * <li>use any mutexes or other synchronization primitives</li>
+ * <li>sleep</li>
+ * </ul>
+ *
+ * If you need to move data, e.g. MIDI commands, in or out of the callback function then
+ * we recommend the use of non-blocking techniques such as an atomic FIFO.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setDataCallback()
+ * @param audioData a pointer to the audio data
+ * @param numFrames the number of frames to be processed
+ * @return AAUDIO_CALLBACK_RESULT_*
+ */
+typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
+ AAudioStream *stream,
+ void *userData,
+ void *audioData,
+ int32_t numFrames);
+
+/**
+ * Request that AAudio call this function when the stream is running.
+ *
+ * Note that when using this callback, the audio data will be passed in or out
+ * of the function as an argument.
+ * So you cannot call AAudioStream_write() or AAudioStream_read() on the same stream
+ * that has an active data callback.
+ *
+ * The callback function will start being called after AAudioStream_requestStart() is called.
+ * It will stop being called after AAudioStream_requestPause() or
+ * AAudioStream_requestStop() is called.
+ *
+ * This callback function will be called on a real-time thread owned by AAudio. See
+ * {@link AAudioStream_dataCallback} for more information.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will process audio data.
+ * @param userData pointer to an application data structure that will be passed
+ * to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+ AAudioStream_dataCallback callback,
+ void *userData);
+
+/**
+ * Set the requested data callback buffer size in frames.
+ * See {@link AAudioStream_dataCallback}.
+ *
+ * The default, if you do not call this function, is AAUDIO_UNSPECIFIED.
+ *
+ * For the lowest possible latency, do not call this function. AAudio will then
+ * call the dataProc callback function with whatever size is optimal.
+ * That size may vary from one callback to another.
+ *
+ * Only use this function if the application requires a specific number of frames for processing.
+ * The application might, for example, be using an FFT that requires
+ * a specific power-of-two sized buffer.
+ *
+ * AAudio may need to add additional buffering in order to adapt between the internal
+ * buffer size and the requested buffer size.
+ *
+ * If you do call this function then the requested size should be less than
+ * half the buffer capacity, to allow double buffering.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param numFrames the desired buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+ int32_t numFrames);
+
+/**
+ * Prototype for the callback function that is passed to
+ * AAudioStreamBuilder_setErrorCallback().
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @param userData the same address that was passed to AAudioStreamBuilder_setErrorCallback()
+ * @param error an AAUDIO_ERROR_* value.
+ */
+typedef void (*AAudioStream_errorCallback)(
+ AAudioStream *stream,
+ void *userData,
+ aaudio_result_t error);
+
+/**
+ * Request that AAudio call this function if any error occurs on a callback thread.
+ *
+ * It will be called, for example, if a headset or a USB device is unplugged causing the stream's
+ * device to be unavailable.
+ * In response, this function could signal or launch another thread to reopen a
+ * stream on another device. Do not reopen the stream in this callback.
+ *
+ * This will not be called because of actions by the application, such as stopping
+ * or closing a stream.
+ *
+ * Another possible cause of error would be a timeout or an unanticipated internal error.
+ *
+ * Note that the AAudio callbacks will never be called simultaneously from multiple threads.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param callback pointer to a function that will be called if an error occurs.
+ * @param userData pointer to an application data structure that will be passed
+ * to the callback functions.
+ */
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+ AAudioStream_errorCallback callback,
+ void *userData);
/**
* Open a stream based on the options in the StreamBuilder.
@@ -324,9 +499,14 @@
// High priority audio threads
// ============================================================
+/**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ */
typedef void *(*aaudio_audio_thread_proc_t)(void *);
/**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
* Create a thread associated with a stream. The thread has special properties for
* low latency audio performance. This thread can be used to implement a callback API.
*
@@ -351,6 +531,8 @@
void *arg);
/**
+ * @deprecated Use AAudioStreamBuilder_setDataCallback()
+ *
* Wait until the thread exits or an error occurs.
*
* @param stream A stream created using AAudioStreamBuilder_openStream().
@@ -379,11 +561,11 @@
* Call AAudioStream_getBufferSizeInFrames() to see what the actual final size is.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
- * @param requestedFrames requested number of frames that can be filled without blocking
+ * @param numFrames requested number of frames that can be filled without blocking
* @return actual buffer size in frames or a negative error
*/
AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
- int32_t requestedFrames);
+ int32_t numFrames);
/**
* Query the maximum number of frames that can be filled without blocking.
@@ -412,11 +594,32 @@
* Query maximum buffer capacity in frames.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
- * @return the buffer capacity in frames
+ * @return buffer capacity in frames
*/
AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream);
/**
+ * Query the size of the buffer that will be passed to the dataProc callback
+ * in the numFrames parameter.
+ *
+ * This call can be used if the application needs to know the value of numFrames before
+ * the stream is started. This is not normally necessary.
+ *
+ * If a specific size was requested by calling AAudioStreamBuilder_setFramesPerDataCallback()
+ * then this will be the same size.
+ *
+ * If AAudioStreamBuilder_setFramesPerDataCallback() was not called then this will
+ * return the size chosen by AAudio, or AAUDIO_UNSPECIFIED.
+ *
+ * AAUDIO_UNSPECIFIED indicates that the callback buffer size for this stream
+ * may vary from one dataProc callback to the next.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return callback buffer size in frames or AAUDIO_UNSPECIFIED
+ */
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream);
+
+/**
* An XRun is an Underrun or an Overrun.
* During playing, an underrun will occur if the stream is not written in time
* and the system runs out of valid data.
@@ -525,3 +728,5 @@
#endif
#endif //AAUDIO_AAUDIO_H
+
+/** @} */
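As a usage note for the callback API added above, the following sketch shows how the new builder calls fit together. It is illustrative only: MyState, the silence-filling data callback, and playWithCallback() are placeholder application code, not part of the header.

#include <aaudio/AAudio.h>
#include <stdio.h>
#include <string.h>

// Placeholder application state shared with the callbacks.
typedef struct { int framesRendered; } MyState;

static aaudio_data_callback_result_t myDataCallback(AAudioStream *stream,
        void *userData, void *audioData, int32_t numFrames) {
    MyState *state = (MyState *) userData;
    int32_t samplesPerFrame = AAudioStream_getSamplesPerFrame(stream);
    // Render silence; a real app would synthesize or copy audio here.
    // This assumes the AAUDIO_FORMAT_PCM_FLOAT request below was honored.
    memset(audioData, 0, numFrames * samplesPerFrame * sizeof(float));
    state->framesRendered += numFrames;
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

static void myErrorCallback(AAudioStream *stream, void *userData,
        aaudio_result_t error) {
    (void) stream; (void) userData;
    // Signal another thread to reopen a stream; do not reopen it here.
    fprintf(stderr, "stream error: %s\n", AAudio_convertResultToText(error));
}

static aaudio_result_t playWithCallback() {
    AAudioStreamBuilder *builder = NULL;
    AAudioStream *stream = NULL;
    MyState state = { 0 };

    aaudio_result_t result = AAudio_createStreamBuilder(&builder);
    if (result != AAUDIO_OK) return result;

    AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
    AAudioStreamBuilder_setDataCallback(builder, myDataCallback, &state);
    AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, &state);
    // Leave AAudioStreamBuilder_setFramesPerDataCallback() unset for the
    // lowest latency; AAudio then picks the callback size itself.

    result = AAudioStreamBuilder_openStream(builder, &stream);
    AAudioStreamBuilder_delete(builder);
    if (result != AAUDIO_OK) return result;

    result = AAudioStream_requestStart(stream);  // callbacks begin after this
    // ... let the stream run ...
    AAudioStream_requestStop(stream);            // callbacks stop after this
    AAudioStream_close(stream);
    return result;
}

A real data callback should also check AAudioStream_getFormat(), since the PCM_FLOAT request may not be honored.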
diff --git a/media/libaaudio/include/aaudio/AAudioDefinitions.h b/media/libaaudio/include/aaudio/AAudioDefinitions.h
index 846318c..fbd284c 100644
--- a/media/libaaudio/include/aaudio/AAudioDefinitions.h
+++ b/media/libaaudio/include/aaudio/AAudioDefinitions.h
@@ -14,6 +14,15 @@
* limitations under the License.
*/
+/**
+ * @addtogroup Audio
+ * @{
+ */
+
+/**
+ * @file AAudioDefinitions.h
+ */
+
#ifndef AAUDIO_AAUDIODEFINITIONS_H
#define AAUDIO_AAUDIODEFINITIONS_H
@@ -117,3 +126,5 @@
#endif
#endif // AAUDIO_AAUDIODEFINITIONS_H
+
+/** @} */
diff --git a/media/libaaudio/libaaudio.map.txt b/media/libaaudio/libaaudio.map.txt
index a9e9109..f22fdfe 100644
--- a/media/libaaudio/libaaudio.map.txt
+++ b/media/libaaudio/libaaudio.map.txt
@@ -4,6 +4,9 @@
AAudio_convertStreamStateToText;
AAudio_createStreamBuilder;
AAudioStreamBuilder_setDeviceId;
+ AAudioStreamBuilder_setDataCallback;
+ AAudioStreamBuilder_setErrorCallback;
+ AAudioStreamBuilder_setFramesPerDataCallback;
AAudioStreamBuilder_setSampleRate;
AAudioStreamBuilder_setSamplesPerFrame;
AAudioStreamBuilder_setFormat;
@@ -25,6 +28,7 @@
AAudioStream_joinThread;
AAudioStream_setBufferSizeInFrames;
AAudioStream_getBufferSizeInFrames;
+ AAudioStream_getFramesPerDataCallback;
AAudioStream_getFramesPerBurst;
AAudioStream_getBufferCapacityInFrames;
AAudioStream_getXRunCount;
diff --git a/media/libaaudio/src/Android.mk b/media/libaaudio/src/Android.mk
index a016b49..1ee73bf 100644
--- a/media/libaaudio/src/Android.mk
+++ b/media/libaaudio/src/Android.mk
@@ -30,10 +30,14 @@
core/AudioStream.cpp \
core/AudioStreamBuilder.cpp \
core/AAudioAudio.cpp \
+ legacy/AudioStreamLegacy.cpp \
legacy/AudioStreamRecord.cpp \
legacy/AudioStreamTrack.cpp \
utility/HandleTracker.cpp \
utility/AAudioUtilities.cpp \
+ utility/FixedBlockAdapter.cpp \
+ utility/FixedBlockReader.cpp \
+ utility/FixedBlockWriter.cpp \
fifo/FifoBuffer.cpp \
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
@@ -79,10 +83,14 @@
LOCAL_SRC_FILES = core/AudioStream.cpp \
core/AudioStreamBuilder.cpp \
core/AAudioAudio.cpp \
+ legacy/AudioStreamLegacy.cpp \
legacy/AudioStreamRecord.cpp \
legacy/AudioStreamTrack.cpp \
utility/HandleTracker.cpp \
utility/AAudioUtilities.cpp \
+ utility/FixedBlockAdapter.cpp \
+ utility/FixedBlockReader.cpp \
+ utility/FixedBlockWriter.cpp \
fifo/FifoBuffer.cpp \
fifo/FifoControllerBase.cpp \
client/AudioEndpoint.cpp \
diff --git a/media/libaaudio/src/client/AudioEndpoint.cpp b/media/libaaudio/src/client/AudioEndpoint.cpp
index 47c4774..90c619c 100644
--- a/media/libaaudio/src/client/AudioEndpoint.cpp
+++ b/media/libaaudio/src/client/AudioEndpoint.cpp
@@ -19,7 +19,7 @@
#include <utils/Log.h>
#include <cassert>
-#include <aaudio/AAudioDefinitions.h>
+#include <aaudio/AAudio.h>
#include "AudioEndpointParcelable.h"
#include "AudioEndpoint.h"
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 54f4870..1f9ce4f 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -18,23 +18,19 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
-#include <stdint.h>
#include <assert.h>
#include <binder/IServiceManager.h>
#include <utils/Mutex.h>
#include <aaudio/AAudio.h>
+#include <utils/String16.h>
-#include "AudioClock.h"
-#include "AudioEndpointParcelable.h"
-#include "binding/AAudioStreamRequest.h"
-#include "binding/AAudioStreamConfiguration.h"
-#include "binding/IAAudioService.h"
+#include "utility/AudioClock.h"
+#include "AudioStreamInternal.h"
#include "binding/AAudioServiceMessage.h"
#include "core/AudioStreamBuilder.h"
-#include "AudioStreamInternal.h"
#define LOG_TIMESTAMPS 0
@@ -51,6 +47,11 @@
#define AAUDIO_SERVICE_NAME "AAudioService"
+#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)
+
+// Wait at least this many times longer than the operation should take.
+#define MIN_TIMEOUT_OPERATIONS 4
+
// Helper function to get access to the "AAudioService" service.
// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
static const sp<IAAudioService> getAAudioService() {
@@ -151,6 +152,29 @@
mClockModel.setSampleRate(getSampleRate());
mClockModel.setFramesPerBurst(mFramesPerBurst);
+ if (getDataCallbackProc()) {
+ mCallbackFrames = builder.getFramesPerDataCallback();
+ if (mCallbackFrames > getBufferCapacity() / 2) {
+ ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
+ service->closeStream(mServiceStreamHandle);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+
+ } else if (mCallbackFrames < 0) {
+ ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
+ service->closeStream(mServiceStreamHandle);
+ return AAUDIO_ERROR_OUT_OF_RANGE;
+
+ }
+ if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
+ mCallbackFrames = mFramesPerBurst;
+ }
+
+ int32_t bytesPerFrame = getSamplesPerFrame()
+ * AAudioConvert_formatToSizeInBytes(getFormat());
+ int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
+ mCallbackBuffer = new uint8_t[callbackBufferSize];
+ }
+
setState(AAUDIO_STREAM_STATE_OPEN);
}
return result;
@@ -164,12 +188,69 @@
const sp<IAAudioService>& aaudioService = getAAudioService();
if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
aaudioService->closeStream(serviceStreamHandle);
+ delete[] mCallbackBuffer;
return AAUDIO_OK;
} else {
return AAUDIO_ERROR_INVALID_HANDLE;
}
}
+// Render audio in the application callback and then write the data to the stream.
+void *AudioStreamInternal::callbackLoop() {
+ aaudio_result_t result = AAUDIO_OK;
+ aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ if (appCallback == nullptr) return NULL;
+
+ while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) { // result might be a frame count
+ // Call application using the AAudio callback interface.
+ callbackResult = (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ mCallbackBuffer,
+ mCallbackFrames);
+
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ // Write audio data to stream
+ int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);
+ result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
+ if (result == AAUDIO_ERROR_DISCONNECTED) {
+ if (getErrorCallbackProc() != nullptr) {
+ ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
+ (*getErrorCallbackProc())(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ AAUDIO_ERROR_DISCONNECTED);
+ }
+ break;
+ } else if (result != mCallbackFrames) {
+ ALOGE("AudioStreamAAudio(): callbackLoop() wrote %d / %d",
+ result, mCallbackFrames);
+ break;
+ }
+ } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
+ ALOGD("AudioStreamAAudio(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
+ break;
+ }
+ }
+
+ ALOGD("AudioStreamAAudio(): callbackLoop() exiting, result = %d, isPlaying() = %d",
+ result, (int) isPlaying());
+ return NULL; // TODO review
+}
+
+static void *aaudio_callback_thread_proc(void *context)
+{
+ AudioStreamInternal *stream = (AudioStreamInternal *)context;
+ //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream);
+ if (stream != NULL) {
+ return stream->callbackLoop();
+ } else {
+ return NULL;
+ }
+}
+
aaudio_result_t AudioStreamInternal::requestStart()
{
int64_t startTime;
@@ -178,35 +259,81 @@
return AAUDIO_ERROR_INVALID_STATE;
}
const sp<IAAudioService>& aaudioService = getAAudioService();
- if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ if (aaudioService == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
startTime = AudioClock::getNanoseconds();
mClockModel.start(startTime);
processTimestamp(0, startTime);
setState(AAUDIO_STREAM_STATE_STARTING);
- return aaudioService->startStream(mServiceStreamHandle);
+ aaudio_result_t result = aaudioService->startStream(mServiceStreamHandle);
+
+ if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
+ // Launch the callback loop thread.
+ int64_t periodNanos = mCallbackFrames
+ * AAUDIO_NANOS_PER_SECOND
+ / getSampleRate();
+ mCallbackEnabled.store(true);
+ result = createThread(periodNanos, aaudio_callback_thread_proc, this);
+ }
+ return result;
}
-aaudio_result_t AudioStreamInternal::requestPause()
+int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
+
+ // Allow MIN_TIMEOUT_OPERATIONS times the expected duration of the operation,
+ // but never less than MIN_TIMEOUT_NANOS.
+ int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
+ / getSampleRate();
+ if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary floor
+ timeoutNanoseconds = MIN_TIMEOUT_NANOS;
+ }
+ return timeoutNanoseconds;
+}
+
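For scale, a worked example with illustrative numbers (not taken from this change): at a 48000 Hz sample rate with a 192-frame operation, the computed value is 4 * 192 * 1e9 / 48000 = 16 ms, which is below the 1 s MIN_TIMEOUT_NANOS floor, so 1 s is returned; only operations longer than roughly 250 ms produce a timeout above the floor.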
+aaudio_result_t AudioStreamInternal::stopCallback()
+{
+ if (isDataCallbackActive()) {
+ mCallbackEnabled.store(false);
+ return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
+ } else {
+ return AAUDIO_OK;
+ }
+}
+
+aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
ALOGD("AudioStreamInternal(): pause()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
const sp<IAAudioService>& aaudioService = getAAudioService();
- if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
+ if (aaudioService == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
mClockModel.stop(AudioClock::getNanoseconds());
setState(AAUDIO_STREAM_STATE_PAUSING);
return aaudioService->pauseStream(mServiceStreamHandle);
}
+aaudio_result_t AudioStreamInternal::requestPause()
+{
+ aaudio_result_t result = stopCallback();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+ return requestPauseInternal();
+}
+
aaudio_result_t AudioStreamInternal::requestFlush() {
ALOGD("AudioStreamInternal(): flush()");
if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
return AAUDIO_ERROR_INVALID_STATE;
}
const sp<IAAudioService>& aaudioService = getAAudioService();
- if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
- setState(AAUDIO_STREAM_STATE_FLUSHING);
+ if (aaudioService == 0) {
+ return AAUDIO_ERROR_NO_SERVICE;
+ }
+ setState(AAUDIO_STREAM_STATE_FLUSHING);
return aaudioService->flushStream(mServiceStreamHandle);
}
@@ -260,18 +387,20 @@
return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
}
-// TODO use aaudio_clockid_t all the way down to AudioClock
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds) {
-// TODO implement using real HAL
+ // TODO implement using real HAL
int64_t time = AudioClock::getNanoseconds();
*framePosition = mClockModel.convertTimeToPosition(time);
*timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
return AAUDIO_OK;
}
-aaudio_result_t AudioStreamInternal::updateState() {
+aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
+ if (isDataCallbackActive()) {
+ return AAUDIO_OK; // state is getting updated by the callback thread read/write call
+ }
return processCommands();
}
@@ -485,43 +614,6 @@
return framesWritten;
}
-aaudio_result_t AudioStreamInternal::waitForStateChange(aaudio_stream_state_t currentState,
- aaudio_stream_state_t *nextState,
- int64_t timeoutNanoseconds)
-
-{
- aaudio_result_t result = processCommands();
-// ALOGD("AudioStreamInternal::waitForStateChange() - processCommands() returned %d", result);
- if (result != AAUDIO_OK) {
- return result;
- }
- // TODO replace this polling with a timed sleep on a futex on the message queue
- int32_t durationNanos = 5 * AAUDIO_NANOS_PER_MILLISECOND;
- aaudio_stream_state_t state = getState();
-// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
- while (state == currentState && timeoutNanoseconds > 0) {
- // TODO use futex from service message queue
- if (durationNanos > timeoutNanoseconds) {
- durationNanos = timeoutNanoseconds;
- }
- AudioClock::sleepForNanos(durationNanos);
- timeoutNanoseconds -= durationNanos;
-
- result = processCommands();
- if (result != AAUDIO_OK) {
- return result;
- }
-
- state = getState();
-// ALOGD("AudioStreamInternal::waitForStateChange() - state = %d", state);
- }
- if (nextState != nullptr) {
- *nextState = state;
- }
- return (state == currentState) ? AAUDIO_ERROR_TIMEOUT : AAUDIO_OK;
-}
-
-
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
mClockModel.processTimestamp( position, time);
}
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 6f3a7ac..9a15a9b 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -53,7 +53,7 @@
int64_t *timeNanoseconds) override;
- virtual aaudio_result_t updateState() override;
+ virtual aaudio_result_t updateStateWhileWaiting() override;
// =========== End ABSTRACT methods ===========================
virtual aaudio_result_t open(const AudioStreamBuilder &builder) override;
@@ -64,10 +64,6 @@
int32_t numFrames,
int64_t timeoutNanoseconds) override;
- virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
- aaudio_stream_state_t *nextState,
- int64_t timeoutNanoseconds) override;
-
virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
virtual int32_t getBufferSize() const override;
@@ -86,10 +82,17 @@
virtual aaudio_result_t unregisterThread() override;
+ // Called internally from 'C'
+ void *callbackLoop();
+
protected:
aaudio_result_t processCommands();
+ aaudio_result_t requestPauseInternal();
+
+ aaudio_result_t stopCallback();
+
/**
* Low level write that will not block. It will just write as much as it can.
*
@@ -108,17 +111,22 @@
aaudio_result_t onTimestampFromServer(AAudioServiceMessage *message);
+ // Calculate timeout for an operation involving framesPerOperation.
+ int64_t calculateReasonableTimeout(int32_t framesPerOperation);
+
private:
IsochronousClockModel mClockModel;
AudioEndpoint mAudioEndpoint;
aaudio_handle_t mServiceStreamHandle;
EndpointDescriptor mEndpointDescriptor;
+ uint8_t *mCallbackBuffer = nullptr;
+ int32_t mCallbackFrames = 0;
+
// Offset from underlying frame position.
int64_t mFramesOffsetFromService = 0;
int64_t mLastFramesRead = 0;
int32_t mFramesPerBurst;
int32_t mXRunCount = 0;
-
void processTimestamp(uint64_t position, int64_t time);
};
diff --git a/media/libaaudio/src/client/IsochronousClockModel.cpp b/media/libaaudio/src/client/IsochronousClockModel.cpp
index 4c8aabc..c278c8b 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.cpp
+++ b/media/libaaudio/src/client/IsochronousClockModel.cpp
@@ -19,7 +19,6 @@
#include <utils/Log.h>
#include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
#include "utility/AudioClock.h"
#include "IsochronousClockModel.h"
diff --git a/media/libaaudio/src/client/IsochronousClockModel.h b/media/libaaudio/src/client/IsochronousClockModel.h
index 524c286..205c341 100644
--- a/media/libaaudio/src/client/IsochronousClockModel.h
+++ b/media/libaaudio/src/client/IsochronousClockModel.h
@@ -14,11 +14,10 @@
* limitations under the License.
*/
-#ifndef AAUDIO_ISOCHRONOUSCLOCKMODEL_H
-#define AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#ifndef AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
+#define AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
#include <stdint.h>
-#include <aaudio/AAudio.h>
namespace aaudio {
@@ -107,4 +106,4 @@
} /* namespace aaudio */
-#endif //AAUDIO_ISOCHRONOUSCLOCKMODEL_H
+#endif //AAUDIO_ISOCHRONOUS_CLOCK_MODEL_H
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index 52bad70..bc2f281 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -114,53 +114,79 @@
AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
int32_t deviceId)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setDeviceId(deviceId);
}
AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
int32_t sampleRate)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setSampleRate(sampleRate);
}
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
int32_t samplesPerFrame)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setSamplesPerFrame(samplesPerFrame);
}
AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
aaudio_direction_t direction)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setDirection(direction);
}
-
AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
aaudio_audio_format_t format)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setFormat(format);
}
AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
aaudio_sharing_mode_t sharingMode)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setSharingMode(sharingMode);
}
AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
int32_t frames)
{
- AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);;
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
streamBuilder->setBufferCapacity(frames);
}
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
+ AAudioStream_dataCallback callback,
+ void *userData)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+ streamBuilder->setDataCallbackProc(callback);
+ streamBuilder->setDataCallbackUserData(userData);
+}
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
+ AAudioStream_errorCallback callback,
+ void *userData)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ ALOGD("AAudioStreamBuilder_setCallback(): userData = %p", userData);
+ streamBuilder->setErrorCallbackProc(callback);
+ streamBuilder->setErrorCallbackUserData(userData);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
+ int32_t frames)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ ALOGD("%s: frames = %d", __func__, frames);
+ streamBuilder->setFramesPerDataCallback(frames);
+}
+
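For orientation, a minimal application-side sketch of the builder callback entry points added above. This is not part of the change; the stream format, channel count, and the exact callback typedefs are assumptions inferred from how this patch invokes the callbacks.

// Hypothetical application code (sketch only).
static aaudio_data_callback_result_t myDataCallback(AAudioStream *stream,
                                                    void *userData,
                                                    void *audioData,
                                                    int32_t numFrames) {
    (void) stream; (void) userData;
    float *out = static_cast<float *>(audioData); // assumes PCM float format
    for (int32_t i = 0; i < numFrames * 2; i++) { // assumes 2 channels
        out[i] = 0.0f;                            // render silence
    }
    return AAUDIO_CALLBACK_RESULT_CONTINUE;
}

static void myErrorCallback(AAudioStream *stream, void *userData, aaudio_result_t error) {
    (void) stream; (void) userData; (void) error;
    // e.g. flag the stream for reopening after a disconnect
}

// During setup, before building the stream:
AAudioStreamBuilder_setDataCallback(builder, myDataCallback, nullptr);
AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, nullptr);
AAudioStreamBuilder_setFramesPerDataCallback(builder, 192); // optional fixed callback size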
static aaudio_result_t AAudioInternal_openStream(AudioStreamBuilder *streamBuilder,
AAudioStream** streamPtr)
{
@@ -276,6 +302,13 @@
if (buffer == nullptr) {
return AAUDIO_ERROR_NULL;
}
+
+ // Don't allow writes when playing with a callback.
+ if (audioStream->getDataCallbackProc() != nullptr && audioStream->isPlaying()) {
+ ALOGE("Cannot write to a callback stream when running.");
+ return AAUDIO_ERROR_INVALID_STATE;
+ }
+
if (numFrames < 0) {
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
} else if (numFrames == 0) {
@@ -297,6 +330,9 @@
aaudio_audio_thread_proc_t threadProc, void *arg)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ if (audioStream->getDataCallbackProc() != nullptr) {
+ return AAUDIO_ERROR_INCOMPATIBLE;
+ }
return audioStream->createThread(periodNanoseconds, threadProc, arg);
}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index b054d94..68579fd 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -28,7 +28,9 @@
using namespace aaudio;
-AudioStream::AudioStream() {
+AudioStream::AudioStream()
+ : mCallbackEnabled(false)
+{
// mThread is a pthread_t of unknown size so we need memset.
memset(&mThread, 0, sizeof(mThread));
setPeriodNanoseconds(0);
@@ -36,13 +38,30 @@
aaudio_result_t AudioStream::open(const AudioStreamBuilder& builder)
{
- // TODO validate parameters.
+
// Copy parameters from the Builder because the Builder may be deleted after this call.
mSamplesPerFrame = builder.getSamplesPerFrame();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
- mSharingMode = builder.getSharingMode();
+ mDirection = builder.getDirection();
+
+ // callbacks
+ mFramesPerDataCallback = builder.getFramesPerDataCallback();
+ mDataCallbackProc = builder.getDataCallbackProc();
+ mErrorCallbackProc = builder.getErrorCallbackProc();
+ mDataCallbackUserData = builder.getDataCallbackUserData();
+
+ // TODO validate more parameters.
+ if (mErrorCallbackProc != nullptr && mDataCallbackProc == nullptr) {
+ ALOGE("AudioStream::open(): disconnect callback cannot be used without a data callback.");
+ return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ }
+ if (mDirection != AAUDIO_DIRECTION_INPUT && mDirection != AAUDIO_DIRECTION_OUTPUT) {
+ ALOGE("AudioStream::open(): illegal direction %d", mDirection);
+ return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ }
+
return AAUDIO_OK;
}
@@ -75,8 +94,13 @@
aaudio_stream_state_t *nextState,
int64_t timeoutNanoseconds)
{
+ aaudio_result_t result = updateStateWhileWaiting();
+ if (result != AAUDIO_OK) {
+ return result;
+ }
+
// TODO replace this when similar functionality added to AudioTrack.cpp
- int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND;
+ int64_t durationNanos = 20 * AAUDIO_NANOS_PER_MILLISECOND; // arbitrary
aaudio_stream_state_t state = getState();
while (state == currentState && timeoutNanoseconds > 0) {
if (durationNanos > timeoutNanoseconds) {
@@ -85,7 +109,7 @@
AudioClock::sleepForNanos(durationNanos);
timeoutNanoseconds -= durationNanos;
- aaudio_result_t result = updateState();
+ aaudio_result_t result = updateStateWhileWaiting();
if (result != AAUDIO_OK) {
return result;
}
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index af0593d..1485d20 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -18,8 +18,8 @@
#define AAUDIO_AUDIOSTREAM_H
#include <atomic>
+#include <mutex>
#include <stdint.h>
-#include <aaudio/AAudioDefinitions.h>
#include <aaudio/AAudio.h>
#include "AAudioUtilities.h"
@@ -50,20 +50,23 @@
virtual aaudio_result_t requestFlush() = 0;
virtual aaudio_result_t requestStop() = 0;
- // TODO use aaudio_clockid_t all the way down to AudioClock
virtual aaudio_result_t getTimestamp(clockid_t clockId,
int64_t *framePosition,
int64_t *timeNanoseconds) = 0;
- virtual aaudio_result_t updateState() = 0;
+ /**
+ * Update state while in the middle of waitForStateChange()
+ * @return AAUDIO_OK or a negative error code
+ */
+ virtual aaudio_result_t updateStateWhileWaiting() = 0;
// =========== End ABSTRACT methods ===========================
virtual aaudio_result_t waitForStateChange(aaudio_stream_state_t currentState,
- aaudio_stream_state_t *nextState,
- int64_t timeoutNanoseconds);
+ aaudio_stream_state_t *nextState,
+ int64_t timeoutNanoseconds);
/**
* Open the stream using the parameters in the builder.
@@ -153,10 +156,16 @@
return mDirection;
}
+ /**
+ * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+ */
int32_t getBytesPerFrame() const {
return mSamplesPerFrame * getBytesPerSample();
}
+ /**
+ * This is only valid after setFormat() has been called.
+ */
int32_t getBytesPerSample() const {
return AAudioConvert_formatToSizeInBytes(mFormat);
}
@@ -169,6 +178,27 @@
return mFramesRead.get();
}
+ AAudioStream_dataCallback getDataCallbackProc() const {
+ return mDataCallbackProc;
+ }
+ AAudioStream_errorCallback getErrorCallbackProc() const {
+ return mErrorCallbackProc;
+ }
+
+ void *getDataCallbackUserData() const {
+ return mDataCallbackUserData;
+ }
+ void *getErrorCallbackUserData() const {
+ return mErrorCallbackUserData;
+ }
+
+ int32_t getFramesPerDataCallback() const {
+ return mFramesPerDataCallback;
+ }
+
+ bool isDataCallbackActive() {
+ return (mDataCallbackProc != nullptr) && isPlaying();
+ }
// ============== I/O ===========================
// A Stream will only implement read() or write() depending on its direction.
@@ -236,6 +266,9 @@
mState = state;
}
+ std::mutex mStreamMutex;
+
+ std::atomic<bool> mCallbackEnabled;
protected:
@@ -260,6 +293,15 @@
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
+ // callback ----------------------------------
+
+ AAudioStream_dataCallback mDataCallbackProc = nullptr; // external callback functions
+ void *mDataCallbackUserData = nullptr;
+ int32_t mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+ AAudioStream_errorCallback mErrorCallbackProc = nullptr;
+ void *mErrorCallbackUserData = nullptr;
+
// background thread ----------------------------------
bool mHasThread = false;
pthread_t mThread; // initialized in constructor
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index 5a54e62..858ae80 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -44,6 +44,7 @@
aaudio_result_t AudioStreamBuilder::build(AudioStream** streamPtr) {
AudioStream* audioStream = nullptr;
const aaudio_sharing_mode_t sharingMode = getSharingMode();
+ ALOGE("AudioStreamBuilder.build() sharingMode = %d", sharingMode);
switch (getDirection()) {
case AAUDIO_DIRECTION_INPUT:
switch (sharingMode) {
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.h b/media/libaaudio/src/core/AudioStreamBuilder.h
index 7b5f35c..93ca7f5 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.h
+++ b/media/libaaudio/src/core/AudioStreamBuilder.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef AAUDIO_AUDIOSTREAMBUILDER_H
-#define AAUDIO_AUDIOSTREAMBUILDER_H
+#ifndef AAUDIO_AUDIO_STREAM_BUILDER_H
+#define AAUDIO_AUDIO_STREAM_BUILDER_H
#include <stdint.h>
@@ -101,6 +101,52 @@
return this;
}
+ AAudioStream_dataCallback getDataCallbackProc() const {
+ return mDataCallbackProc;
+ }
+
+ AudioStreamBuilder* setDataCallbackProc(AAudioStream_dataCallback proc) {
+ mDataCallbackProc = proc;
+ return this;
+ }
+
+
+ void *getDataCallbackUserData() const {
+ return mDataCallbackUserData;
+ }
+
+ AudioStreamBuilder* setDataCallbackUserData(void *userData) {
+ mDataCallbackUserData = userData;
+ return this;
+ }
+
+ AAudioStream_errorCallback getErrorCallbackProc() const {
+ return mErrorCallbackProc;
+ }
+
+ AudioStreamBuilder* setErrorCallbackProc(AAudioStream_errorCallback proc) {
+ mErrorCallbackProc = proc;
+ return this;
+ }
+
+ AudioStreamBuilder* setErrorCallbackUserData(void *userData) {
+ mErrorCallbackUserData = userData;
+ return this;
+ }
+
+ void *getErrorCallbackUserData() const {
+ return mErrorCallbackUserData;
+ }
+
+ int32_t getFramesPerDataCallback() const {
+ return mFramesPerDataCallback;
+ }
+
+ AudioStreamBuilder* setFramesPerDataCallback(int32_t sizeInFrames) {
+ mFramesPerDataCallback = sizeInFrames;
+ return this;
+ }
+
aaudio_result_t build(AudioStream **streamPtr);
private:
@@ -111,8 +157,15 @@
aaudio_audio_format_t mFormat = AAUDIO_FORMAT_UNSPECIFIED;
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
+
+ AAudioStream_dataCallback mDataCallbackProc = nullptr; // external callback functions
+ void *mDataCallbackUserData = nullptr;
+ int32_t mFramesPerDataCallback = AAUDIO_UNSPECIFIED; // frames
+
+ AAudioStream_errorCallback mErrorCallbackProc = nullptr;
+ void *mErrorCallbackUserData = nullptr;
};
} /* namespace aaudio */
-#endif /* AAUDIO_AUDIOSTREAMBUILDER_H */
+#endif //AAUDIO_AUDIO_STREAM_BUILDER_H
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
new file mode 100644
index 0000000..baa24c9
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioStreamLegacy"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <stdint.h>
+#include <utils/String16.h>
+#include <media/AudioTrack.h>
+#include <aaudio/AAudio.h>
+
+#include "core/AudioStream.h"
+#include "legacy/AudioStreamLegacy.h"
+
+using namespace android;
+using namespace aaudio;
+
+AudioStreamLegacy::AudioStreamLegacy()
+ : AudioStream() {
+}
+
+AudioStreamLegacy::~AudioStreamLegacy() {
+}
+
+// Called from AudioTrack.cpp or AudioRecord.cpp
+static void AudioStreamLegacy_callback(int event, void* userData, void *info) {
+ AudioStreamLegacy *streamLegacy = (AudioStreamLegacy *) userData;
+ streamLegacy->processCallback(event, info);
+}
+
+aaudio_legacy_callback_t AudioStreamLegacy::getLegacyCallback() {
+ return AudioStreamLegacy_callback;
+}
+
+// Implement FixedBlockProcessor
+int32_t AudioStreamLegacy::onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t frameCount = numBytes / getBytesPerFrame();
+ // Call using the AAudio callback interface.
+ AAudioStream_dataCallback appCallback = getDataCallbackProc();
+ return (*appCallback)(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ buffer,
+ frameCount);
+}
+
+void AudioStreamLegacy::processCallbackCommon(aaudio_callback_operation_t opcode, void *info) {
+ aaudio_data_callback_result_t callbackResult;
+ switch (opcode) {
+ case AAUDIO_CALLBACK_OPERATION_PROCESS_DATA: {
+ // Note that this code assumes an AudioTrack::Buffer is the same as AudioRecord::Buffer
+ // TODO define our own AudioBuffer and pass it from the subclasses.
+ AudioTrack::Buffer *audioBuffer = static_cast<AudioTrack::Buffer *>(info);
+ if (audioBuffer->frameCount == 0) return;
+
+ // If the caller specified an exact size then use a block size adapter.
+ if (mBlockAdapter != nullptr) {
+ int32_t byteCount = audioBuffer->frameCount * getBytesPerFrame();
+ callbackResult = mBlockAdapter->processVariableBlock((uint8_t *) audioBuffer->raw,
+ byteCount);
+ } else {
+ // Call using the AAudio callback interface.
+ callbackResult = (*getDataCallbackProc())(
+ (AAudioStream *) this,
+ getDataCallbackUserData(),
+ audioBuffer->raw,
+ audioBuffer->frameCount
+ );
+ }
+ if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
+ audioBuffer->size = audioBuffer->frameCount * getBytesPerFrame();
+ } else {
+ audioBuffer->size = 0;
+ }
+ }
+ break;
+
+ // Stream got rerouted so we disconnect.
+ case AAUDIO_CALLBACK_OPERATION_DISCONNECTED: {
+ ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected");
+ if (getErrorCallbackProc() != nullptr) {
+ (*getErrorCallbackProc())(
+ (AAudioStream *) this,
+ getErrorCallbackUserData(),
+ AAUDIO_OK
+ );
+ }
+ mCallbackEnabled.store(false);
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.h b/media/libaaudio/src/legacy/AudioStreamLegacy.h
new file mode 100644
index 0000000..c109ee7
--- /dev/null
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef LEGACY_AUDIO_STREAM_LEGACY_H
+#define LEGACY_AUDIO_STREAM_LEGACY_H
+
+
+#include <aaudio/AAudio.h>
+
+#include "AudioStream.h"
+#include "AAudioLegacy.h"
+#include "utility/FixedBlockAdapter.h"
+
+namespace aaudio {
+
+
+typedef void (*aaudio_legacy_callback_t)(int event, void* user, void *info);
+
+enum {
+ /**
+ * Request that the callback function should fill the data buffer of an output stream,
+ * or process the data of an input stream.
+ * The address parameter passed to the callback function will point to a data buffer.
+ * For an input stream, the data is read-only.
+ * The value1 parameter will be the number of frames.
+ * The value2 parameter is reserved and will be set to zero.
+ * The callback should return AAUDIO_CALLBACK_RESULT_CONTINUE or AAUDIO_CALLBACK_RESULT_STOP.
+ */
+ AAUDIO_CALLBACK_OPERATION_PROCESS_DATA,
+
+ /**
+ * Inform the callback function that the stream was disconnected.
+ * The address parameter passed to the callback function will be NULL.
+ * The value1 will be an error code or AAUDIO_OK.
+ * The value2 parameter is reserved and will be set to zero.
+ * The callback return value will be ignored.
+ */
+ AAUDIO_CALLBACK_OPERATION_DISCONNECTED,
+};
+typedef int32_t aaudio_callback_operation_t;
+
+
+class AudioStreamLegacy : public AudioStream, public FixedBlockProcessor {
+public:
+ AudioStreamLegacy();
+
+ virtual ~AudioStreamLegacy();
+
+ aaudio_legacy_callback_t getLegacyCallback();
+
+ // This is public so it can be called from the C callback function.
+ // This is called from the AudioTrack/AudioRecord client.
+ virtual void processCallback(int event, void *info) = 0;
+
+ void processCallbackCommon(aaudio_callback_operation_t opcode, void *info);
+
+ // Implement FixedBlockProcessor
+ int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override;
+
+protected:
+ FixedBlockAdapter *mBlockAdapter = nullptr;
+ aaudio_wrapping_frames_t mPositionWhenStarting = 0;
+ int32_t mCallbackBufferSize = 0;
+};
+
+} /* namespace aaudio */
+
+#endif //LEGACY_AUDIO_STREAM_LEGACY_H
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index dd040a0..b608434 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -24,13 +24,16 @@
#include <aaudio/AAudio.h>
#include "AudioClock.h"
-#include "AudioStreamRecord.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamRecord.h"
+#include "utility/FixedBlockWriter.h"
using namespace android;
using namespace aaudio;
AudioStreamRecord::AudioStreamRecord()
- : AudioStream()
+ : AudioStreamLegacy()
+ , mFixedBlockWriter(*this)
{
}
@@ -57,7 +60,6 @@
? 2 : getSamplesPerFrame();
audio_channel_mask_t channelMask = audio_channel_in_mask_from_count(samplesPerFrame);
- AudioRecord::callback_t callback = nullptr;
audio_input_flags_t flags = (audio_input_flags_t) AUDIO_INPUT_FLAG_NONE;
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
@@ -67,6 +69,17 @@
? AUDIO_FORMAT_PCM_FLOAT
: AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+ // Setup the callback if there is one.
+ AudioRecord::callback_t callback = nullptr;
+ void *callbackData = nullptr;
+ AudioRecord::transfer_type streamTransferType = AudioRecord::transfer_type::TRANSFER_SYNC;
+ if (builder.getDataCallbackProc() != nullptr) {
+ streamTransferType = AudioRecord::transfer_type::TRANSFER_CALLBACK;
+ callback = getLegacyCallback();
+ callbackData = this;
+ }
+ mCallbackBufferSize = builder.getFramesPerDataCallback();
+
mAudioRecord = new AudioRecord(
AUDIO_SOURCE_DEFAULT,
getSampleRate(),
@@ -75,10 +88,10 @@
mOpPackageName, // const String16& opPackageName TODO does not compile
frameCount,
callback,
- nullptr, // void* user = nullptr,
+ callbackData,
0, // uint32_t notificationFrames = 0,
AUDIO_SESSION_ALLOCATE,
- AudioRecord::TRANSFER_DEFAULT,
+ streamTransferType,
flags
// int uid = -1,
// pid_t pid = -1,
@@ -98,6 +111,15 @@
setSamplesPerFrame(mAudioRecord->channelCount());
setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioRecord->format()));
+ // We may need to pass the data through a block size adapter to guarantee constant size.
+ if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+ int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+ mFixedBlockWriter.open(callbackSizeBytes);
+ mBlockAdapter = &mFixedBlockWriter;
+ } else {
+ mBlockAdapter = nullptr;
+ }
+
setState(AAUDIO_STREAM_STATE_OPEN);
return AAUDIO_OK;
@@ -110,9 +132,29 @@
mAudioRecord.clear();
setState(AAUDIO_STREAM_STATE_CLOSED);
}
+ mFixedBlockWriter.close();
return AAUDIO_OK;
}
+void AudioStreamRecord::processCallback(int event, void *info) {
+
+ ALOGD("AudioStreamRecord::processCallback(), event %d", event);
+ switch (event) {
+ case AudioRecord::EVENT_MORE_DATA:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+ break;
+
+ // Stream got rerouted so we disconnect.
+ case AudioRecord::EVENT_NEW_IAUDIORECORD:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+ break;
+
+ default:
+ break;
+ }
+ return;
+}
+
aaudio_result_t AudioStreamRecord::requestStart()
{
if (mAudioRecord.get() == nullptr) {
@@ -123,6 +165,7 @@
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
}
+
err = mAudioRecord->start();
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
@@ -150,7 +193,7 @@
return AAUDIO_OK;
}
-aaudio_result_t AudioStreamRecord::updateState()
+aaudio_result_t AudioStreamRecord::updateStateWhileWaiting()
{
aaudio_result_t result = AAUDIO_OK;
aaudio_wrapping_frames_t position;
@@ -224,5 +267,28 @@
return 192; // TODO add query to AudioRecord.cpp
}
-// TODO implement getTimestamp
-
+aaudio_result_t AudioStreamRecord::getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) {
+ ExtendedTimestamp extendedTimestamp;
+ status_t status = mAudioRecord->getTimestamp(&extendedTimestamp);
+ if (status != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+ // TODO Merge common code into AudioStreamLegacy after rebasing.
+ int timebase;
+ switch(clockId) {
+ case CLOCK_BOOTTIME:
+ timebase = ExtendedTimestamp::TIMEBASE_BOOTTIME;
+ break;
+ case CLOCK_MONOTONIC:
+ timebase = ExtendedTimestamp::TIMEBASE_MONOTONIC;
+ break;
+ default:
+ ALOGE("getTimestamp() - Unrecognized clock type %d", (int) clockId);
+ return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ break;
+ }
+ status = extendedTimestamp.getBestTimestamp(framePosition, timeNanoseconds, timebase);
+ return AAudioConvert_androidToAAudioResult(status);
+}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index c8d389b..897a5b3 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -23,53 +23,58 @@
#include "AudioStreamBuilder.h"
#include "AudioStream.h"
#include "AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockWriter.h"
namespace aaudio {
/**
* Internal stream that uses the legacy AudioTrack path.
*/
-class AudioStreamRecord : public AudioStream {
+class AudioStreamRecord : public AudioStreamLegacy {
public:
AudioStreamRecord();
virtual ~AudioStreamRecord();
- virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
- virtual aaudio_result_t close() override;
+ aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ aaudio_result_t close() override;
- virtual aaudio_result_t requestStart() override;
- virtual aaudio_result_t requestPause() override;
- virtual aaudio_result_t requestFlush() override;
- virtual aaudio_result_t requestStop() override;
+ aaudio_result_t requestStart() override;
+ aaudio_result_t requestPause() override;
+ aaudio_result_t requestFlush() override;
+ aaudio_result_t requestStop() override;
virtual aaudio_result_t getTimestamp(clockid_t clockId,
- int64_t *framePosition,
- int64_t *timeNanoseconds) override {
- return AAUDIO_ERROR_UNIMPLEMENTED; // TODO
- }
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) override;
- virtual aaudio_result_t read(void *buffer,
+ aaudio_result_t read(void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) override;
- virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+ aaudio_result_t setBufferSize(int32_t requestedFrames) override;
- virtual int32_t getBufferSize() const override;
+ int32_t getBufferSize() const override;
- virtual int32_t getBufferCapacity() const override;
+ int32_t getBufferCapacity() const override;
- virtual int32_t getXRunCount() const override;
+ int32_t getXRunCount() const override;
- virtual int32_t getFramesPerBurst() const override;
+ int32_t getFramesPerBurst() const override;
- virtual aaudio_result_t updateState() override;
+ aaudio_result_t updateStateWhileWaiting() override;
+
+ // This is public so it can be called from the C callback function.
+ void processCallback(int event, void *info) override;
private:
android::sp<android::AudioRecord> mAudioRecord;
+ // adapts between variable sized blocks and fixed size blocks
+ FixedBlockWriter mFixedBlockWriter;
+
// TODO add 64-bit position reporting to AudioRecord and use it.
- aaudio_wrapping_frames_t mPositionWhenStarting = 0;
- android::String16 mOpPackageName;
+ android::String16 mOpPackageName;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index e0a04c3..3b79953 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -23,17 +23,22 @@
#include <aaudio/AAudio.h>
#include "AudioClock.h"
-#include "AudioStreamTrack.h"
-
+#include "legacy/AudioStreamLegacy.h"
+#include "legacy/AudioStreamTrack.h"
+#include "utility/FixedBlockReader.h"
using namespace android;
using namespace aaudio;
+// Arbitrary and somewhat generous number of bursts.
+#define DEFAULT_BURSTS_PER_BUFFER_CAPACITY 8
+
/*
* Create a stream that uses the AudioTrack.
*/
AudioStreamTrack::AudioStreamTrack()
- : AudioStream()
+ : AudioStreamLegacy()
+ , mFixedBlockReader(*this)
{
}
@@ -53,6 +58,8 @@
return result;
}
+ ALOGD("AudioStreamTrack::open = %p", this);
+
// Try to create an AudioTrack
// TODO Support UNSPECIFIED in AudioTrack. For now, use stereo if unspecified.
int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
@@ -61,16 +68,40 @@
ALOGD("AudioStreamTrack::open(), samplesPerFrame = %d, channelMask = 0x%08x",
samplesPerFrame, channelMask);
- AudioTrack::callback_t callback = nullptr;
// TODO add more performance options
audio_output_flags_t flags = (audio_output_flags_t) AUDIO_OUTPUT_FLAG_FAST;
- size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
- : builder.getBufferCapacity();
+
+ int32_t frameCount = builder.getBufferCapacity();
+ ALOGD("AudioStreamTrack::open(), requested buffer capacity %d", frameCount);
+
+ int32_t notificationFrames = 0;
+
// TODO implement an unspecified AudioTrack format then use that.
- audio_format_t format = (getFormat() == AAUDIO_UNSPECIFIED)
+ audio_format_t format = (getFormat() == AAUDIO_FORMAT_UNSPECIFIED)
? AUDIO_FORMAT_PCM_FLOAT
: AAudioConvert_aaudioToAndroidDataFormat(getFormat());
+ // Setup the callback if there is one.
+ AudioTrack::callback_t callback = nullptr;
+ void *callbackData = nullptr;
+ // Note that TRANSFER_SYNC does not allow FAST track
+ AudioTrack::transfer_type streamTransferType = AudioTrack::transfer_type::TRANSFER_SYNC;
+ if (builder.getDataCallbackProc() != nullptr) {
+ streamTransferType = AudioTrack::transfer_type::TRANSFER_CALLBACK;
+ callback = getLegacyCallback();
+ callbackData = this;
+
+ notificationFrames = builder.getFramesPerDataCallback();
+ // If the total buffer size is unspecified then base the size on the burst size.
+ if (frameCount == AAUDIO_UNSPECIFIED) {
+ // Take advantage of a special trick that allows us to create a buffer
+ // that is some multiple of the burst size.
+ notificationFrames = 0 - DEFAULT_BURSTS_PER_BUFFER_CAPACITY;
+ }
+ }
+ mCallbackBufferSize = builder.getFramesPerDataCallback();
+
+ ALOGD("AudioStreamTrack::open(), notificationFrames = %d", notificationFrames);
mAudioTrack = new AudioTrack(
(audio_stream_type_t) AUDIO_STREAM_MUSIC,
getSampleRate(),
@@ -79,10 +110,10 @@
frameCount,
flags,
callback,
- nullptr, // user callback data
- 0, // notificationFrames
+ callbackData,
+ notificationFrames,
AUDIO_SESSION_ALLOCATE,
- AudioTrack::transfer_type::TRANSFER_SYNC // TODO - this does not allow FAST
+ streamTransferType
);
// Did we get a valid track?
@@ -97,7 +128,18 @@
// Get the actual values from the AudioTrack.
setSamplesPerFrame(mAudioTrack->channelCount());
setSampleRate(mAudioTrack->getSampleRate());
- setFormat(AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format()));
+ aaudio_audio_format_t aaudioFormat =
+ AAudioConvert_androidToAAudioDataFormat(mAudioTrack->format());
+ setFormat(aaudioFormat);
+
+ // We may need to pass the data through a block size adapter to guarantee constant size.
+ if (mCallbackBufferSize != AAUDIO_UNSPECIFIED) {
+ int callbackSizeBytes = getBytesPerFrame() * mCallbackBufferSize;
+ mFixedBlockReader.open(callbackSizeBytes);
+ mBlockAdapter = &mFixedBlockReader;
+ } else {
+ mBlockAdapter = nullptr;
+ }
setState(AAUDIO_STREAM_STATE_OPEN);
@@ -111,11 +153,32 @@
mAudioTrack.clear(); // TODO is this right?
setState(AAUDIO_STREAM_STATE_CLOSED);
}
+ mFixedBlockReader.close();
return AAUDIO_OK;
}
+void AudioStreamTrack::processCallback(int event, void *info) {
+
+ switch (event) {
+ case AudioTrack::EVENT_MORE_DATA:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_PROCESS_DATA, info);
+ break;
+
+ // Stream got rerouted so we disconnect.
+ case AudioTrack::EVENT_NEW_IAUDIOTRACK:
+ processCallbackCommon(AAUDIO_CALLBACK_OPERATION_DISCONNECTED, info);
+ break;
+
+ default:
+ break;
+ }
+ return;
+}
+
aaudio_result_t AudioStreamTrack::requestStart()
{
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
if (mAudioTrack.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -124,6 +187,7 @@
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
}
+
err = mAudioTrack->start();
if (err != OK) {
return AAudioConvert_androidToAAudioResult(err);
@@ -135,11 +199,14 @@
aaudio_result_t AudioStreamTrack::requestPause()
{
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
if (mAudioTrack.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
} else if (getState() != AAUDIO_STREAM_STATE_STARTING
&& getState() != AAUDIO_STREAM_STATE_STARTED) {
- ALOGE("requestPause(), called when state is %s", AAudio_convertStreamStateToText(getState()));
+ ALOGE("requestPause(), called when state is %s",
+ AAudio_convertStreamStateToText(getState()));
return AAUDIO_ERROR_INVALID_STATE;
}
setState(AAUDIO_STREAM_STATE_PAUSING);
@@ -152,6 +219,8 @@
}
aaudio_result_t AudioStreamTrack::requestFlush() {
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
if (mAudioTrack.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
} else if (getState() != AAUDIO_STREAM_STATE_PAUSED) {
@@ -165,6 +234,8 @@
}
aaudio_result_t AudioStreamTrack::requestStop() {
+ std::lock_guard<std::mutex> lock(mStreamMutex);
+
if (mAudioTrack.get() == nullptr) {
return AAUDIO_ERROR_INVALID_STATE;
}
@@ -175,7 +246,7 @@
return AAUDIO_OK;
}
-aaudio_result_t AudioStreamTrack::updateState()
+aaudio_result_t AudioStreamTrack::updateStateWhileWaiting()
{
status_t err;
aaudio_wrapping_frames_t position;
@@ -292,3 +363,29 @@
}
return AudioStream::getFramesRead();
}
+
+aaudio_result_t AudioStreamTrack::getTimestamp(clockid_t clockId,
+ int64_t *framePosition,
+ int64_t *timeNanoseconds) {
+ ExtendedTimestamp extendedTimestamp;
+ status_t status = mAudioTrack->getTimestamp(&extendedTimestamp);
+ if (status != NO_ERROR) {
+ return AAudioConvert_androidToAAudioResult(status);
+ }
+ // TODO Merge common code into AudioStreamLegacy after rebasing.
+ int timebase;
+ switch (clockId) {
+ case CLOCK_BOOTTIME:
+ timebase = ExtendedTimestamp::TIMEBASE_BOOTTIME;
+ break;
+ case CLOCK_MONOTONIC:
+ timebase = ExtendedTimestamp::TIMEBASE_MONOTONIC;
+ break;
+ default:
+ ALOGE("getTimestamp() - Unrecognized clock type %d", (int) clockId);
+ return AAUDIO_ERROR_UNEXPECTED_VALUE;
+ break;
+ }
+ status = extendedTimestamp.getBestTimestamp(framePosition, timeNanoseconds, timebase);
+ return AAudioConvert_androidToAAudioResult(status);
+}
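Assuming the public AAudioStream_getTimestamp() wrapper (not shown in this diff) simply forwards to the virtual implemented here, client code might consume the result roughly like this. A sketch only, not from this change:

int64_t framePosition = 0;
int64_t timeNanos = 0;
aaudio_result_t res = AAudioStream_getTimestamp(stream, CLOCK_MONOTONIC,
                                                &framePosition, &timeNanos);
if (res == AAUDIO_OK) {
    // framePosition frames had passed through the DAC at timeNanos on the
    // CLOCK_MONOTONIC timebase, which is what A/V sync logic typically needs.
}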
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 1de07ce..29f5d15 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -17,56 +17,63 @@
#ifndef LEGACY_AUDIO_STREAM_TRACK_H
#define LEGACY_AUDIO_STREAM_TRACK_H
+#include <math.h>
#include <media/AudioTrack.h>
#include <aaudio/AAudio.h>
#include "AudioStreamBuilder.h"
#include "AudioStream.h"
-#include "AAudioLegacy.h"
+#include "legacy/AAudioLegacy.h"
+#include "legacy/AudioStreamLegacy.h"
+#include "utility/FixedBlockReader.h"
namespace aaudio {
-
/**
* Internal stream that uses the legacy AudioTrack path.
*/
-class AudioStreamTrack : public AudioStream {
+class AudioStreamTrack : public AudioStreamLegacy {
public:
AudioStreamTrack();
virtual ~AudioStreamTrack();
- virtual aaudio_result_t open(const AudioStreamBuilder & builder) override;
- virtual aaudio_result_t close() override;
+ aaudio_result_t open(const AudioStreamBuilder & builder) override;
+ aaudio_result_t close() override;
- virtual aaudio_result_t requestStart() override;
- virtual aaudio_result_t requestPause() override;
- virtual aaudio_result_t requestFlush() override;
- virtual aaudio_result_t requestStop() override;
+ aaudio_result_t requestStart() override;
+ aaudio_result_t requestPause() override;
+ aaudio_result_t requestFlush() override;
+ aaudio_result_t requestStop() override;
- virtual aaudio_result_t getTimestamp(clockid_t clockId,
+ aaudio_result_t getTimestamp(clockid_t clockId,
int64_t *framePosition,
- int64_t *timeNanoseconds) override {
- return AAUDIO_ERROR_UNIMPLEMENTED; // TODO call getTimestamp(ExtendedTimestamp *timestamp);
- }
+ int64_t *timeNanoseconds) override;
- virtual aaudio_result_t write(const void *buffer,
+ aaudio_result_t write(const void *buffer,
int32_t numFrames,
int64_t timeoutNanoseconds) override;
- virtual aaudio_result_t setBufferSize(int32_t requestedFrames) override;
- virtual int32_t getBufferSize() const override;
- virtual int32_t getBufferCapacity() const override;
- virtual int32_t getFramesPerBurst()const override;
- virtual int32_t getXRunCount() const override;
+ aaudio_result_t setBufferSize(int32_t requestedFrames) override;
+ int32_t getBufferSize() const override;
+ int32_t getBufferCapacity() const override;
+ int32_t getFramesPerBurst() const override;
+ int32_t getXRunCount() const override;
- virtual int64_t getFramesRead() override;
+ int64_t getFramesRead() override;
- virtual aaudio_result_t updateState() override;
+ aaudio_result_t updateStateWhileWaiting() override;
+
+ // This is public so it can be called from the C callback function.
+ void processCallback(int event, void *info) override;
private:
+
android::sp<android::AudioTrack> mAudioTrack;
+ // adapts between variable sized blocks and fixed size blocks
+ FixedBlockReader mFixedBlockReader;
+
// TODO add 64-bit position reporting to AudioRecord and use it.
aaudio_wrapping_frames_t mPositionWhenStarting = 0;
aaudio_wrapping_frames_t mPositionWhenPausing = 0;
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.cpp b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
new file mode 100644
index 0000000..f4666af
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+FixedBlockAdapter::~FixedBlockAdapter() {
+ close();
+}
+
+int32_t FixedBlockAdapter::open(int32_t bytesPerFixedBlock)
+{
+ mSize = bytesPerFixedBlock;
+ mStorage = new uint8_t[bytesPerFixedBlock]; // TODO use std::nothrow
+ mPosition = 0;
+ return 0;
+}
+
+int32_t FixedBlockAdapter::close()
+{
+ delete[] mStorage;
+ mStorage = nullptr;
+ mSize = 0;
+ mPosition = 0;
+ return 0;
+}
diff --git a/media/libaaudio/src/utility/FixedBlockAdapter.h b/media/libaaudio/src/utility/FixedBlockAdapter.h
new file mode 100644
index 0000000..7008b25
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockAdapter.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_ADAPTER_H
+#define AAUDIO_FIXED_BLOCK_ADAPTER_H
+
+#include <stdio.h>
+
+/**
+ * Interface for a class that needs fixed-size blocks.
+ */
+class FixedBlockProcessor {
+public:
+ virtual ~FixedBlockProcessor() = default;
+ virtual int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) = 0;
+};
+
+/**
+ * Base class for a variable-to-fixed-size block adapter.
+ */
+class FixedBlockAdapter
+{
+public:
+ FixedBlockAdapter(FixedBlockProcessor &fixedBlockProcessor)
+ : mFixedBlockProcessor(fixedBlockProcessor) {}
+
+ virtual ~FixedBlockAdapter();
+
+ /**
+ * Allocate internal resources needed for buffering data.
+ */
+ virtual int32_t open(int32_t bytesPerFixedBlock);
+
+ /**
+ * Note that if the fixed-sized blocks must be aligned, then the variable-sized blocks
+ * must have the same alignment.
+ * For example, if the fixed-size blocks must be a multiple of 8, then the variable-sized
+ * blocks must also be a multiple of 8.
+ *
+ * @param buffer a variable-sized block of data to process
+ * @param numBytes number of valid bytes in the buffer
+ * @return zero if OK or a non-zero error code
+ */
+ virtual int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) = 0;
+
+ /**
+ * Free internal resources.
+ */
+ int32_t close();
+
+protected:
+ FixedBlockProcessor &mFixedBlockProcessor;
+ uint8_t *mStorage = nullptr; // Store data here while assembling buffers.
+ int32_t mSize = 0; // Size in bytes of the fixed size buffer.
+ int32_t mPosition = 0; // Offset of the last byte read or written.
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_ADAPTER_H */
diff --git a/media/libaaudio/src/utility/FixedBlockReader.cpp b/media/libaaudio/src/utility/FixedBlockReader.cpp
new file mode 100644
index 0000000..21ea70e
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+
+#include "FixedBlockReader.h"
+
+
+FixedBlockReader::FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor)
+ : FixedBlockAdapter(fixedBlockProcessor) {
+ mPosition = mSize;
+}
+
+int32_t FixedBlockReader::open(int32_t bytesPerFixedBlock) {
+ int32_t result = FixedBlockAdapter::open(bytesPerFixedBlock);
+ mPosition = mSize; // Indicate no data in storage.
+ return result;
+}
+
+int32_t FixedBlockReader::readFromStorage(uint8_t *buffer, int32_t numBytes) {
+ int32_t bytesToRead = numBytes;
+ int32_t dataAvailable = mSize - mPosition;
+ if (bytesToRead > dataAvailable) {
+ bytesToRead = dataAvailable;
+ }
+ memcpy(buffer, mStorage + mPosition, bytesToRead);
+ mPosition += bytesToRead;
+ return bytesToRead;
+}
+
+int32_t FixedBlockReader::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t result = 0;
+ int32_t bytesLeft = numBytes;
+ while(bytesLeft > 0 && result == 0) {
+ if (mPosition < mSize) {
+ // Use up bytes currently in storage.
+ int32_t bytesRead = readFromStorage(buffer, bytesLeft);
+ buffer += bytesRead;
+ bytesLeft -= bytesRead;
+ } else if (bytesLeft >= mSize) {
+ // Read through if enough for a complete block.
+ result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+ buffer += mSize;
+ bytesLeft -= mSize;
+ } else {
+ // Just need a partial block so we have to use storage.
+ result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+ mPosition = 0;
+ }
+ }
+ return result;
+}
+
diff --git a/media/libaaudio/src/utility/FixedBlockReader.h b/media/libaaudio/src/utility/FixedBlockReader.h
new file mode 100644
index 0000000..128dd52
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockReader.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_READER_H
+#define AAUDIO_FIXED_BLOCK_READER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * Read from a fixed-size block to a variable sized block.
+ *
+ * This can be used to convert a pull data flow from fixed sized buffers to variable sized buffers.
+ * An example would be an audio output callback that reads from the app.
+ */
+class FixedBlockReader : public FixedBlockAdapter
+{
+public:
+ FixedBlockReader(FixedBlockProcessor &fixedBlockProcessor);
+
+ virtual ~FixedBlockReader() = default;
+
+ int32_t open(int32_t bytesPerFixedBlock) override;
+
+ int32_t readFromStorage(uint8_t *buffer, int32_t numBytes);
+
+ /**
+ * Read into a variable sized block.
+ */
+ int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+
+#endif /* AAUDIO_FIXED_BLOCK_READER_H */
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.cpp b/media/libaaudio/src/utility/FixedBlockWriter.cpp
new file mode 100644
index 0000000..2ce8046
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <stdint.h>
+#include <memory.h>
+
+#include "FixedBlockAdapter.h"
+#include "FixedBlockWriter.h"
+
+FixedBlockWriter::FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor)
+ : FixedBlockAdapter(fixedBlockProcessor) {}
+
+
+int32_t FixedBlockWriter::writeToStorage(uint8_t *buffer, int32_t numBytes) {
+ int32_t bytesToStore = numBytes;
+ int32_t roomAvailable = mSize - mPosition;
+ if (bytesToStore > roomAvailable) {
+ bytesToStore = roomAvailable;
+ }
+ memcpy(mStorage + mPosition, buffer, bytesToStore);
+ mPosition += bytesToStore;
+ return bytesToStore;
+}
+
+int32_t FixedBlockWriter::processVariableBlock(uint8_t *buffer, int32_t numBytes) {
+ int32_t result = 0;
+ int32_t bytesLeft = numBytes;
+
+ // If we already have data in storage then add to it.
+ if (mPosition > 0) {
+ int32_t bytesWritten = writeToStorage(buffer, bytesLeft);
+ buffer += bytesWritten;
+ bytesLeft -= bytesWritten;
+ // If storage full then flush it out
+ if (mPosition == mSize) {
+ result = mFixedBlockProcessor.onProcessFixedBlock(mStorage, mSize);
+ mPosition = 0;
+ }
+ }
+
+ // Write through if enough for a complete block.
+ while(bytesLeft > mSize && result == 0) {
+ result = mFixedBlockProcessor.onProcessFixedBlock(buffer, mSize);
+ buffer += mSize;
+ bytesLeft -= mSize;
+ }
+
+ // Save any remaining partial block for next time.
+ if (bytesLeft > 0) {
+ writeToStorage(buffer, bytesLeft);
+ }
+
+ return result;
+}
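As a rough illustration of the writer's chunking behavior, here is a hypothetical harness (not part of this change) that feeds variable-sized chunks through FixedBlockWriter; the processor always receives blocks of the size passed to open():

// Sketch only: names other than the classes in this patch are hypothetical.
#include <cstdio>
#include "utility/FixedBlockWriter.h"

class CountingProcessor : public FixedBlockProcessor {
public:
    int32_t onProcessFixedBlock(uint8_t * /*buffer*/, int32_t numBytes) override {
        printf("got a fixed block of %d bytes\n", numBytes); // always the open() size
        return 0;
    }
};

int main() {
    CountingProcessor processor;
    FixedBlockWriter writer(processor);
    writer.open(64);                        // fixed block size in bytes
    uint8_t chunk[50] = {0};
    writer.processVariableBlock(chunk, 50); // buffered, no callback yet
    writer.processVariableBlock(chunk, 50); // flushes one 64-byte block, keeps 36
    writer.close();
    return 0;
}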
diff --git a/media/libaaudio/src/utility/FixedBlockWriter.h b/media/libaaudio/src/utility/FixedBlockWriter.h
new file mode 100644
index 0000000..f1d917c
--- /dev/null
+++ b/media/libaaudio/src/utility/FixedBlockWriter.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AAUDIO_FIXED_BLOCK_WRITER_H
+#define AAUDIO_FIXED_BLOCK_WRITER_H
+
+#include <stdint.h>
+
+#include "FixedBlockAdapter.h"
+
+/**
+ * This can be used to convert a push data flow from variable sized buffers to fixed sized buffers.
+ * An example would be an audio input callback.
+ */
+class FixedBlockWriter : public FixedBlockAdapter
+{
+public:
+ FixedBlockWriter(FixedBlockProcessor &fixedBlockProcessor);
+
+ virtual ~FixedBlockWriter() = default;
+
+ int32_t writeToStorage(uint8_t *buffer, int32_t numBytes);
+
+ /**
+ * Write from a variable sized block.
+ */
+ int32_t processVariableBlock(uint8_t *buffer, int32_t numBytes) override;
+};
+
+#endif /* AAUDIO_FIXED_BLOCK_WRITER_H */
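A minimal sketch of how the adapter above might be used to funnel a variable-sized audio input callback into fixed-sized blocks. The 192-byte block size, the sink class, and the call pattern are illustrative assumptions; only the FixedBlockProcessor/FixedBlockWriter interfaces come from the files above.

#include <cstdint>

#include "utility/FixedBlockAdapter.h"
#include "utility/FixedBlockWriter.h"

// Hypothetical sink that consumes audio in fixed 192-byte blocks.
class FixedBlockSink : public FixedBlockProcessor {
public:
    int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
        (void) buffer;               // e.g. hand the block to an encoder
        mBytesProcessed += numBytes;
        return 0;                    // non-zero propagates out of processVariableBlock()
    }
    int64_t mBytesProcessed = 0;
};

int main() {
    FixedBlockSink sink;
    FixedBlockWriter writer(sink);
    writer.open(192);                          // fixed block size in bytes

    uint8_t chunk[100] = {0};
    writer.processVariableBlock(chunk, 37);    // partial block, buffered internally
    writer.processVariableBlock(chunk, 100);   // still buffering (137 < 192)
    writer.processVariableBlock(chunk, 100);   // crosses 192, sink sees one full block
    writer.close();
    return 0;
}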
diff --git a/media/libaaudio/tests/Android.mk b/media/libaaudio/tests/Android.mk
index 7899cf5..06c9364 100644
--- a/media/libaaudio/tests/Android.mk
+++ b/media/libaaudio/tests/Android.mk
@@ -4,8 +4,7 @@
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src/core \
- frameworks/av/media/libaaudio/src/utility
+ frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_handle_tracker.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
libcutils liblog libmedia libutils
@@ -17,13 +16,22 @@
LOCAL_C_INCLUDES := \
$(call include-path-for, audio-utils) \
frameworks/av/media/libaaudio/include \
- frameworks/av/media/libaaudio/src \
- frameworks/av/media/libaaudio/src/core \
- frameworks/av/media/libaaudio/src/fifo \
- frameworks/av/media/libaaudio/src/utility
+ frameworks/av/media/libaaudio/src
LOCAL_SRC_FILES:= test_marshalling.cpp
LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
libcutils liblog libmedia libutils
LOCAL_STATIC_LIBRARIES := libaaudio
-LOCAL_MODULE := test_marshalling
+LOCAL_MODULE := test_aaudio_marshalling
+include $(BUILD_NATIVE_TEST)
+
+include $(CLEAR_VARS)
+LOCAL_C_INCLUDES := \
+ $(call include-path-for, audio-utils) \
+ frameworks/av/media/libaaudio/include \
+ frameworks/av/media/libaaudio/src
+LOCAL_SRC_FILES:= test_block_adapter.cpp
+LOCAL_SHARED_LIBRARIES := libaudioclient libaudioutils libbinder \
+ libcutils liblog libmedia libutils
+LOCAL_STATIC_LIBRARIES := libaaudio
+LOCAL_MODULE := test_block_adapter
include $(BUILD_NATIVE_TEST)
diff --git a/media/libaaudio/tests/test_block_adapter.cpp b/media/libaaudio/tests/test_block_adapter.cpp
new file mode 100644
index 0000000..a22abb9
--- /dev/null
+++ b/media/libaaudio/tests/test_block_adapter.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+
+#include <gtest/gtest.h>
+
+#include "utility/FixedBlockAdapter.h"
+#include "utility/FixedBlockWriter.h"
+#include "utility/FixedBlockReader.h"
+
+#define FIXED_BLOCK_SIZE 47
+#define TEST_BUFFER_SIZE 103
+
+// Pass variable-sized blocks.
+// Frames contain a sequential index, which is easily checked.
+class TestBlockAdapter {
+public:
+ TestBlockAdapter()
+ : mTestIndex(0), mLastIndex(0) {
+ }
+
+ ~TestBlockAdapter() = default;
+
+ void fillSequence(int32_t *indexBuffer, int32_t frameCount) {
+ ASSERT_LE(frameCount, TEST_BUFFER_SIZE);
+ for (int i = 0; i < frameCount; i++) {
+ indexBuffer[i] = mLastIndex++;
+ }
+ }
+
+ int checkSequence(const int32_t *indexBuffer, int32_t frameCount) {
+ // This is equivalent to calling an output callback.
+ for (int i = 0; i < frameCount; i++) {
+ int32_t expected = mTestIndex++;
+ int32_t actual = indexBuffer[i];
+ EXPECT_EQ(expected, actual);
+ if (actual != expected) {
+ return -1;
+ }
+ }
+ return 0;
+ }
+
+ int32_t mTestBuffer[TEST_BUFFER_SIZE];
+ int32_t mTestIndex;
+ int32_t mLastIndex;
+};
+
+class TestBlockWriter : public TestBlockAdapter, FixedBlockProcessor {
+public:
+ TestBlockWriter()
+ : mFixedBlockWriter(*this) {
+ mFixedBlockWriter.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+ }
+
+ ~TestBlockWriter() {
+ mFixedBlockWriter.close();
+ }
+
+ int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+ int32_t frameCount = numBytes / sizeof(int32_t);
+ return checkSequence((int32_t *) buffer, frameCount);
+ }
+
+ // Simulate audio input from a variable sized callback.
+ int32_t testInputWrite(int32_t variableCount) {
+ fillSequence(mTestBuffer, variableCount);
+ int32_t sizeBytes = variableCount * sizeof(int32_t);
+ return mFixedBlockWriter.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+ }
+
+private:
+ FixedBlockWriter mFixedBlockWriter;
+};
+
+class TestBlockReader : public TestBlockAdapter, FixedBlockProcessor {
+public:
+ TestBlockReader()
+ : mFixedBlockReader(*this) {
+ mFixedBlockReader.open(sizeof(int32_t) * FIXED_BLOCK_SIZE);
+ }
+
+ ~TestBlockReader() {
+ mFixedBlockReader.close();
+ }
+
+ int32_t onProcessFixedBlock(uint8_t *buffer, int32_t numBytes) override {
+ int32_t frameCount = numBytes / sizeof(int32_t);
+ fillSequence((int32_t *) buffer, frameCount);
+ return 0;
+ }
+
+ // Simulate audio output from a variable sized callback.
+ int32_t testOutputRead(int32_t variableCount) {
+ int32_t sizeBytes = variableCount * sizeof(int32_t);
+ int32_t result = mFixedBlockReader.processVariableBlock((uint8_t *) mTestBuffer, sizeBytes);
+ if (result >= 0) {
+ result = checkSequence((int32_t *)mTestBuffer, variableCount);
+ }
+ return result;
+ }
+
+private:
+ FixedBlockReader mFixedBlockReader;
+};
+
+
+TEST(test_block_adapter, block_adapter_write) {
+ TestBlockWriter tester;
+ int result = 0;
+ const int numLoops = 1000;
+
+ for (int i = 0; i < numLoops && result == 0; i++) {
+ long r = random();
+ int32_t size = (r % TEST_BUFFER_SIZE);
+ ASSERT_LE(size, TEST_BUFFER_SIZE);
+ ASSERT_GE(size, 0);
+ result = tester.testInputWrite(size);
+ }
+ ASSERT_EQ(0, result);
+}
+
+TEST(test_block_adapter, block_adapter_read) {
+ TestBlockReader tester;
+ int result = 0;
+ const int numLoops = 1000;
+
+ for (int i = 0; i < numLoops && result == 0; i++) {
+ long r = random();
+ int32_t size = (r % TEST_BUFFER_SIZE);
+ ASSERT_LE(size, TEST_BUFFER_SIZE);
+ ASSERT_GE(size, 0);
+ result = tester.testOutputRead(size);
+ }
+ ASSERT_EQ(0, result);
+}
+
diff --git a/media/libaaudio/tests/test_handle_tracker.cpp b/media/libaaudio/tests/test_handle_tracker.cpp
index e51c39c..e1cb676 100644
--- a/media/libaaudio/tests/test_handle_tracker.cpp
+++ b/media/libaaudio/tests/test_handle_tracker.cpp
@@ -22,7 +22,7 @@
#include <gtest/gtest.h>
#include <aaudio/AAudioDefinitions.h>
-#include "HandleTracker.h"
+#include "utility/HandleTracker.h"
// Test adding one address.
TEST(test_handle_tracker, aaudio_handle_tracker) {
diff --git a/media/libeffects/factory/EffectsFactory.c b/media/libeffects/factory/EffectsFactory.c
index 554c14d..523b6e1 100644
--- a/media/libeffects/factory/EffectsFactory.c
+++ b/media/libeffects/factory/EffectsFactory.c
@@ -48,6 +48,7 @@
static int gCanQueryEffect; // indicates that call to EffectQueryEffect() is valid, i.e. that the list of effects
// was not modified since last call to EffectQueryNumberEffects()
+static list_elem_t *gLibraryFailedList; //list of lib_failed_entry_t: libraries that failed to load
/////////////////////////////////////////////////
// Local functions prototypes
@@ -584,6 +585,17 @@
if (hdl != NULL) {
dlclose(hdl);
}
+ //add an entry to gLibraryFailedList for this failed library
+ lib_failed_entry_t *fl = malloc(sizeof(lib_failed_entry_t));
+ fl->name = strndup(name, PATH_MAX);
+ fl->path = strndup(path, PATH_MAX);
+
+ list_elem_t *fe = malloc(sizeof(list_elem_t));
+ fe->object = fl;
+ fe->next = gLibraryFailedList;
+ gLibraryFailedList = fe;
+ ALOGV("getLibrary() link error in library %p for path %s", fl, path);
+
return -EINVAL;
}
@@ -986,16 +998,31 @@
int EffectDumpEffects(int fd) {
char s[512];
+
+ list_elem_t *fe = gLibraryFailedList;
+ lib_failed_entry_t *fl = NULL;
+
+ dprintf(fd, "Libraries NOT loaded:\n");
+
+ while (fe) {
+ fl = (lib_failed_entry_t *)fe->object;
+ dprintf(fd, " Library %s\n", fl->name);
+ dprintf(fd, " path: %s\n", fl->path);
+ fe = fe->next;
+ }
+
list_elem_t *e = gLibraryList;
lib_entry_t *l = NULL;
effect_descriptor_t *d = NULL;
int found = 0;
int ret = 0;
+ dprintf(fd, "Libraries loaded:\n");
while (e) {
l = (lib_entry_t *)e->object;
list_elem_t *efx = l->effects;
- dprintf(fd, "Library %s\n", l->name);
+ dprintf(fd, " Library %s\n", l->name);
+ dprintf(fd, " path: %s\n", l->path);
if (!efx) {
dprintf(fd, " (no effects)\n");
}
diff --git a/media/libeffects/factory/EffectsFactory.h b/media/libeffects/factory/EffectsFactory.h
index b7936e0..72e0931 100644
--- a/media/libeffects/factory/EffectsFactory.h
+++ b/media/libeffects/factory/EffectsFactory.h
@@ -58,6 +58,11 @@
lib_entry_t *lib;
} effect_entry_t;
+typedef struct lib_failed_entry_s {
+ char *name;
+ char *path;
+} lib_failed_entry_t;
+
// Structure used to store the lib entry
// and the descriptor of the sub effects.
// The library entry is to be stored in case of
@@ -69,6 +74,7 @@
} sub_effect_entry_t;
+
////////////////////////////////////////////////////////////////////////////////
//
// Function: EffectGetSubEffects
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 8a1ce22..b0bd22e 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -26,7 +26,6 @@
IMediaPlayer.cpp \
IMediaRecorder.cpp \
IMediaSource.cpp \
- IMediaAnalyticsService.cpp \
IRemoteDisplay.cpp \
IRemoteDisplayClient.cpp \
IResourceManagerClient.cpp \
@@ -35,7 +34,6 @@
MediaCodecBuffer.cpp \
MediaCodecInfo.cpp \
MediaDefs.cpp \
- MediaAnalyticsItem.cpp \
MediaUtils.cpp \
Metadata.cpp \
mediarecorder.cpp \
@@ -66,6 +64,7 @@
libcamera_client libstagefright_foundation \
libgui libdl libaudioutils libaudioclient \
libmedia_helper libmediadrm \
+ libmediametrics \
libbase \
libhidlbase \
libhidltransport \
diff --git a/media/libmedia/include/DrmPluginPath.h b/media/libmedia/include/DrmPluginPath.h
new file mode 100644
index 0000000..51ba26e
--- /dev/null
+++ b/media/libmedia/include/DrmPluginPath.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_PLUGIN_PATH_H_
+
+#define DRM_PLUGIN_PATH_H_
+
+namespace android {
+
+const char* getDrmPluginPath();
+
+} // namespace android
+
+#endif // DRM_PLUGIN_PATH_H_
diff --git a/media/libmedia/include/mediaplayer.h b/media/libmedia/include/mediaplayer.h
index 18d69a7..623c374 100644
--- a/media/libmedia/include/mediaplayer.h
+++ b/media/libmedia/include/mediaplayer.h
@@ -133,6 +133,10 @@
MEDIA_INFO_NOT_SEEKABLE = 801,
// New media metadata is available.
MEDIA_INFO_METADATA_UPDATE = 802,
+ // Audio can not be played.
+ MEDIA_INFO_PLAY_AUDIO_ERROR = 804,
+ // Video can not be played.
+ MEDIA_INFO_PLAY_VIDEO_ERROR = 805,
//9xx
MEDIA_INFO_TIMED_TEXT_ERROR = 900,
diff --git a/media/libmedia/omx/1.0/WOmxNode.cpp b/media/libmedia/omx/1.0/WOmxNode.cpp
index b5186b5..6c92b52 100644
--- a/media/libmedia/omx/1.0/WOmxNode.cpp
+++ b/media/libmedia/omx/1.0/WOmxNode.cpp
@@ -411,7 +411,7 @@
getExtensionIndex_cb _hidl_cb) {
OMX_INDEXTYPE index;
Status status = toStatus(mBase->getExtensionIndex(
- parameterName, &index));
+ parameterName.c_str(), &index));
_hidl_cb(status, toRawIndexType(index));
return Void();
}
diff --git a/media/libmediametrics/Android.mk b/media/libmediametrics/Android.mk
new file mode 100644
index 0000000..f8c4bb3
--- /dev/null
+++ b/media/libmediametrics/Android.mk
@@ -0,0 +1,34 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+
+LOCAL_SRC_FILES += \
+ IMediaAnalyticsService.cpp \
+ MediaAnalyticsItem.cpp \
+
+LOCAL_SHARED_LIBRARIES := \
+ liblog libcutils libutils libbinder \
+ libstagefright_foundation \
+ libbase \
+
+LOCAL_MODULE:= libmediametrics
+
+LOCAL_ADDITIONAL_DEPENDENCIES := $(LOCAL_PATH)/Android.mk
+
+LOCAL_C_INCLUDES := \
+ $(TOP)/system/libhidl/base/include \
+ $(TOP)/frameworks/native/include/media/openmax \
+ $(TOP)/frameworks/av/include/media/ \
+ $(TOP)/frameworks/av/media/libmedia/aidl \
+ $(TOP)/frameworks/av/include \
+ $(TOP)/frameworks/native/include \
+ $(call include-path-for, audio-utils)
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ frameworks/av/include/media \
+
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_SANITIZE := unsigned-integer-overflow signed-integer-overflow cfi
+LOCAL_SANITIZE_DIAG := cfi
+
+include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libmedia/IMediaAnalyticsService.cpp b/media/libmediametrics/IMediaAnalyticsService.cpp
similarity index 94%
rename from media/libmedia/IMediaAnalyticsService.cpp
rename to media/libmediametrics/IMediaAnalyticsService.cpp
index 340cf19..68bafe1 100644
--- a/media/libmedia/IMediaAnalyticsService.cpp
+++ b/media/libmediametrics/IMediaAnalyticsService.cpp
@@ -23,15 +23,6 @@
#include <binder/Parcel.h>
#include <binder/IMemory.h>
#include <binder/IPCThreadState.h>
-#include <media/IHDCP.h>
-#include <media/IMediaCodecList.h>
-#include <media/IMediaHTTPService.h>
-#include <media/IMediaPlayerService.h>
-#include <media/IMediaRecorder.h>
-#include <media/IOMX.h>
-#include <media/IRemoteDisplay.h>
-#include <media/IRemoteDisplayClient.h>
-#include <media/IStreamSource.h>
#include <utils/Errors.h> // for status_t
#include <utils/List.h>
diff --git a/media/libmedia/MediaAnalyticsItem.cpp b/media/libmediametrics/MediaAnalyticsItem.cpp
similarity index 100%
rename from media/libmedia/MediaAnalyticsItem.cpp
rename to media/libmediametrics/MediaAnalyticsItem.cpp
diff --git a/media/libmedia/include/IMediaAnalyticsService.h b/media/libmediametrics/include/IMediaAnalyticsService.h
similarity index 100%
rename from media/libmedia/include/IMediaAnalyticsService.h
rename to media/libmediametrics/include/IMediaAnalyticsService.h
diff --git a/media/libmedia/include/MediaAnalyticsItem.h b/media/libmediametrics/include/MediaAnalyticsItem.h
similarity index 100%
rename from media/libmedia/include/MediaAnalyticsItem.h
rename to media/libmediametrics/include/MediaAnalyticsItem.h
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index f7e1ff5..7af7031 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -27,6 +27,7 @@
libgui \
libaudioclient \
libmedia \
+ libmediametrics \
libmediadrm \
libmediautils \
libmemunreachable \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index f3fc924..b082654 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -736,9 +736,7 @@
mExtractorDeathListener = new ServiceDeathNotifier(binder, p, MEDIAEXTRACTOR_PROCESS_DEATH);
binder->linkToDeath(mExtractorDeathListener);
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
// Treble IOmx
sp<IOmx> omx = IOmx::getService();
if (omx == nullptr) {
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 638eec3..6400481 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -450,9 +450,7 @@
}
sCameraChecked = true;
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
// Treble IOmx
sp<IOmx> omx = IOmx::getService();
if (omx == nullptr) {
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index f689ac9..95f378f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -397,13 +397,13 @@
// Attempt to parse an float literal optionally surrounded by whitespace,
// returns true on success, false otherwise.
-static bool safe_strtof(const char *s, float *val) {
+static bool safe_strtod(const char *s, double *val) {
char *end;
// It is lame, but according to man page, we have to set errno to 0
- // before calling strtof().
+ // before calling strtod().
errno = 0;
- *val = strtof(s, &end);
+ *val = strtod(s, &end);
if (end == s || errno == ERANGE) {
return false;
@@ -706,13 +706,23 @@
return OK;
}
-status_t StagefrightRecorder::setParamCaptureFps(float fps) {
+status_t StagefrightRecorder::setParamCaptureFps(double fps) {
ALOGV("setParamCaptureFps: %.2f", fps);
- int64_t timeUs = (int64_t) (1000000.0 / fps + 0.5f);
+ constexpr int64_t k1E12 = 1000000000000ll;
+ int64_t fpsx1e12 = k1E12 * fps;
+ if (fpsx1e12 == 0) {
+ ALOGE("FPS is zero or too small");
+ return BAD_VALUE;
+ }
- // Not allowing time more than a day
- if (timeUs <= 0 || timeUs > 86400*1E6) {
+ // This does not overflow since 10^6 * 10^12 < 2^63
+ int64_t timeUs = 1000000ll * k1E12 / fpsx1e12;
+
+ // Do not allow the time between frames to exceed one day, plus one millisecond of error margin.
+ // Note: 1e12 / 86400 = 11574074.(074) and 1e18 / 11574074 = 86400000553;
+ // therefore 1 ms of margin should be sufficient.
+ if (timeUs <= 0 || timeUs > 86400001000ll) {
ALOGE("Time between frame capture (%lld) is out of range [0, 1 Day]", (long long)timeUs);
return BAD_VALUE;
}
@@ -846,8 +856,8 @@
return setParamCaptureFpsEnable(captureFpsEnable);
}
} else if (key == "time-lapse-fps") {
- float fps;
- if (safe_strtof(value.string(), &fps)) {
+ double fps;
+ if (safe_strtod(value.string(), &fps)) {
return setParamCaptureFps(fps);
}
} else {
@@ -2073,7 +2083,7 @@
mMaxFileSizeBytes = 0;
mTrackEveryTimeDurationUs = 0;
mCaptureFpsEnable = false;
- mCaptureFps = 0.0f;
+ mCaptureFps = 0.0;
mTimeBetweenCaptureUs = -1;
mCameraSourceTimeLapse = NULL;
mMetaDataStoredInVideoBuffers = kMetadataBufferTypeInvalid;
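For reference, a standalone sketch of the fixed-point conversion introduced above, and of why one millisecond of slack at the one-day bound is enough; the sample capture rates are illustrative only.

#include <cstdint>
#include <cstdio>
#include <initializer_list>

int main() {
    constexpr int64_t k1E12 = 1000000000000ll;
    // Same computation as setParamCaptureFps(), for a few sample capture rates.
    for (double fps : {30.0, 0.1, 1.0 / 86400.0 /* one frame per day */}) {
        int64_t fpsx1e12 = k1E12 * fps;                 // fps in fixed point
        if (fpsx1e12 == 0) continue;                    // rejected as BAD_VALUE above
        int64_t timeUs = 1000000ll * k1E12 / fpsx1e12;  // 10^18 < 2^63, no overflow
        printf("fps=%.8f -> timeUs=%lld\n", fps, (long long) timeUs);
    }
    // At one frame per day the truncated fixed-point rate is 11574074, and
    // 10^18 / 11574074 is roughly 86400000553 us, i.e. one day plus ~0.55 ms,
    // so the 86400001000 us limit above leaves 1 ms of headroom.
    return 0;
}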
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 38377d2..9a6c4da 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -122,7 +122,7 @@
int32_t mTotalBitRate;
bool mCaptureFpsEnable;
- float mCaptureFps;
+ double mCaptureFps;
int64_t mTimeBetweenCaptureUs;
sp<CameraSourceTimeLapse> mCameraSourceTimeLapse;
@@ -172,7 +172,7 @@
status_t setParamAudioSamplingRate(int32_t sampleRate);
status_t setParamAudioTimeScale(int32_t timeScale);
status_t setParamCaptureFpsEnable(int32_t timeLapseEnable);
- status_t setParamCaptureFps(float fps);
+ status_t setParamCaptureFps(double fps);
status_t setParamVideoEncodingBitRate(int32_t bitRate);
status_t setParamVideoIFramesInterval(int32_t seconds);
status_t setParamVideoEncoderProfile(int32_t profile);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 50d5343..d9a5c26 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -197,6 +197,8 @@
mPrepared(false),
mResetting(false),
mSourceStarted(false),
+ mAudioDecoderError(false),
+ mVideoDecoderError(false),
mPaused(false),
mPausedByClient(true),
mPausedForBuffering(false),
@@ -1093,12 +1095,14 @@
ALOGV("%s shutdown completed", audio ? "audio" : "video");
if (audio) {
mAudioDecoder.clear();
+ mAudioDecoderError = false;
++mAudioDecoderGeneration;
CHECK_EQ((int)mFlushingAudio, (int)SHUTTING_DOWN_DECODER);
mFlushingAudio = SHUT_DOWN;
} else {
mVideoDecoder.clear();
+ mVideoDecoderError = false;
++mVideoDecoderGeneration;
CHECK_EQ((int)mFlushingVideo, (int)SHUTTING_DOWN_DECODER);
@@ -1153,7 +1157,29 @@
finishFlushIfPossible(); // Should not occur.
break; // Finish anyways.
}
- notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ if (mSource != nullptr) {
+ if (audio) {
+ if (mVideoDecoderError || mSource->getFormat(false /* audio */) == NULL) {
+ // When both audio and video have error, or this stream has only audio
+ // which has error, notify client of error.
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ } else {
+ // Only audio track has error. Video track could be still good to play.
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_AUDIO_ERROR, err);
+ }
+ mAudioDecoderError = true;
+ } else {
+ if (mAudioDecoderError || mSource->getFormat(true /* audio */) == NULL) {
+ // When both audio and video have error, or this stream has only video
+ // which has error, notify client of error.
+ notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
+ } else {
+ // Only video track has error. Audio track could be still good to play.
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_VIDEO_ERROR, err);
+ }
+ mVideoDecoderError = true;
+ }
+ }
} else {
ALOGV("Unhandled decoder notification %d '%c%c%c%c'.",
what,
@@ -1535,8 +1561,6 @@
if (driver != NULL) {
int64_t now = systemTime();
int64_t played = now - mLastStartedPlayingTimeNs;
- ALOGD("played from %" PRId64 " to %" PRId64 " = %" PRId64 ,
- mLastStartedPlayingTimeNs, now, played);
driver->notifyMorePlayingTimeUs((played+500)/1000);
}
@@ -1619,7 +1643,8 @@
// is possible; otherwise the decoders call the renderer openAudioSink directly.
status_t err = mRenderer->openAudioSink(
- format, true /* offloadOnly */, hasVideo, AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio);
+ format, true /* offloadOnly */, hasVideo,
+ AUDIO_OUTPUT_FLAG_NONE, &mOffloadAudio, mSource->isStreaming());
if (err != OK) {
// Any failure we turn off mOffloadAudio.
mOffloadAudio = false;
@@ -1637,6 +1662,7 @@
if (mAudioDecoder != NULL) {
mAudioDecoder->pause();
mAudioDecoder.clear();
+ mAudioDecoderError = false;
++mAudioDecoderGeneration;
}
if (mFlushingAudio == FLUSHING_DECODER) {
@@ -1774,6 +1800,7 @@
*decoder = new Decoder(notify, mSource, mPID, mUID, mRenderer);
ALOGV("instantiateDecoder audio Decoder");
}
+ mAudioDecoderError = false;
} else {
sp<AMessage> notify = new AMessage(kWhatVideoNotify, this);
++mVideoDecoderGeneration;
@@ -1781,6 +1808,7 @@
*decoder = new Decoder(
notify, mSource, mPID, mUID, mRenderer, mSurface, mCCDecoder);
+ mVideoDecoderError = false;
// enable FRC if high-quality AV sync is requested, even if not
// directly queuing to display, as this will even improve textureview
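The error routing added above boils down to a small predicate; this is only a restatement for readability, not code from the change.

// A decoder failure is fatal (MEDIA_ERROR) only when the peer track cannot
// carry playback on its own; otherwise it is reported as MEDIA_INFO_PLAY_*_ERROR.
static bool isDecoderErrorFatal(bool otherTrackAlreadyFailed, bool otherTrackExists) {
    return otherTrackAlreadyFailed || !otherTrackExists;
}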
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index d3cb7c1..d542749 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -218,6 +218,8 @@
bool mPrepared;
bool mResetting;
bool mSourceStarted;
+ bool mAudioDecoderError;
+ bool mVideoDecoderError;
// Actual pause state, either as requested by client or due to buffering.
bool mPaused;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
index 9a2224e..9e579f9 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoder.cpp
@@ -762,8 +762,7 @@
int64_t durationUs;
bool hasVideo = (mSource->getFormat(false /* audio */) != NULL);
if (getAudioDeepBufferSetting() // override regardless of source duration
- || (!hasVideo
- && mSource->getDuration(&durationUs) == OK
+ || (mSource->getDuration(&durationUs) == OK
&& durationUs > AUDIO_SINK_MIN_DEEP_BUFFER_DURATION_US)) {
flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
} else {
@@ -773,7 +772,8 @@
sp<AMessage> reply = new AMessage(kWhatAudioOutputFormatChanged, this);
reply->setInt32("generation", mBufferGeneration);
mRenderer->changeAudioFormat(
- format, false /* offloadOnly */, hasVideo, flags, reply);
+ format, false /* offloadOnly */, hasVideo,
+ flags, mSource->isStreaming(), reply);
}
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
index cb668e4..6b05b53 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDecoderPassThrough.cpp
@@ -76,7 +76,7 @@
// format is different.
status_t err = mRenderer->openAudioSink(
format, true /* offloadOnly */, hasVideo,
- AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */);
+ AUDIO_OUTPUT_FLAG_NONE /* flags */, NULL /* isOffloaded */, mSource->isStreaming());
if (err != OK) {
handleError(err);
}
@@ -294,6 +294,9 @@
return;
}
+ if (streamErr != ERROR_END_OF_STREAM) {
+ handleError(streamErr);
+ }
mReachedEOS = true;
if (mRenderer != NULL) {
mRenderer->queueEOS(true /* audio */, ERROR_END_OF_STREAM);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 66b64f8..01008b4 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -583,8 +583,12 @@
return;
}
- // only bother to log non-empty records
- if (mAnalyticsItem->count() > 0) {
+ // log only non-empty records
+ // we always updateMetrics() before we get here
+ // and that always injects 2 fields (duration and playing time) into
+ // the record.
+ // So the canonical "empty" record has 2 elements in it.
+ if (mAnalyticsItem->count() > 2) {
mAnalyticsItem->setFinalized(true);
mAnalyticsItem->selfrecord();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 9350440..9fe61703 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -397,12 +397,14 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded) {
+ bool *isOffloaded,
+ bool isStreaming) {
sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
msg->setMessage("format", format);
msg->setInt32("offload-only", offloadOnly);
msg->setInt32("has-video", hasVideo);
msg->setInt32("flags", flags);
+ msg->setInt32("isStreaming", isStreaming);
sp<AMessage> response;
status_t postStatus = msg->postAndAwaitResponse(&response);
@@ -430,12 +432,14 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
+ bool isStreaming,
const sp<AMessage> ¬ify) {
sp<AMessage> meta = new AMessage;
meta->setMessage("format", format);
meta->setInt32("offload-only", offloadOnly);
meta->setInt32("has-video", hasVideo);
meta->setInt32("flags", flags);
+ meta->setInt32("isStreaming", isStreaming);
sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
@@ -460,7 +464,10 @@
uint32_t flags;
CHECK(msg->findInt32("flags", (int32_t *)&flags));
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+ uint32_t isStreaming;
+ CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
sp<AMessage> response = new AMessage;
response->setInt32("err", err);
@@ -1838,7 +1845,8 @@
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags) {
+ uint32_t flags,
+ bool isStreaming) {
ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
offloadOnly, offloadingAudio());
bool audioSinkChanged = false;
@@ -1891,7 +1899,7 @@
offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
offloadInfo.bit_rate = avgBitRate;
offloadInfo.has_video = hasVideo;
- offloadInfo.is_streaming = true;
+ offloadInfo.is_streaming = isStreaming;
if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
ALOGV("openAudioSink: no change in offload mode");
@@ -2043,7 +2051,10 @@
uint32_t flags;
CHECK(meta->findInt32("flags", (int32_t *)&flags));
- status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
+ uint32_t isStreaming;
+ CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming));
+
+ status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);
if (err != OK) {
notify->setInt32("err", err);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index 385bb06..e6850b5 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -76,7 +76,8 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
- bool *isOffloaded);
+ bool *isOffloaded,
+ bool isStreaming);
void closeAudioSink();
// re-open audio sink after all pending audio buffers played.
@@ -85,6 +86,7 @@
bool offloadOnly,
bool hasVideo,
uint32_t flags,
+ bool isStreaming,
const sp<AMessage> ¬ify);
enum {
@@ -267,7 +269,8 @@
const sp<AMessage> &format,
bool offloadOnly,
bool hasVideo,
- uint32_t flags);
+ uint32_t flags,
+ bool isStreaming);
void onCloseAudioSink();
void onChangeAudioFormat(const sp<AMessage> &meta, const sp<AMessage> ¬ify);
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 37e1546..72645ab 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -549,6 +549,7 @@
mTimePerFrameUs(-1ll),
mTimePerCaptureUs(-1ll),
mCreateInputBuffersSuspended(false),
+ mLatency(0),
mTunneled(false),
mDescribeColorAspectsIndex((OMX_INDEXTYPE)0),
mDescribeHDRStaticInfoIndex((OMX_INDEXTYPE)0),
@@ -1211,7 +1212,7 @@
break;
}
- sp<GraphicBuffer> graphicBuffer(new GraphicBuffer(buf, false));
+ sp<GraphicBuffer> graphicBuffer(GraphicBuffer::from(buf));
BufferInfo info;
info.mStatus = BufferInfo::OWNED_BY_US;
info.mFenceFd = fenceFd;
@@ -1515,7 +1516,7 @@
CHECK(storingMetadataInDecodedBuffers());
// discard buffer in LRU info and replace with new buffer
- oldest->mGraphicBuffer = new GraphicBuffer(buf, false);
+ oldest->mGraphicBuffer = GraphicBuffer::from(buf);
oldest->mNewGraphicBuffer = true;
oldest->mStatus = BufferInfo::OWNED_BY_US;
oldest->setWriteFence(fenceFd, "dequeueBufferFromNativeWindow for oldest");
@@ -2281,6 +2282,30 @@
return err;
}
+status_t ACodec::setLatency(uint32_t latency) {
+ OMX_PARAM_U32TYPE config;
+ InitOMXParams(&config);
+ config.nPortIndex = kPortIndexInput;
+ config.nU32 = (OMX_U32)latency;
+ status_t err = mOMXNode->setConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigLatency,
+ &config, sizeof(config));
+ return err;
+}
+
+status_t ACodec::getLatency(uint32_t *latency) {
+ OMX_PARAM_U32TYPE config;
+ InitOMXParams(&config);
+ config.nPortIndex = kPortIndexInput;
+ status_t err = mOMXNode->getConfig(
+ (OMX_INDEXTYPE)OMX_IndexConfigLatency,
+ &config, sizeof(config));
+ if (err == OK) {
+ *latency = config.nU32;
+ }
+ return err;
+}
+
status_t ACodec::setPriority(int32_t priority) {
if (priority < 0) {
return BAD_VALUE;
@@ -3798,6 +3823,8 @@
}
}
+ configureEncoderLatency(msg);
+
switch (compressionFormat) {
case OMX_VIDEO_CodingMPEG4:
err = setupMPEG4EncoderParameters(msg);
@@ -4257,7 +4284,7 @@
h264type.nSliceHeaderSpacing = 0;
h264type.bUseHadamard = OMX_TRUE;
h264type.nRefFrames = 2;
- h264type.nBFrames = 1;
+ h264type.nBFrames = mLatency == 0 ? 1 : std::min(1U, mLatency - 1);
h264type.nPFrames = setPFramesSpacing(iFrameInterval, frameRate, h264type.nBFrames);
h264type.nAllowedPictureTypes =
OMX_VIDEO_PictureTypeI | OMX_VIDEO_PictureTypeP | OMX_VIDEO_PictureTypeB;
@@ -4528,6 +4555,29 @@
OMX_IndexParamVideoBitrate, &bitrateType, sizeof(bitrateType));
}
+void ACodec::configureEncoderLatency(const sp<AMessage> &msg) {
+ if (!mIsEncoder || !mIsVideo) {
+ return;
+ }
+
+ int32_t latency = 0, bitrateMode;
+ if (msg->findInt32("latency", &latency) && latency > 0) {
+ status_t err = setLatency(latency);
+ if (err != OK) {
+ ALOGW("[%s] failed setLatency. Failure is fine since this key is optional",
+ mComponentName.c_str());
+ err = OK;
+ } else {
+ mLatency = latency;
+ }
+ } else if (msg->findInt32("bitrate-mode", &bitrateMode) &&
+ bitrateMode == OMX_Video_ControlRateConstant) {
+ // Default the latency to 1 if the latency key is not specified or unsupported
+ // and the bitrate mode is CBR.
+ mLatency = 1;
+ }
+}
+
status_t ACodec::setupErrorCorrectionParameters() {
OMX_VIDEO_PARAM_ERRORCORRECTIONTYPE errorCorrectionType;
InitOMXParams(&errorCorrectionType);
@@ -4786,6 +4836,10 @@
if (mConfigFormat->contains("hdr-static-info")) {
(void)getHDRStaticInfoForVideoCodec(kPortIndexInput, notify);
}
+ uint32_t latency = 0;
+ if (mIsEncoder && getLatency(&latency) == OK && latency > 0) {
+ notify->setInt32("latency", latency);
+ }
}
break;
@@ -5728,8 +5782,7 @@
case IOMX::kPortModeDynamicANWBuffer:
if (info->mCodecData->size() >= sizeof(VideoNativeMetadata)) {
VideoNativeMetadata *vnmd = (VideoNativeMetadata*)info->mCodecData->base();
- sp<GraphicBuffer> graphicBuffer = new GraphicBuffer(
- vnmd->pBuffer, false /* keepOwnership */);
+ sp<GraphicBuffer> graphicBuffer = GraphicBuffer::from(vnmd->pBuffer);
err2 = mCodec->mOMXNode->emptyBuffer(
bufferID, graphicBuffer, flags, timeUs, info->mFenceFd);
}
@@ -7183,6 +7236,16 @@
}
}
+ int32_t latency = 0;
+ if (params->findInt32("latency", &latency) && latency > 0) {
+ status_t err = setLatency(latency);
+ if (err != OK) {
+ ALOGI("[%s] failed setLatency. Failure is fine since this key is optional",
+ mComponentName.c_str());
+ err = OK;
+ }
+ }
+
status_t err = configureTemporalLayers(params, false /* inConfigure */, mOutputFormat);
if (err != OK) {
err = OK; // ignore failure
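A hedged sketch of how a client might request the new encoder latency from the NDK side; the "latency" string key is the one read by ACodec::configureEncoderLatency() above, while the codec type, resolution, and bitrate values are illustrative.

#include <media/NdkMediaCodec.h>
#include <media/NdkMediaFormat.h>

static AMediaCodec *createLowLatencyAvcEncoder() {
    AMediaCodec *codec = AMediaCodec_createEncoderByType("video/avc");
    if (codec == nullptr) return nullptr;

    AMediaFormat *format = AMediaFormat_new();
    AMediaFormat_setString(format, AMEDIAFORMAT_KEY_MIME, "video/avc");
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_WIDTH, 1280);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_HEIGHT, 720);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_BIT_RATE, 2000000);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_FRAME_RATE, 30);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 1);
    AMediaFormat_setInt32(format, AMEDIAFORMAT_KEY_COLOR_FORMAT,
            0x7F000789 /* COLOR_FormatSurface */);
    // Request at most one frame of encoder latency. With latency == 1 the
    // nBFrames logic above disables B-frames; honoring the config is best effort.
    AMediaFormat_setInt32(format, "latency", 1);

    media_status_t err = AMediaCodec_configure(
            codec, format, nullptr /* surface */, nullptr /* crypto */,
            AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
    AMediaFormat_delete(format);
    if (err != AMEDIA_OK) {
        AMediaCodec_delete(codec);
        return nullptr;
    }
    return codec;
}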
diff --git a/media/libstagefright/ACodecBufferChannel.cpp b/media/libstagefright/ACodecBufferChannel.cpp
index 40ac986..0d9696f 100644
--- a/media/libstagefright/ACodecBufferChannel.cpp
+++ b/media/libstagefright/ACodecBufferChannel.cpp
@@ -300,8 +300,10 @@
});
size_t destinationBufferSize = maxSize;
size_t heapSize = totalSize + destinationBufferSize;
- mDealer = makeMemoryDealer(heapSize);
- mDecryptDestination = mDealer->allocate(destinationBufferSize);
+ if (heapSize > 0) {
+ mDealer = makeMemoryDealer(heapSize);
+ mDecryptDestination = mDealer->allocate(destinationBufferSize);
+ }
}
std::vector<const BufferInfo> inputBuffers;
for (const BufferAndId &elem : array) {
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 18cfc0e..bdc37a5 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -86,6 +86,7 @@
liblog \
libmedia \
libaudioclient \
+ libmediametrics \
libmediautils \
libnetd_client \
libsonivox \
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 0fe44eb..a569f5d 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -58,6 +58,10 @@
virtual void postRecordingFrameHandleTimestamp(nsecs_t timestamp, native_handle_t* handle);
+ virtual void postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles);
+
protected:
virtual ~CameraSourceListener();
@@ -110,6 +114,20 @@
}
}
+void CameraSourceListener::postRecordingFrameHandleTimestampBatch(
+ const std::vector<nsecs_t>& timestamps,
+ const std::vector<native_handle_t*>& handles) {
+ sp<CameraSource> source = mSource.promote();
+ if (source.get() != nullptr) {
+ int n = timestamps.size();
+ std::vector<nsecs_t> modifiedTimestamps(n);
+ for (int i = 0; i < n; i++) {
+ modifiedTimestamps[i] = timestamps[i] / 1000;
+ }
+ source->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+ }
+}
+
static int32_t getColorFormat(const char* colorFormat) {
if (!colorFormat) {
ALOGE("Invalid color format");
@@ -952,10 +970,35 @@
}
if (handle != nullptr) {
- // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
- releaseRecordingFrameHandle(handle);
- mMemoryBases.push_back(frame);
- mMemoryBaseAvailableCond.signal();
+ uint32_t batchSize = 0;
+ {
+ Mutex::Autolock autoLock(mBatchLock);
+ if (mInflightBatchSizes.size() > 0) {
+ batchSize = mInflightBatchSizes[0];
+ }
+ }
+ if (batchSize == 0) { // return buffers one by one
+ // Frame contains a VideoNativeHandleMetadata. Send the handle back to camera.
+ releaseRecordingFrameHandle(handle);
+ mMemoryBases.push_back(frame);
+ mMemoryBaseAvailableCond.signal();
+ } else { // Group buffers in batch then return
+ Mutex::Autolock autoLock(mBatchLock);
+ mInflightReturnedHandles.push_back(handle);
+ mInflightReturnedMemorys.push_back(frame);
+ if (mInflightReturnedHandles.size() == batchSize) {
+ releaseRecordingFrameHandleBatch(mInflightReturnedHandles);
+
+ mInflightBatchSizes.pop_front();
+ mInflightReturnedHandles.clear();
+ for (const auto& mem : mInflightReturnedMemorys) {
+ mMemoryBases.push_back(mem);
+ mMemoryBaseAvailableCond.signal();
+ }
+ mInflightReturnedMemorys.clear();
+ }
+ }
+
} else if (mCameraRecordingProxy != nullptr) {
// mCamera is created by application. Return the frame back to camera via camera
// recording proxy.
@@ -1126,6 +1169,21 @@
}
}
+void CameraSource::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ if (mCameraRecordingProxy != nullptr) {
+ mCameraRecordingProxy->releaseRecordingFrameHandleBatch(handles);
+ } else if (mCamera != nullptr) {
+ int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ mCamera->releaseRecordingFrameHandleBatch(handles);
+ IPCThreadState::self()->restoreCallingIdentity(token);
+ } else {
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ }
+}
+
void CameraSource::recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle) {
ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
@@ -1163,6 +1221,62 @@
mFrameAvailableCondition.signal();
}
+void CameraSource::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ size_t n = timestampsUs.size();
+ if (n != handles.size()) {
+ ALOGE("%s: timestampsUs(%zu) and handles(%zu) size mismatch!",
+ __FUNCTION__, timestampsUs.size(), handles.size());
+ }
+
+ Mutex::Autolock autoLock(mLock);
+ int batchSize = 0;
+ for (size_t i = 0; i < n; i++) {
+ int64_t timestampUs = timestampsUs[i];
+ native_handle_t* handle = handles[i];
+
+ ALOGV("%s: timestamp %lld us", __FUNCTION__, (long long)timestampUs);
+ if (handle == nullptr) continue;
+
+ if (shouldSkipFrameLocked(timestampUs)) {
+ releaseRecordingFrameHandle(handle);
+ continue;
+ }
+
+ while (mMemoryBases.empty()) {
+ if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+ TIMED_OUT) {
+ ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+ releaseRecordingFrameHandle(handle);
+ continue;
+ }
+ }
+ ++batchSize;
+ ++mNumFramesReceived;
+ sp<IMemory> data = *mMemoryBases.begin();
+ mMemoryBases.erase(mMemoryBases.begin());
+
+ // Wrap native handle in sp<IMemory> so it can be pushed to mFramesReceived.
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(data->pointer());
+ metadata->eType = kMetadataBufferTypeNativeHandleSource;
+ metadata->pHandle = handle;
+
+ mFramesReceived.push_back(data);
+ int64_t timeUs = mStartTimeUs + (timestampUs - mFirstFrameTimeUs);
+ mFrameTimes.push_back(timeUs);
+ ALOGV("initial delay: %" PRId64 ", current time stamp: %" PRId64, mStartTimeUs, timeUs);
+
+ }
+ if (batchSize > 0) {
+ Mutex::Autolock autoLock(mBatchLock);
+ mInflightBatchSizes.push_back(batchSize);
+ }
+ for (int i = 0; i < batchSize; i++) {
+ mFrameAvailableCondition.signal();
+ }
+}
+
CameraSource::BufferQueueListener::BufferQueueListener(const sp<BufferItemConsumer>& consumer,
const sp<CameraSource>& cameraSource) {
mConsumer = consumer;
@@ -1279,6 +1393,17 @@
mSource->recordingFrameHandleCallbackTimestamp(timestamp / 1000, handle);
}
+void CameraSource::ProxyListener::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ int n = timestampsUs.size();
+ std::vector<nsecs_t> modifiedTimestamps(n);
+ for (int i = 0; i < n; i++) {
+ modifiedTimestamps[i] = timestampsUs[i] / 1000;
+ }
+ mSource->recordingFrameHandleCallbackTimestampBatch(modifiedTimestamps, handles);
+}
+
void CameraSource::DeathNotifier::binderDied(const wp<IBinder>& who __unused) {
ALOGI("Camera recording proxy died");
}
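The batching bookkeeping above follows a simple pattern: record the size of each received batch, then hold returned frames until a full batch can be released in a single IPC. A generic, self-contained restatement (assumed names, not the actual CameraSource members):

#include <cstddef>
#include <deque>
#include <functional>
#include <vector>

// T stands in for native_handle_t*; releaseBatch stands in for
// releaseRecordingFrameHandleBatch().
template <typename T>
class BatchReturner {
public:
    explicit BatchReturner(std::function<void(const std::vector<T>&)> releaseBatch)
        : mReleaseBatch(std::move(releaseBatch)) {}

    // Called when a batch of frames arrives from the camera.
    void onBatchReceived(std::size_t batchSize) {
        if (batchSize > 0) mInflightBatchSizes.push_back(batchSize);
    }

    // Called each time the consumer returns one frame.
    void onFrameReturned(T handle) {
        if (mInflightBatchSizes.empty()) {        // not batching: release immediately
            mReleaseBatch({handle});
            return;
        }
        mPending.push_back(handle);
        if (mPending.size() == mInflightBatchSizes.front()) {
            mReleaseBatch(mPending);              // whole batch goes back at once
            mInflightBatchSizes.pop_front();
            mPending.clear();
        }
    }

private:
    std::function<void(const std::vector<T>&)> mReleaseBatch;
    std::deque<std::size_t> mInflightBatchSizes;  // one entry per received batch
    std::vector<T> mPending;                      // returned but not yet released
};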
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 390c556..970526a 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -315,6 +315,17 @@
CameraSource::recordingFrameHandleCallbackTimestamp(timestampUs, handle);
}
+void CameraSourceTimeLapse::recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles) {
+ ALOGV("recordingFrameHandleCallbackTimestampBatch");
+ int n = timestampsUs.size();
+ for (int i = 0; i < n; i++) {
+ // Don't do batching for CameraSourceTimeLapse for now
+ recordingFrameHandleCallbackTimestamp(timestampsUs[i], handles[i]);
+ }
+}
+
void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
ALOGV("processBufferQueueFrame");
int64_t timestampUs = buffer.mTimestamp / 1000;
diff --git a/media/libstagefright/MP3Extractor.cpp b/media/libstagefright/MP3Extractor.cpp
index 82e7a26..22df522 100644
--- a/media/libstagefright/MP3Extractor.cpp
+++ b/media/libstagefright/MP3Extractor.cpp
@@ -538,7 +538,7 @@
buffer->release();
buffer = NULL;
- return ERROR_END_OF_STREAM;
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
}
uint32_t header = U32_AT((const uint8_t *)buffer->data());
@@ -582,7 +582,7 @@
buffer->release();
buffer = NULL;
- return ERROR_END_OF_STREAM;
+ return (n < 0 ? n : ERROR_END_OF_STREAM);
}
buffer->set_range(0, frame_size);
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index a017737..f2a4d06 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -3914,7 +3914,8 @@
while (true) {
if (mDataSource->readAt(*offset, hdr, 8) < 8) {
- return ERROR_END_OF_STREAM;
+ // No more boxes before the end of the file.
+ break;
}
chunk_size = ntohl(hdr[0]);
chunk_type = ntohl(hdr[1]);
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 059a730..bb20850 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -940,6 +940,10 @@
CHECK(msg->findInt32("err", &err));
ALOGE("Encoder (%s) reported error : 0x%x",
mIsVideo ? "video" : "audio", err);
+ if (!(mFlags & FLAG_USE_SURFACE_INPUT)) {
+ mStopping = true;
+ mPuller->stop();
+ }
signalEOS();
}
break;
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 1706221..02d275b 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -38,9 +38,7 @@
}
status_t OMXClient::connect(bool* trebleFlag) {
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
if (trebleFlag != nullptr) {
*trebleFlag = true;
}
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 82e959e..b6b315d 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -176,7 +176,7 @@
break;
}
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
// Fill the buffer with the a 1x1 checkerboard pattern ;)
uint32_t *img = NULL;
diff --git a/media/libstagefright/include/ACodec.h b/media/libstagefright/include/ACodec.h
index c57005d..6c1a5c6 100644
--- a/media/libstagefright/include/ACodec.h
+++ b/media/libstagefright/include/ACodec.h
@@ -296,6 +296,7 @@
int64_t mTimePerFrameUs;
int64_t mTimePerCaptureUs;
bool mCreateInputBuffersSuspended;
+ uint32_t mLatency;
bool mTunneled;
@@ -483,6 +484,8 @@
AudioEncoding encoding = kAudioEncodingPcm16bit);
status_t setPriority(int32_t priority);
+ status_t setLatency(uint32_t latency);
+ status_t getLatency(uint32_t *latency);
status_t setOperatingRate(float rateFloat, bool isVideo);
status_t getIntraRefreshPeriod(uint32_t *intraRefreshPeriod);
status_t setIntraRefreshPeriod(uint32_t intraRefreshPeriod, bool inConfigure);
@@ -505,6 +508,7 @@
status_t configureBitrate(
int32_t bitrate, OMX_VIDEO_CONTROLRATETYPE bitrateMode);
+ void configureEncoderLatency(const sp<AMessage> &msg);
status_t setupErrorCorrectionParameters();
diff --git a/media/libstagefright/include/CameraSource.h b/media/libstagefright/include/CameraSource.h
index c604f2d..aa56d27 100644
--- a/media/libstagefright/include/CameraSource.h
+++ b/media/libstagefright/include/CameraSource.h
@@ -18,6 +18,7 @@
#define CAMERA_SOURCE_H_
+#include <deque>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
#include <camera/android/hardware/ICamera.h>
@@ -141,6 +142,9 @@
const sp<IMemory> &data);
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
private:
sp<CameraSource> mSource;
@@ -213,6 +217,8 @@
virtual status_t startCameraRecording();
virtual void releaseRecordingFrame(const sp<IMemory>& frame);
virtual void releaseRecordingFrameHandle(native_handle_t* handle);
+ // StagefrightRecorder does not use this for now.
+ virtual void releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles);
// Returns true if need to skip the current frame.
// Called from dataCallbackTimestamp.
@@ -227,6 +233,10 @@
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
+
// Process a buffer item received in BufferQueueListener.
virtual void processBufferQueueFrame(BufferItem& buffer);
@@ -271,6 +281,13 @@
KeyedVector<ANativeWindowBuffer*, BufferItem> mReceivedBufferItemMap;
sp<BufferQueueListener> mBufferQueueListener;
+ Mutex mBatchLock; // protecting access to mInflightXXXXX members below
+ // Start of members protected by mBatchLock
+ std::deque<uint32_t> mInflightBatchSizes;
+ std::vector<native_handle_t*> mInflightReturnedHandles;
+ std::vector<const sp<IMemory>> mInflightReturnedMemorys;
+ // End of members protected by mBatchLock
+
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
void createVideoBufferMemoryHeap(size_t size, uint32_t bufferCount);
diff --git a/media/libstagefright/include/CameraSourceTimeLapse.h b/media/libstagefright/include/CameraSourceTimeLapse.h
index 871c1d9..b066f9a 100644
--- a/media/libstagefright/include/CameraSourceTimeLapse.h
+++ b/media/libstagefright/include/CameraSourceTimeLapse.h
@@ -147,12 +147,23 @@
// In the video camera case calls skipFrameAndModifyTimeStamp() to modify
// timestamp and set mSkipCurrentFrame.
- // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp()
+ // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+ // CameraSource::recordingFrameHandleCallbackTimestampBatch()
// This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
// the metadata is VideoNativeHandleMetadata.
virtual void recordingFrameHandleCallbackTimestamp(int64_t timestampUs,
native_handle_t* handle);
+ // In the video camera case calls skipFrameAndModifyTimeStamp() to modify
+ // timestamp and set mSkipCurrentFrame.
+ // Then it calls the base CameraSource::recordingFrameHandleCallbackTimestamp() or
+ // CameraSource::recordingFrameHandleCallbackTimestampBatch()
+ // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode when
+ // the metadata is VideoNativeHandleMetadata.
+ virtual void recordingFrameHandleCallbackTimestampBatch(
+ const std::vector<int64_t>& timestampsUs,
+ const std::vector<native_handle_t*>& handles);
+
// Process a buffer item received in CameraSource::BufferQueueListener.
// This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
virtual void processBufferQueueFrame(BufferItem& buffer);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index b933002..6bac1db 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -577,7 +577,7 @@
// fill in CryptoInfo fields for AnotherPacketSource::read()
// MediaCas doesn't use cryptoMode, but set to non-zero value here.
scrambledAccessUnit->meta()->setInt32(
- "cryptoMode", CryptoPlugin::kMode_AES_CBC);
+ "cryptoMode", CryptoPlugin::kMode_AES_CTR);
scrambledAccessUnit->meta()->setInt32("cryptoKey", keyId);
scrambledAccessUnit->meta()->setBuffer("clearBytes", clearSizes);
scrambledAccessUnit->meta()->setBuffer("encBytes", encSizes);
diff --git a/media/libstagefright/omx/1.0/Omx.cpp b/media/libstagefright/omx/1.0/Omx.cpp
index 134c661..e5b89da 100644
--- a/media/libstagefright/omx/1.0/Omx.cpp
+++ b/media/libstagefright/omx/1.0/Omx.cpp
@@ -95,11 +95,11 @@
}
sp<OMXNodeInstance> instance = new OMXNodeInstance(
- this, new LWOmxObserver(observer), name);
+ this, new LWOmxObserver(observer), name.c_str());
OMX_COMPONENTTYPE *handle;
OMX_ERRORTYPE err = mMaster->makeComponentInstance(
- name, &OMXNodeInstance::kCallbacks,
+ name.c_str(), &OMXNodeInstance::kCallbacks,
instance.get(), &handle);
if (err != OMX_ErrorNone) {
diff --git a/media/libstagefright/omx/1.0/WOmxNode.cpp b/media/libstagefright/omx/1.0/WOmxNode.cpp
index dc5c8e1..ea9fb35 100644
--- a/media/libstagefright/omx/1.0/WOmxNode.cpp
+++ b/media/libstagefright/omx/1.0/WOmxNode.cpp
@@ -414,7 +414,7 @@
getExtensionIndex_cb _hidl_cb) {
OMX_INDEXTYPE index;
Status status = toStatus(mBase->getExtensionIndex(
- parameterName, &index));
+ parameterName.c_str(), &index));
_hidl_cb(status, toRawIndexType(index));
return Void();
}
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index dae1ee9..afbde6a 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -384,6 +384,7 @@
// to be handled and [pause, 1us], [resume 2us] will be discarded.
bool dropped = false;
bool done = false;
+ bool seeStopAction = false;
if (!mActionQueue.empty()) {
// First scan to check if bufferTimestamp is smaller than first action's timestamp.
ActionItem nextAction = *(mActionQueue.begin());
@@ -431,7 +432,7 @@
dropped = true;
// Clear the whole ActionQueue as recording is done
mActionQueue.clear();
- submitEndOfInputStream_l();
+ seeStopAction = true;
break;
}
default:
@@ -443,6 +444,14 @@
if (dropped) {
releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+ if (seeStopAction) {
+ // Clear all the buffers before setting mEndOfStream and signal EndOfInputStream.
+ if (!releaseAllBuffers()) {
+ ALOGW("Failed to release all the buffers when handling STOP action");
+ }
+ mEndOfStream = true;
+ submitEndOfInputStream_l();
+ }
return true;
}
@@ -922,18 +931,8 @@
if (suspend) {
mSuspended = true;
- while (mNumFramesAvailable > 0) {
- BufferItem item;
- status_t err = acquireBuffer(&item);
-
- if (err != OK) {
- ALOGE("setSuspend: acquireBuffer returned err=%d", err);
- break;
- }
-
- --mNumFramesAvailable;
-
- releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+ if (!releaseAllBuffers()) {
+ ALOGW("Failed to release all the buffers during suspend");
}
return OK;
} else {
@@ -954,6 +953,23 @@
return OK;
}
+bool GraphicBufferSource::releaseAllBuffers() {
+ while (mNumFramesAvailable > 0) {
+ BufferItem item;
+ status_t err = acquireBuffer(&item);
+
+ if (err != OK) {
+ ALOGE("releaseAllBuffers: acquireBuffer failed, err=%d", err);
+ return false;
+ }
+
+ --mNumFramesAvailable;
+
+ releaseBuffer(item.mSlot, item.mFrameNumber, item.mFence);
+ }
+ return true;
+}
+
status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(int64_t repeatAfterUs) {
ALOGV("setRepeatPreviousFrameDelayUs: delayUs=%lld", (long long)repeatAfterUs);
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 371c5ed..ab52ce2 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -220,6 +220,8 @@
// Acquire buffer from the consumer
status_t acquireBuffer(BufferItem *bi);
+ bool releaseAllBuffers();
+
// Release buffer to the consumer
void releaseBuffer(int id, uint64_t frameNum, const sp<Fence> &fence);
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 39ed759..7132f9b 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -902,6 +902,9 @@
? kMetadataBufferTypeGrallocSource : requestedType;
err = OMX_SetParameter(mHandle, index, ¶ms);
}
+ if (err == OMX_ErrorBadParameter) {
+ err = OMX_ErrorUnsupportedIndex;
+ }
}
// don't log loud error if component does not support metadata mode on the output
@@ -1030,6 +1033,11 @@
}
Mutex::Autolock autoLock(mLock);
+ if (!mSailed) {
+ ALOGE("b/35467458");
+ android_errorWriteLog(0x534e4554, "35467458");
+ return BAD_VALUE;
+ }
switch (omxBuffer.mBufferType) {
case OMXBuffer::kBufferTypePreset:
@@ -1467,6 +1475,11 @@
Mutex::Autolock autoLock(mLock);
+ if (!mSailed) {
+ ALOGE("b/35467458");
+ android_errorWriteLog(0x534e4554, "35467458");
+ return BAD_VALUE;
+ }
BufferMeta *buffer_meta = new BufferMeta(portIndex);
OMX_BUFFERHEADERTYPE *header;
diff --git a/media/libstagefright/omx/tests/OMXHarness.cpp b/media/libstagefright/omx/tests/OMXHarness.cpp
index cbca461..fcc44d8 100644
--- a/media/libstagefright/omx/tests/OMXHarness.cpp
+++ b/media/libstagefright/omx/tests/OMXHarness.cpp
@@ -78,9 +78,7 @@
}
status_t Harness::initOMX() {
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
using namespace ::android::hardware::media::omx::V1_0;
sp<IOmx> tOmx = IOmx::getService();
if (tOmx == nullptr) {
diff --git a/media/libstagefright/tests/SurfaceMediaSource_test.cpp b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
index ea58343..7c464ff 100644
--- a/media/libstagefright/tests/SurfaceMediaSource_test.cpp
+++ b/media/libstagefright/tests/SurfaceMediaSource_test.cpp
@@ -510,7 +510,7 @@
// Fill the buffer with the a checkerboard pattern
uint8_t* img = NULL;
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
SurfaceMediaSourceTest::fillYV12Buffer(img, width, height, buf->getStride());
buf->unlock();
@@ -527,7 +527,7 @@
ASSERT_TRUE(anb != NULL);
// We do not fill the buffer in. Just queue it back.
- sp<GraphicBuffer> buf(new GraphicBuffer(anb, false));
+ sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
ASSERT_EQ(NO_ERROR, mANW->queueBuffer(mANW.get(), buf->getNativeBuffer(),
-1));
}
diff --git a/media/mtp/AsyncIO.cpp b/media/mtp/AsyncIO.cpp
index e77ad38..bfb07dc 100644
--- a/media/mtp/AsyncIO.cpp
+++ b/media/mtp/AsyncIO.cpp
@@ -96,6 +96,10 @@
} // end anonymous namespace
+aiocb::~aiocb() {
+ CHECK(!thread.joinable());
+}
+
void aio_pool_init(void(f)(int)) {
CHECK(done == 1);
done = 0;
diff --git a/media/mtp/AsyncIO.h b/media/mtp/AsyncIO.h
index f7515a2..ed80828 100644
--- a/media/mtp/AsyncIO.h
+++ b/media/mtp/AsyncIO.h
@@ -48,6 +48,8 @@
std::thread thread;
ssize_t ret;
int error;
+
+ ~aiocb();
};
// Submit a request for IO to be completed
@@ -58,9 +60,13 @@
// Suspend current thread until given IO is complete, at which point
// its return value and any errors can be accessed
+// All submitted requests must have a corresponding suspend.
+// aiocb->aio_buf must refer to valid memory until after the suspend call.
int aio_suspend(struct aiocb *[], int, const struct timespec *);
int aio_error(const struct aiocb *);
ssize_t aio_return(struct aiocb *);
+
+// (Currently unimplemented)
int aio_cancel(int, struct aiocb *);
// Initialize a threadpool to perform IO. Only one pool can be
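To make the contract documented above concrete, here is a minimal write path written against the standard POSIX <aio.h> that this header mirrors (not against AsyncIO.h itself); the fd handling and error reporting are illustrative.

#include <aio.h>
#include <cstring>

int writeAsync(int fd, const char *msg) {
    struct aiocb cb;
    memset(&cb, 0, sizeof(cb));             // aio_offset == 0: write at file start
    cb.aio_fildes = fd;
    cb.aio_buf = const_cast<char *>(msg);   // must stay valid until the suspend below
    cb.aio_nbytes = strlen(msg);

    if (aio_write(&cb) != 0) return -1;     // submit the request

    const struct aiocb *list[] = { &cb };
    aio_suspend(list, 1, nullptr);          // every submit gets a matching suspend

    if (aio_error(&cb) != 0) return -1;     // completion status
    return static_cast<int>(aio_return(&cb));  // bytes written
}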
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index 35dd10f..565a2fe 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -490,7 +490,11 @@
int MtpFfsHandle::configure(bool usePtp) {
// Wait till previous server invocation has closed
- std::lock_guard<std::mutex> lk(mLock);
+ if (!mLock.try_lock_for(std::chrono::milliseconds(1000))) {
+ LOG(ERROR) << "MtpServer was unable to get configure lock";
+ return -1;
+ }
+ int ret = 0;
// If ptp is changed, the configuration must be rewritten
if (mPtp != usePtp) {
@@ -500,10 +504,10 @@
mPtp = usePtp;
if (!initFunctionfs()) {
- return -1;
+ ret = -1;
}
-
- return 0;
+ mLock.unlock();
+ return ret;
}
void MtpFfsHandle::close() {
@@ -537,14 +541,12 @@
if (file_length > 0) {
length = std::min(static_cast<uint32_t>(MAX_FILE_CHUNK_SIZE), file_length);
- // Read data from USB
- if ((ret = readHandle(mBulkOut, data, length)) == -1) {
- return -1;
- }
+ // Read data from USB, handle errors after waiting for write thread.
+ ret = readHandle(mBulkOut, data, length);
if (file_length != MAX_MTP_FILE_SIZE && ret < static_cast<int>(length)) {
+ ret = -1;
errno = EIO;
- return -1;
}
read = true;
}
@@ -565,6 +567,11 @@
write = false;
}
+ // If there was an error reading above
+ if (ret == -1) {
+ return -1;
+ }
+
if (read) {
// Enqueue a new write request
aio.aio_buf = data;
@@ -622,6 +629,7 @@
aio.aio_fildes = mfr.fd;
struct aiocb *aiol[] = {&aio};
int ret, length;
+ int error = 0;
bool read = false;
bool write = false;
@@ -665,6 +673,10 @@
write = true;
}
+ if (error == -1) {
+ return -1;
+ }
+
if (file_length > 0) {
length = std::min(static_cast<uint64_t>(MAX_FILE_CHUNK_SIZE), file_length);
// Queue up another read
@@ -676,8 +688,9 @@
}
if (write) {
- if (writeHandle(mBulkIn, data2, ret) == -1)
- return -1;
+ if (writeHandle(mBulkIn, data2, ret) == -1) {
+ error = -1;
+ }
write = false;
}
}
diff --git a/media/mtp/MtpFfsHandle.h b/media/mtp/MtpFfsHandle.h
index b4d5a97..7491a1b 100644
--- a/media/mtp/MtpFfsHandle.h
+++ b/media/mtp/MtpFfsHandle.h
@@ -36,7 +36,7 @@
bool mPtp;
- std::mutex mLock;
+ std::timed_mutex mLock;
android::base::unique_fd mControl;
// "in" from the host's perspective => sink for mtp server
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index e4e3d8f..824872f 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -17,7 +17,7 @@
// frameworks/av/include.
ndk_library {
- name: "libmediandk.ndk",
+ name: "libmediandk",
symbol_file: "libmediandk.map.txt",
first_version: "21",
unversioned_until: "current",
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 0984ca4..2c070af 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -47,6 +47,9 @@
LOCAL_CFLAGS += -Werror -Wall
+LOCAL_STATIC_LIBRARIES := \
+ libgrallocusage \
+
LOCAL_SHARED_LIBRARIES := \
libbinder \
libmedia \
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index c0aee90..c449611 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -27,6 +27,7 @@
#include <android_media_Utils.h>
#include <android_runtime/android_view_Surface.h>
#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <grallocusage/GrallocUsageConversion.h>
using namespace android;
@@ -260,7 +261,8 @@
uint64_t consumerUsage;
android_hardware_HardwareBuffer_convertToGrallocUsageBits(
&producerUsage, &consumerUsage, mUsage0, mUsage1);
- mHalUsage = consumerUsage;
+ // Strip out producerUsage here.
+ mHalUsage = android_convertGralloc1To0Usage(0, consumerUsage);
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
@@ -411,11 +413,9 @@
}
// Check if the producer buffer configurations match what ImageReader configured.
- if ((bufferFmt != HAL_PIXEL_FORMAT_BLOB) && (readerFmt != HAL_PIXEL_FORMAT_BLOB) &&
- (readerWidth != bufferWidth || readerHeight != bufferHeight)) {
- ALOGW("%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
- __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
- }
+ ALOGV_IF(readerWidth != bufferWidth || readerHeight != bufferHeight,
+ "%s: Buffer size: %dx%d, doesn't match AImageReader configured size: %dx%d",
+ __FUNCTION__, bufferWidth, bufferHeight, readerWidth, readerHeight);
// Check if the buffer usage is a super set of reader's usage bits, aka all usage bits that
// ImageReader requested has been supported from the producer side.
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
new file mode 100644
index 0000000..72917dd
--- /dev/null
+++ b/media/utils/Android.bp
@@ -0,0 +1,41 @@
+// Copyright 2017 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+cc_library_shared {
+ name: "libmediautils",
+
+ srcs: [
+ "BatteryNotifier.cpp",
+ "ISchedulingPolicyService.cpp",
+ "MemoryLeakTrackUtil.cpp",
+ "ProcessInfo.cpp",
+ "SchedulingPolicyService.cpp",
+ ],
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ "libmemunreachable",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
diff --git a/media/utils/Android.mk b/media/utils/Android.mk
deleted file mode 100644
index 21d1b5b..0000000
--- a/media/utils/Android.mk
+++ /dev/null
@@ -1,44 +0,0 @@
-# Copyright 2015 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-LOCAL_PATH := $(call my-dir)
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- BatteryNotifier.cpp \
- ISchedulingPolicyService.cpp \
- MemoryLeakTrackUtil.cpp \
- ProcessInfo.cpp \
- SchedulingPolicyService.cpp
-
-LOCAL_SHARED_LIBRARIES := \
- libbinder \
- libcutils \
- liblog \
- libutils \
- libmemunreachable \
-
-LOCAL_C_INCLUDES := $(LOCAL_PATH)/include
-
-LOCAL_CFLAGS += \
- -Wall \
- -Wextra \
- -Werror \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_MODULE := libmediautils
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 4b2e643..468b507 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -117,6 +117,22 @@
Mutex gLock;
wp<AudioFlinger> gAudioFlinger;
+// Keep a strong reference to media.log service around forever.
+// The service is within our parent process so it can never die in a way that we could observe.
+// These two variables are const after initialization.
+static sp<IBinder> sMediaLogServiceAsBinder;
+static sp<IMediaLogService> sMediaLogService;
+
+static pthread_once_t sMediaLogOnce = PTHREAD_ONCE_INIT;
+
+static void sMediaLogInit()
+{
+ sMediaLogServiceAsBinder = defaultServiceManager()->getService(String16("media.log"));
+ if (sMediaLogServiceAsBinder != 0) {
+ sMediaLogService = interface_cast<IMediaLogService>(sMediaLogServiceAsBinder);
+ }
+}
+
// ----------------------------------------------------------------------------
std::string formatToString(audio_format_t format) {
@@ -154,6 +170,7 @@
if (doLog) {
mLogMemoryDealer = new MemoryDealer(kLogMemorySize, "LogWriters",
MemoryHeapBase::READ_ONLY);
+ (void) pthread_once(&sMediaLogOnce, sMediaLogInit);
}
// reset battery stats.
@@ -230,15 +247,11 @@
}
// Tell media.log service about any old writers that still need to be unregistered
- if (mLogMemoryDealer != 0) {
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- if (binder != 0) {
- sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
- for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
- sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
- mUnregisteredWriters.pop();
- mediaLogService->unregisterWriter(iMemory);
- }
+ if (sMediaLogService != 0) {
+ for (size_t count = mUnregisteredWriters.size(); count > 0; count--) {
+ sp<IMemory> iMemory(mUnregisteredWriters.top()->getIMemory());
+ mUnregisteredWriters.pop();
+ sMediaLogService->unregisterWriter(iMemory);
}
}
}
@@ -519,13 +532,10 @@
// append a copy of media.log here by forwarding fd to it, but don't attempt
// to lookup the service if it's not running, as it will block for a second
- if (mLogMemoryDealer != 0) {
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- if (binder != 0) {
- dprintf(fd, "\nmedia.log:\n");
- Vector<String16> args;
- binder->dump(fd, args);
- }
+ if (sMediaLogServiceAsBinder != 0) {
+ dprintf(fd, "\nmedia.log:\n");
+ Vector<String16> args;
+ sMediaLogServiceAsBinder->dump(fd, args);
}
// check for optional arguments
@@ -570,16 +580,11 @@
sp<NBLog::Writer> AudioFlinger::newWriter_l(size_t size, const char *name)
{
- // If there is no memory allocated for logs, return a dummy writer that does nothing
- if (mLogMemoryDealer == 0) {
+ // If there is no memory allocated for logs, return a dummy writer that does nothing.
+ // Similarly if we can't contact the media.log service, also return a dummy writer.
+ if (mLogMemoryDealer == 0 || sMediaLogService == 0) {
return new NBLog::Writer();
}
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- // Similarly if we can't contact the media.log service, also return a dummy writer
- if (binder == 0) {
- return new NBLog::Writer();
- }
- sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
sp<IMemory> shared = mLogMemoryDealer->allocate(NBLog::Timeline::sharedSize(size));
// If allocation fails, consult the vector of previously unregistered writers
// and garbage-collect one or more of them until an allocation succeeds
@@ -590,7 +595,7 @@
// Pick the oldest stale writer to garbage-collect
sp<IMemory> iMemory(mUnregisteredWriters[0]->getIMemory());
mUnregisteredWriters.removeAt(0);
- mediaLogService->unregisterWriter(iMemory);
+ sMediaLogService->unregisterWriter(iMemory);
// Now the media.log remote reference to IMemory is gone. When our last local
// reference to IMemory also drops to zero at end of this block,
// the IMemory destructor will deallocate the region from mLogMemoryDealer.
@@ -609,7 +614,7 @@
NBLog::Shared *sharedRawPtr = (NBLog::Shared *) shared->pointer();
new((void *) sharedRawPtr) NBLog::Shared(); // placement new here, but the corresponding
// explicit destructor not needed since it is POD
- mediaLogService->registerWriter(shared, size, name);
+ sMediaLogService->registerWriter(shared, size, name);
return new NBLog::Writer(shared, size);
}
@@ -1544,6 +1549,10 @@
}
bool AudioFlinger::MediaLogNotifier::threadLoop() {
+ // Should already have been checked, but just in case
+ if (sMediaLogService == 0) {
+ return false;
+ }
// Wait until there are pending requests
{
AutoMutex _l(mMutex);
@@ -1555,11 +1564,7 @@
mPendingRequests = false;
}
// Execute the actual MediaLogService binder call and ignore extra requests for a while
- sp<IBinder> binder = defaultServiceManager()->getService(String16("media.log"));
- if (binder != 0) {
- sp<IMediaLogService> mediaLogService(interface_cast<IMediaLogService>(binder));
- mediaLogService->requestMergeWakeup();
- }
+ sMediaLogService->requestMergeWakeup();
usleep(kPostTriggerSleepPeriod);
return true;
}
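
The media.log handle is now resolved once and cached for the life of the process. A minimal sketch of the pthread_once lazy-init pattern, with a dummy payload standing in for the binder lookup (names invented):

    #include <pthread.h>
    #include <cstdio>
    #include <memory>

    static std::shared_ptr<int> sService;            // stands in for sMediaLogService
    static pthread_once_t sOnce = PTHREAD_ONCE_INIT;

    static void initService() {
        // The lookup runs exactly once, even if several threads race to trigger it;
        // the result is treated as const afterwards.
        sService = std::make_shared<int>(42);
    }

    std::shared_ptr<int> getService() {
        (void) pthread_once(&sOnce, initService);
        return sService;                             // may be null if the lookup failed
    }

    int main() {
        auto svc = getService();
        std::printf("service %s\n", svc ? "available" : "missing");
        return svc ? 0 : 1;
    }
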
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 6a75bb0..97a4a85 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -6800,7 +6800,7 @@
bool AudioFlinger::RecordThread::stop(RecordThread::RecordTrack* recordTrack) {
ALOGV("RecordThread::stop");
AutoMutex _l(mLock);
- if (mActiveTracks.indexOf(recordTrack) != 0 || recordTrack->mState == TrackBase::PAUSING) {
+ if (mActiveTracks.indexOf(recordTrack) < 0 || recordTrack->mState == TrackBase::PAUSING) {
return false;
}
// note that threadLoop may still be processing the track at this point [without lock]
@@ -6814,7 +6814,7 @@
// FIXME incorrect usage of wait: no explicit predicate or loop
mStartStopCond.wait(mLock);
// if we have been restarted, recordTrack is in mActiveTracks here
- if (exitPending() || mActiveTracks.indexOf(recordTrack) != 0) {
+ if (exitPending() || mActiveTracks.indexOf(recordTrack) < 0) {
ALOGV("Record stopped OK");
return true;
}
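
The two changed conditions fix a classic index-of bug: position 0 is a valid hit, and only a negative result means "not found". A tiny self-contained illustration:

    #include <vector>
    #include <cassert>

    static int indexOf(const std::vector<int>& v, int x) {
        for (size_t i = 0; i < v.size(); ++i)
            if (v[i] == x) return static_cast<int>(i);
        return -1;                        // not found
    }

    int main() {
        std::vector<int> tracks{7};
        assert(indexOf(tracks, 7) == 0);  // present at index 0: "!= 0" would misreport it
        assert(indexOf(tracks, 9) < 0);   // absent: the correct "not found" test
        return 0;
    }
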
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index dbdcca7..bea9f4f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -91,8 +91,10 @@
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
AUDIO_PATCH_HANDLE_NONE;
- mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
- &mConfig, &deviceConfig, patchHandle);
+ if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+ mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
+ }
}
return mActiveCount;
@@ -126,9 +128,11 @@
AUDIO_CONFIG_BASE_INITIALIZER;
const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
AUDIO_PATCH_HANDLE_NONE;
- mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
- mSession, mInputSource,
- &mConfig, &deviceConfig, patchHandle);
+ if (patchHandle != AUDIO_PATCH_HANDLE_NONE) {
+ mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+ mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
+ }
}
}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
index aa2af0f..b43f83b 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/PolicyConfigurableDomains.xml
@@ -3062,7 +3062,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dp"/>
</CompoundRule>
</Configuration>
@@ -3070,7 +3070,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpHeadphones"/>
</CompoundRule>
</Configuration>
@@ -3078,7 +3078,7 @@
<CompoundRule Type="All">
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="Is" Value="ForceNoBtA2dp"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceNoBtA2dp"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="BluetoothA2dpSpeaker"/>
</CompoundRule>
</Configuration>
@@ -6472,7 +6472,7 @@
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCall"/>
<SelectionCriterionRule SelectionCriterion="TelephonyMode" MatchesWhen="IsNot" Value="InCommunication"/>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Excludes" Value="UsbAccessory"/>
- <SelectionCriterionRule SelectionCriterion="ForceUseForCommunication" MatchesWhen="Is" Value="ForceSpeaker"/>
+ <SelectionCriterionRule SelectionCriterion="ForceUseForMedia" MatchesWhen="IsNot" Value="ForceSpeaker"/>
</CompoundRule>
</CompoundRule>
<SelectionCriterionRule SelectionCriterion="AvailableOutputDevices" MatchesWhen="Includes" Value="UsbDevice"/>
@@ -8416,6 +8416,7 @@
<ConfigurableElement Path="/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy"/>
<ConfigurableElement Path="/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy"/>
<ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy"/>
+ <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Calibration">
@@ -8461,6 +8462,9 @@
<ConfigurableElement Path="/Policy/policy/usages/game/applicable_strategy/strategy">
<EnumParameter Name="strategy">media</EnumParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/usages/assistant/applicable_strategy/strategy">
+ <EnumParameter Name="strategy">media</EnumParameter>
+ </ConfigurableElement>
</Configuration>
</Settings>
</ConfigurableDomain>
@@ -8738,6 +8742,7 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/loopback"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/ip"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Calibration">
@@ -9428,6 +9433,9 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/bus">
<BitParameter Name="bus">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/stub">
+ <BitParameter Name="stub">0</BitParameter>
+ </ConfigurableElement>
</Configuration>
</Settings>
</ConfigurableDomain>
@@ -9758,7 +9766,7 @@
</Configuration>
</Settings>
</ConfigurableDomain>
- <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndHotword" SequenceAware="false">
+ <ConfigurableDomain Name="DeviceForInputSource.VoiceRecognitionAndUnprocessedAndHotword" SequenceAware="false">
<Configurations>
<Configuration Name="ScoHeadset">
<CompoundRule Type="All">
@@ -9790,6 +9798,10 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/wired_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/usb_device"/>
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device"/>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/wired_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/usb_device"/>
@@ -9809,6 +9821,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">1</BitParameter>
</ConfigurableElement>
@@ -9835,6 +9859,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9861,6 +9897,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">1</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9887,6 +9935,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">1</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">1</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
@@ -9913,6 +9973,18 @@
<ConfigurableElement Path="/Policy/policy/input_sources/voice_recognition/applicable_input_device/mask/builtin_mic">
<BitParameter Name="builtin_mic">0</BitParameter>
</ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/bluetooth_sco_headset">
+ <BitParameter Name="bluetooth_sco_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/wired_headset">
+ <BitParameter Name="wired_headset">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/usb_device">
+ <BitParameter Name="usb_device">0</BitParameter>
+ </ConfigurableElement>
+ <ConfigurableElement Path="/Policy/policy/input_sources/unprocessed/applicable_input_device/mask/builtin_mic">
+ <BitParameter Name="builtin_mic">0</BitParameter>
+ </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/hotword/applicable_input_device/mask/bluetooth_sco_headset">
<BitParameter Name="bluetooth_sco_headset">0</BitParameter>
</ConfigurableElement>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
index ecd56b0..eb11980 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_accessibility.pfw
@@ -375,7 +375,7 @@
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
AvailableOutputDevices Excludes UsbAccessory
- ForceUseForCommunication Is ForceSpeaker
+ ForceUseForMedia IsNot ForceSpeaker
AvailableOutputDevices Includes UsbDevice
component: /Policy/policy/strategies/accessibility/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
index b30aa4c..cee7cd1 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/device_for_strategy_sonification_respectful.pfw
@@ -78,7 +78,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dp
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -105,7 +105,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpHeadphones
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
@@ -132,7 +132,7 @@
#
TelephonyMode IsNot InCall
TelephonyMode IsNot InCommunication
- ForceUseForMedia Is ForceNoBtA2dp
+ ForceUseForMedia IsNot ForceNoBtA2dp
AvailableOutputDevices Includes BluetoothA2dpSpeaker
component: /Policy/policy/strategies/sonification_respectful/selected_output_devices/mask
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
index 3f5da13..b3115e7 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Settings/strategy_for_usage.pfw
@@ -16,6 +16,7 @@
/Policy/policy/usages/assistance_navigation_guidance/applicable_strategy/strategy = media
/Policy/policy/usages/assistance_sonification/applicable_strategy/strategy = media
/Policy/policy/usages/game/applicable_strategy/strategy = media
+ /Policy/policy/usages/assistant/applicable_strategy/strategy = media
domain: AssistanceAccessibility
conf: Sonification
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
index 71b2b62..ad9c356 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/examples/Structure/PolicySubsystem.xml
@@ -68,7 +68,7 @@
<!--#################### USAGE BEGIN ####################-->
- <ComponentType Name="Usages" Description="associated to audio_stream_type_t definition,
+ <ComponentType Name="Usages" Description="associated to audio_usage_t definition,
identifier mapping must match the value of the enum">
<Component Name="unknown" Type="Usage" Mapping="Amend1:Unknown,Identifier:0"/>
<Component Name="media" Type="Usage" Mapping="Amend1:Media,Identifier:1"/>
@@ -97,6 +97,7 @@
<Component Name="game" Type="Usage" Mapping="Amend1:BluetoothSco,Identifier:14"/>
<Component Name="virtual_source" Type="Usage"
Mapping="Amend1:VirtualSource,Identifier:15"/>
+ <Component Name="assistant" Type="Usage" Mapping="Amend1:Assistant,Identifier:16"/>
</ComponentType>
<!--#################### USAGE END ####################-->
diff --git a/services/camera/libcameraservice/CameraFlashlight.cpp b/services/camera/libcameraservice/CameraFlashlight.cpp
index 0ff9314..4537ae6 100644
--- a/services/camera/libcameraservice/CameraFlashlight.cpp
+++ b/services/camera/libcameraservice/CameraFlashlight.cpp
@@ -944,7 +944,7 @@
}
// need to set __get_memory in set_callbacks().
- device->setCallbacks(NULL, NULL, NULL, NULL);
+ device->setCallbacks(NULL, NULL, NULL, NULL, NULL);
mParameters = device->getParameters();
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 4318a11..39351e7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -661,6 +661,22 @@
return Status::ok();
}
+Status CameraService::getCameraVendorTagCache(
+ /*out*/ hardware::camera2::params::VendorTagDescriptorCache* cache) {
+ ATRACE_CALL();
+ if (!mInitialized) {
+ ALOGE("%s: Camera HAL couldn't be initialized", __FUNCTION__);
+ return STATUS_ERROR(ERROR_DISCONNECTED,
+ "Camera subsystem not available");
+ }
+ sp<VendorTagDescriptorCache> globalCache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (globalCache != nullptr) {
+ *cache = *(globalCache.get());
+ }
+ return Status::ok();
+}
+
int CameraService::getDeviceVersion(const String8& cameraId, int* facing) {
ATRACE_CALL();
@@ -2859,7 +2875,13 @@
sp<VendorTagDescriptor> desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
if (desc == NULL) {
- dprintf(fd, "No vendor tags.\n");
+ sp<VendorTagDescriptorCache> cache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (cache == NULL) {
+ dprintf(fd, "No vendor tags.\n");
+ } else {
+ cache->dump(fd, /*verbosity*/2, /*indentation*/2);
+ }
} else {
desc->dump(fd, /*verbosity*/2, /*indentation*/2);
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index c7acdc9..e49fe62 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -112,6 +112,9 @@
virtual binder::Status getCameraVendorTagDescriptor(
/*out*/
hardware::camera2::params::VendorTagDescriptor* desc);
+ virtual binder::Status getCameraVendorTagCache(
+ /*out*/
+ hardware::camera2::params::VendorTagDescriptorCache* cache);
virtual binder::Status connect(const sp<hardware::ICameraClient>& cameraClient,
int32_t cameraId, const String16& clientPackageName,
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 3aec562..335e999 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -1261,6 +1261,13 @@
ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
}
+void Camera2Client::releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles) {
+ (void)handles;
+ ATRACE_CALL();
+ ALOGW("%s: Not supported in buffer queue mode.", __FUNCTION__);
+}
+
status_t Camera2Client::autoFocus() {
ATRACE_CALL();
Mutex::Autolock icl(mBinderSerializationLock);
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 87c91a0..9738aca 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -72,6 +72,8 @@
virtual bool recordingEnabled();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t *handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
virtual status_t autoFocus();
virtual status_t cancelAutoFocus();
virtual status_t takePicture(int msgType);
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index ffb657e..df8726e 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -98,6 +98,7 @@
mHardware->setCallbacks(notifyCallback,
dataCallback,
dataCallbackTimestamp,
+ handleCallbackTimestampBatch,
(void *)(uintptr_t)mCameraId);
// Enable zoom, error, focus, and metadata messages by default
@@ -533,6 +534,50 @@
mHardware->releaseRecordingFrame(dataPtr);
}
+void CameraClient::releaseRecordingFrameHandleBatch(const std::vector<native_handle_t*>& handles) {
+ size_t n = handles.size();
+ std::vector<sp<IMemory>> frames;
+ frames.reserve(n);
+ bool error = false;
+ for (auto& handle : handles) {
+ sp<IMemory> dataPtr;
+ {
+ Mutex::Autolock l(mAvailableCallbackBuffersLock);
+ if (!mAvailableCallbackBuffers.empty()) {
+ dataPtr = mAvailableCallbackBuffers.back();
+ mAvailableCallbackBuffers.pop_back();
+ }
+ }
+
+ if (dataPtr == nullptr) {
+ ALOGE("%s: %d: No callback buffer available. Dropping frames.", __FUNCTION__,
+ __LINE__);
+ error = true;
+ break;
+ } else if (dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+ ALOGE("%s: %d: Callback buffer must be VideoNativeHandleMetadata", __FUNCTION__,
+ __LINE__);
+ error = true;
+ break;
+ }
+
+ VideoNativeHandleMetadata *metadata = (VideoNativeHandleMetadata*)(dataPtr->pointer());
+ metadata->eType = kMetadataBufferTypeNativeHandleSource;
+ metadata->pHandle = handle;
+ frames.push_back(dataPtr);
+ }
+
+ if (error) {
+ for (auto& handle : handles) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ } else {
+ mHardware->releaseRecordingFrameBatch(frames);
+ }
+ return;
+}
+
status_t CameraClient::setVideoBufferMode(int32_t videoBufferMode) {
LOG1("setVideoBufferMode: %d", videoBufferMode);
bool enableMetadataInBuffers = false;
@@ -855,6 +900,49 @@
client->handleGenericDataTimestamp(timestamp, msgType, dataPtr);
}
+void CameraClient::handleCallbackTimestampBatch(
+ int32_t msgType, const std::vector<HandleTimestampMessage>& msgs, void* user) {
+ LOG2("dataCallbackTimestampBatch");
+ sp<CameraClient> client = getClientFromCookie(user);
+ if (client.get() == nullptr) return;
+ if (!client->lockIfMessageWanted(msgType)) return;
+
+ sp<hardware::ICameraClient> c = client->mRemoteCallback;
+ client->mLock.unlock();
+ if (c != 0 && msgs.size() > 0) {
+ size_t n = msgs.size();
+ std::vector<nsecs_t> timestamps;
+ std::vector<native_handle_t*> handles;
+ timestamps.reserve(n);
+ handles.reserve(n);
+ for (auto& msg : msgs) {
+ native_handle_t* handle = nullptr;
+ if (msg.dataPtr->size() != sizeof(VideoNativeHandleMetadata)) {
+ ALOGE("%s: dataPtr does not contain VideoNativeHandleMetadata!", __FUNCTION__);
+ return;
+ }
+ VideoNativeHandleMetadata *metadata =
+ (VideoNativeHandleMetadata*)(msg.dataPtr->pointer());
+ if (metadata->eType == kMetadataBufferTypeNativeHandleSource) {
+ handle = metadata->pHandle;
+ }
+
+ if (handle == nullptr) {
+ ALOGE("%s: VideoNativeHandleMetadata type mismatch or null handle passed!",
+ __FUNCTION__);
+ return;
+ }
+ {
+ Mutex::Autolock l(client->mAvailableCallbackBuffersLock);
+ client->mAvailableCallbackBuffers.push_back(msg.dataPtr);
+ }
+ timestamps.push_back(msg.timestamp);
+ handles.push_back(handle);
+ }
+ c->recordingFrameHandleCallbackTimestampBatch(timestamps, handles);
+ }
+}
+
// snapshot taken callback
void CameraClient::handleShutter(void) {
if (mPlayShutterSound) {
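
handleCallbackTimestampBatch collapses N per-frame callbacks into one remote call by building two parallel vectors. A stripped-down sketch of that batching shape (invented types, no binder involved):

    #include <cstdint>
    #include <iostream>
    #include <vector>

    struct FrameMsg { std::int64_t timestampNs; int handle; };   // stand-in types

    void deliverBatch(const std::vector<std::int64_t>& ts, const std::vector<int>& handles) {
        std::cout << "delivering " << ts.size() << " frames, first handle "
                  << handles.front() << "\n";
    }

    int main() {
        std::vector<FrameMsg> msgs{{1000, 7}, {2000, 8}, {3000, 9}};
        std::vector<std::int64_t> timestamps;
        std::vector<int> handles;
        timestamps.reserve(msgs.size());          // one allocation per vector
        handles.reserve(msgs.size());
        for (const auto& m : msgs) {
            timestamps.push_back(m.timestampNs);
            handles.push_back(m.handle);
        }
        deliverBatch(timestamps, handles);        // one call instead of one per frame
        return 0;
    }
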
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 91f00e3..1073384 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -50,6 +50,8 @@
virtual bool recordingEnabled();
virtual void releaseRecordingFrame(const sp<IMemory>& mem);
virtual void releaseRecordingFrameHandle(native_handle_t *handle);
+ virtual void releaseRecordingFrameHandleBatch(
+ const std::vector<native_handle_t*>& handles);
virtual status_t autoFocus();
virtual status_t cancelAutoFocus();
virtual status_t takePicture(int msgType);
@@ -109,6 +111,8 @@
static void dataCallback(int32_t msgType, const sp<IMemory>& dataPtr,
camera_frame_metadata_t *metadata, void* user);
static void dataCallbackTimestamp(nsecs_t timestamp, int32_t msgType, const sp<IMemory>& dataPtr, void* user);
+ static void handleCallbackTimestampBatch(
+ int32_t msgType, const std::vector<HandleTimestampMessage>&, void* user);
// handlers for messages
void handleShutter(void);
void handlePreviewData(int32_t msgType, const sp<IMemory>& mem,
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 394eb4c..733a78e 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -367,9 +367,12 @@
entry = result.find(tag);
if (entry.count == 0) {
+ const camera_metadata *metaBuffer = result.getAndLock();
ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
__FUNCTION__, cameraId,
- get_camera_metadata_tag_name(tag), frameNumber);
+ get_local_camera_metadata_tag_name(tag, metaBuffer),
+ frameNumber);
+ result.unlock(metaBuffer);
return false;
} else {
switch(sizeof(Src)){
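
get_local_camera_metadata_tag_name() needs the raw buffer, which is only valid between getAndLock() and unlock(). The shape of that bracketing, reduced to a toy holder class (invented types, not the CameraMetadata API):

    #include <cassert>
    #include <mutex>
    #include <string>

    class MetadataHolder {
    public:
        const std::string* getAndLock() { mMutex.lock(); return &mData; }
        void unlock(const std::string* view) { assert(view == &mData); mMutex.unlock(); }
    private:
        std::mutex mMutex;
        std::string mData = "tag-table";
    };

    int main() {
        MetadataHolder holder;
        const std::string* view = holder.getAndLock();   // raw view valid from here...
        bool found = (view->find("tag") != std::string::npos);
        holder.unlock(view);                             // ...until here
        return found ? 0 : 1;
    }
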
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 83c84af..b2686bf 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -798,16 +798,38 @@
exposureCompensationStep.data.r[0].denominator);
autoExposureLock = false;
- params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
- CameraParameters::FALSE);
- params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
- CameraParameters::TRUE);
+ autoExposureLockAvailable = false;
+ camera_metadata_ro_entry_t exposureLockAvailable =
+ staticInfo(ANDROID_CONTROL_AE_LOCK_AVAILABLE, 1, 1);
+ if ((0 < exposureLockAvailable.count) &&
+ (ANDROID_CONTROL_AE_LOCK_AVAILABLE_TRUE ==
+ exposureLockAvailable.data.u8[0])) {
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK,
+ CameraParameters::FALSE);
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+ CameraParameters::TRUE);
+ autoExposureLockAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_AUTO_EXPOSURE_LOCK_SUPPORTED,
+ CameraParameters::FALSE);
+ }
autoWhiteBalanceLock = false;
- params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
- CameraParameters::FALSE);
- params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
- CameraParameters::TRUE);
+ autoWhiteBalanceLockAvailable = false;
+ camera_metadata_ro_entry_t whitebalanceLockAvailable =
+ staticInfo(ANDROID_CONTROL_AWB_LOCK_AVAILABLE, 1, 1);
+ if ((0 < whitebalanceLockAvailable.count) &&
+ (ANDROID_CONTROL_AWB_LOCK_AVAILABLE_TRUE ==
+ whitebalanceLockAvailable.data.u8[0])) {
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK,
+ CameraParameters::FALSE);
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+ CameraParameters::TRUE);
+ autoWhiteBalanceLockAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK_SUPPORTED,
+ CameraParameters::FALSE);
+ }
meteringAreas.add(Parameters::Area(0, 0, 0, 0, 0));
params.set(CameraParameters::KEY_MAX_NUM_METERING_AREAS,
@@ -816,30 +838,37 @@
"(0,0,0,0,0)");
zoom = 0;
- params.set(CameraParameters::KEY_ZOOM, zoom);
- params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
-
+ zoomAvailable = false;
camera_metadata_ro_entry_t maxDigitalZoom =
staticInfo(ANDROID_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, /*minCount*/1, /*maxCount*/1);
if (!maxDigitalZoom.count) return NO_INIT;
- {
- String8 zoomRatios;
- float zoom = 1.f;
- float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
- (NUM_ZOOM_STEPS-1);
- bool addComma = false;
- for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
- if (addComma) zoomRatios += ",";
- addComma = true;
- zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
- zoom += zoomIncrement;
- }
- params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
- }
+ if (fabs(maxDigitalZoom.data.f[0] - 1.f) > 0.00001f) {
+ params.set(CameraParameters::KEY_ZOOM, zoom);
+ params.set(CameraParameters::KEY_MAX_ZOOM, NUM_ZOOM_STEPS - 1);
- params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
- CameraParameters::TRUE);
+ {
+ String8 zoomRatios;
+ float zoom = 1.f;
+ float zoomIncrement = (maxDigitalZoom.data.f[0] - zoom) /
+ (NUM_ZOOM_STEPS-1);
+ bool addComma = false;
+ for (size_t i=0; i < NUM_ZOOM_STEPS; i++) {
+ if (addComma) zoomRatios += ",";
+ addComma = true;
+ zoomRatios += String8::format("%d", static_cast<int>(zoom * 100));
+ zoom += zoomIncrement;
+ }
+ params.set(CameraParameters::KEY_ZOOM_RATIOS, zoomRatios);
+ }
+
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::TRUE);
+ zoomAvailable = true;
+ } else {
+ params.set(CameraParameters::KEY_ZOOM_SUPPORTED,
+ CameraParameters::FALSE);
+ }
params.set(CameraParameters::KEY_SMOOTH_ZOOM_SUPPORTED,
CameraParameters::FALSE);
@@ -1198,11 +1227,14 @@
camera_metadata_ro_entry_t Parameters::staticInfo(uint32_t tag,
size_t minCount, size_t maxCount, bool required) const {
camera_metadata_ro_entry_t entry = info->find(tag);
+ const camera_metadata_t *metaBuffer = info->getAndLock();
if (CC_UNLIKELY( entry.count == 0 ) && required) {
- const char* tagSection = get_camera_metadata_section_name(tag);
+ const char* tagSection = get_local_camera_metadata_section_name(tag,
+ metaBuffer);
if (tagSection == NULL) tagSection = "<unknown>";
- const char* tagName = get_camera_metadata_tag_name(tag);
+ const char* tagName = get_local_camera_metadata_tag_name(tag,
+ metaBuffer);
if (tagName == NULL) tagName = "<unknown>";
ALOGE("Error finding static metadata entry '%s.%s' (%x)",
@@ -1210,14 +1242,17 @@
} else if (CC_UNLIKELY(
(minCount != 0 && entry.count < minCount) ||
(maxCount != 0 && entry.count > maxCount) ) ) {
- const char* tagSection = get_camera_metadata_section_name(tag);
+ const char* tagSection = get_local_camera_metadata_section_name(tag,
+ metaBuffer);
if (tagSection == NULL) tagSection = "<unknown>";
- const char* tagName = get_camera_metadata_tag_name(tag);
+ const char* tagName = get_local_camera_metadata_tag_name(tag,
+ metaBuffer);
if (tagName == NULL) tagName = "<unknown>";
ALOGE("Malformed static metadata entry '%s.%s' (%x):"
"Expected between %zu and %zu values, but got %zu values",
tagSection, tagName, tag, minCount, maxCount, entry.count);
}
+ info->unlock(metaBuffer);
return entry;
}
@@ -1830,13 +1865,25 @@
return BAD_VALUE;
}
- // AUTO_EXPOSURE_LOCK (always supported)
- validatedParams.autoExposureLock = boolFromString(
- newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+ if (autoExposureLockAvailable) {
+ validatedParams.autoExposureLock = boolFromString(
+ newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK));
+ } else if (nullptr !=
+ newParams.get(CameraParameters::KEY_AUTO_EXPOSURE_LOCK)){
+ ALOGE("%s: Requested auto exposure lock is not supported",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
- // AUTO_WHITEBALANCE_LOCK (always supported)
- validatedParams.autoWhiteBalanceLock = boolFromString(
- newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+ if (autoWhiteBalanceLockAvailable) {
+ validatedParams.autoWhiteBalanceLock = boolFromString(
+ newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK));
+ } else if (nullptr !=
+ newParams.get(CameraParameters::KEY_AUTO_WHITEBALANCE_LOCK)) {
+ ALOGE("%s: Requested auto whitebalance lock is not supported",
+ __FUNCTION__);
+ return BAD_VALUE;
+ }
// METERING_AREAS
size_t maxAeRegions = (size_t)staticInfo(ANDROID_CONTROL_MAX_REGIONS,
@@ -1856,12 +1903,14 @@
}
// ZOOM
- validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
- if (validatedParams.zoom < 0
- || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
- ALOGE("%s: Requested zoom level %d is not supported",
- __FUNCTION__, validatedParams.zoom);
- return BAD_VALUE;
+ if (zoomAvailable) {
+ validatedParams.zoom = newParams.getInt(CameraParameters::KEY_ZOOM);
+ if (validatedParams.zoom < 0
+ || validatedParams.zoom >= (int)NUM_ZOOM_STEPS) {
+ ALOGE("%s: Requested zoom level %d is not supported",
+ __FUNCTION__, validatedParams.zoom);
+ return BAD_VALUE;
+ }
}
// VIDEO_SIZE
@@ -1982,10 +2031,12 @@
}
if (res != OK) return res;
- uint8_t reqWbLock = autoWhiteBalanceLock ?
- ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
- res = request->update(ANDROID_CONTROL_AWB_LOCK,
- &reqWbLock, 1);
+ if (autoWhiteBalanceLockAvailable) {
+ uint8_t reqWbLock = autoWhiteBalanceLock ?
+ ANDROID_CONTROL_AWB_LOCK_ON : ANDROID_CONTROL_AWB_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AWB_LOCK,
+ &reqWbLock, 1);
+ }
res = request->update(ANDROID_CONTROL_EFFECT_MODE,
&effectMode, 1);
@@ -2043,11 +2094,13 @@
&reqAeMode, 1);
if (res != OK) return res;
- uint8_t reqAeLock = autoExposureLock ?
- ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
- res = request->update(ANDROID_CONTROL_AE_LOCK,
- &reqAeLock, 1);
- if (res != OK) return res;
+ if (autoExposureLockAvailable) {
+ uint8_t reqAeLock = autoExposureLock ?
+ ANDROID_CONTROL_AE_LOCK_ON : ANDROID_CONTROL_AE_LOCK_OFF;
+ res = request->update(ANDROID_CONTROL_AE_LOCK,
+ &reqAeLock, 1);
+ if (res != OK) return res;
+ }
res = request->update(ANDROID_CONTROL_AWB_MODE,
&wbMode, 1);
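
The Parameters changes only advertise AE/AWB lock and zoom when the static metadata says the HAL supports them, and reject client attempts to set an unadvertised key. A minimal sketch of that gate (invented key names and types, not the CameraParameters API):

    #include <map>
    #include <optional>
    #include <string>
    #include <iostream>

    struct Params {
        std::map<std::string, std::string> kv;
        void set(const std::string& k, const std::string& v) { kv[k] = v; }
        std::optional<std::string> get(const std::string& k) const {
            auto it = kv.find(k);
            return it == kv.end() ? std::nullopt : std::optional<std::string>(it->second);
        }
    };

    int validate(const Params& requested, bool aeLockAvailable) {
        auto aeLock = requested.get("auto-exposure-lock");
        if (!aeLockAvailable && aeLock) {
            std::cerr << "auto exposure lock is not supported\n";
            return -1;                              // analogue of returning BAD_VALUE
        }
        return 0;
    }

    int main() {
        Params p;
        p.set("auto-exposure-lock", "true");
        return validate(p, /*aeLockAvailable=*/false) == -1 ? 0 : 1;
    }
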
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index c8ecbba..507de75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -122,7 +122,9 @@
int32_t exposureCompensation;
bool autoExposureLock;
+ bool autoExposureLockAvailable;
bool autoWhiteBalanceLock;
+ bool autoWhiteBalanceLockAvailable;
// 3A region types, for use with ANDROID_CONTROL_MAX_REGIONS
enum region_t {
@@ -135,6 +137,7 @@
Vector<Area> meteringAreas;
int zoom;
+ bool zoomAvailable;
int videoWidth, videoHeight, videoFormat;
android_dataspace videoDataSpace;
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index f9b062a..4f788ae 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -145,12 +145,6 @@
int32_t format, /*out*/ int32_t *id) = 0;
/**
- * Create an input reprocess stream that uses buffers from an existing
- * output stream.
- */
- virtual status_t createReprocessStreamFromStream(int outputId, int *id) = 0;
-
- /**
* Get information about a given stream.
*/
virtual status_t getStreamInfo(int id,
@@ -169,12 +163,6 @@
virtual status_t deleteStream(int id) = 0;
/**
- * Delete reprocess stream. Must not be called if there are requests in
- * flight which reference that stream.
- */
- virtual status_t deleteReprocessStream(int id) = 0;
-
- /**
* Take the currently-defined set of streams and configure the HAL to use
* them. This is a long-running operation (may be several hundred ms).
*
@@ -289,21 +277,6 @@
virtual status_t triggerPrecaptureMetering(uint32_t id) = 0;
/**
- * Abstract interface for clients that want to listen to reprocess buffer
- * release events
- */
- struct BufferReleasedListener : public virtual RefBase {
- virtual void onBufferReleased(buffer_handle_t *handle) = 0;
- };
-
- /**
- * Push a buffer to be reprocessed into a reprocessing stream, and
- * provide a listener to call once the buffer is returned by the HAL
- */
- virtual status_t pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) = 0;
-
- /**
* Flush all pending and in-flight requests. Blocks until flush is
* complete.
* Output lastFrameNumber is the last frame number of the previous streaming request.
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index bbeeca6..56ba5b6 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -23,6 +23,8 @@
#include <chrono>
#include <inttypes.h>
#include <hidl/ServiceManagement.h>
+#include <functional>
+#include <camera_metadata_hidden.h>
namespace android {
@@ -221,7 +223,9 @@
}
status_t CameraProviderManager::setUpVendorTags() {
- // TODO (b/34275821): support aggregating vendor tags for more than one provider
+ sp<VendorTagDescriptorCache> tagCache = new VendorTagDescriptorCache();
+
+ VendorTagDescriptorCache::clearGlobalVendorTagCache();
for (auto& provider : mProviders) {
hardware::hidl_vec<VendorTagSection> vts;
Status status;
@@ -242,8 +246,6 @@
return mapToStatusT(status);
}
- VendorTagDescriptor::clearGlobalVendorTagDescriptor();
-
// Read all vendor tag definitions into a descriptor
sp<VendorTagDescriptor> desc;
status_t res;
@@ -255,9 +257,11 @@
return res;
}
- // Set the global descriptor to use with camera metadata
- VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+ tagCache->addVendorDescriptor(provider->mProviderTagid, desc);
}
+
+ VendorTagDescriptorCache::setAsGlobalVendorTagCache(tagCache);
+
return OK;
}
@@ -350,6 +354,24 @@
return nullptr;
}
+metadata_vendor_id_t CameraProviderManager::getProviderTagIdLocked(
+ const std::string& id, hardware::hidl_version minVersion,
+ hardware::hidl_version maxVersion) const {
+ metadata_vendor_id_t ret = CAMERA_METADATA_INVALID_VENDOR_ID;
+
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
+ for (auto& provider : mProviders) {
+ for (auto& deviceInfo : provider->mDevices) {
+ if (deviceInfo->mId == id &&
+ minVersion <= deviceInfo->mVersion &&
+ maxVersion >= deviceInfo->mVersion) {
+ return provider->mProviderTagid;
+ }
+ }
+ }
+
+ return ret;
+}
status_t CameraProviderManager::addProviderLocked(const std::string& newProvider, bool expected) {
for (const auto& providerInfo : mProviders) {
@@ -430,6 +452,7 @@
CameraProviderManager *manager) :
mProviderName(providerName),
mInterface(interface),
+ mProviderTagid(generateVendorTagId(providerName)),
mManager(manager) {
(void) mManager;
}
@@ -542,10 +565,12 @@
std::unique_ptr<DeviceInfo> deviceInfo;
switch (major) {
case 1:
- deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, id, minor);
+ deviceInfo = initializeDeviceInfo<DeviceInfo1>(name, mProviderTagid,
+ id, minor);
break;
case 3:
- deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, id, minor);
+ deviceInfo = initializeDeviceInfo<DeviceInfo3>(name, mProviderTagid,
+ id, minor);
break;
default:
ALOGE("%s: Device %s: Unknown HIDL device HAL major version %d:", __FUNCTION__,
@@ -691,7 +716,7 @@
template<class DeviceInfoT>
std::unique_ptr<CameraProviderManager::ProviderInfo::DeviceInfo>
CameraProviderManager::ProviderInfo::initializeDeviceInfo(
- const std::string &name,
+ const std::string &name, const metadata_vendor_id_t tagId,
const std::string &id, uint16_t minorVersion) const {
Status status;
@@ -711,7 +736,8 @@
return nullptr;
}
return std::unique_ptr<DeviceInfo>(
- new DeviceInfoT(name, id, minorVersion, resourceCost, cameraInterface));
+ new DeviceInfoT(name, tagId, id, minorVersion, resourceCost,
+ cameraInterface));
}
template<class InterfaceT>
@@ -782,11 +808,12 @@
}
CameraProviderManager::ProviderInfo::DeviceInfo1::DeviceInfo1(const std::string& name,
- const std::string &id,
+ const metadata_vendor_id_t tagId, const std::string &id,
uint16_t minorVersion,
const CameraResourceCost& resourceCost,
sp<InterfaceT> interface) :
- DeviceInfo(name, id, hardware::hidl_version{1, minorVersion}, resourceCost),
+ DeviceInfo(name, tagId, id, hardware::hidl_version{1, minorVersion},
+ resourceCost),
mInterface(interface) {
// Get default parameters and initialize flash unit availability
// Requires powering on the camera device
@@ -869,11 +896,12 @@
}
CameraProviderManager::ProviderInfo::DeviceInfo3::DeviceInfo3(const std::string& name,
- const std::string &id,
+ const metadata_vendor_id_t tagId, const std::string &id,
uint16_t minorVersion,
const CameraResourceCost& resourceCost,
sp<InterfaceT> interface) :
- DeviceInfo(name, id, hardware::hidl_version{3, minorVersion}, resourceCost),
+ DeviceInfo(name, tagId, id, hardware::hidl_version{3, minorVersion},
+ resourceCost),
mInterface(interface) {
// Get camera characteristics and initialize flash unit availability
Status status;
@@ -884,6 +912,7 @@
if (s == Status::OK) {
camera_metadata_t *buffer =
reinterpret_cast<camera_metadata_t*>(metadata.data());
+ set_camera_metadata_vendor_id(buffer, mProviderTagid);
mCameraCharacteristics = buffer;
}
});
@@ -1004,6 +1033,17 @@
return OK;
}
+metadata_vendor_id_t CameraProviderManager::ProviderInfo::generateVendorTagId(
+ const std::string &name) {
+ metadata_vendor_id_t ret = std::hash<std::string> {} (name);
+ // CAMERA_METADATA_INVALID_VENDOR_ID is not a valid hash value
+ if (CAMERA_METADATA_INVALID_VENDOR_ID == ret) {
+ ret = 0;
+ }
+
+ return ret;
+}
+
status_t CameraProviderManager::ProviderInfo::parseDeviceName(const std::string& name,
uint16_t *major, uint16_t *minor, std::string *type, std::string *id) {
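
generateVendorTagId() derives a provider tag id from the provider name and remaps the one reserved value. A standalone sketch of the same rule (the sentinel value here is an assumption for illustration, not the real constant):

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <string>

    using vendor_id_t = std::uint64_t;
    constexpr vendor_id_t kInvalidVendorId = ~static_cast<vendor_id_t>(0);  // assumed sentinel

    vendor_id_t generateVendorTagId(const std::string& providerName) {
        vendor_id_t id = std::hash<std::string>{}(providerName);
        if (id == kInvalidVendorId) {
            id = 0;                       // the sentinel is reserved, remap it
        }
        return id;
    }

    int main() {
        std::cout << generateVendorTagId("legacy/0") << "\n";
        return 0;
    }
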
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a388db5..2df4fd5 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -224,6 +224,13 @@
static status_t mapToStatusT(const hardware::camera::common::V1_0::Status& s);
static const char* statusToString(const hardware::camera::common::V1_0::Status& s);
+ /*
+ * Return the vendor tag id of the provider for a specific device.
+ */
+ metadata_vendor_id_t getProviderTagIdLocked(const std::string& id,
+ hardware::hidl_version minVersion = hardware::hidl_version{0,0},
+ hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+
private:
// All private members, unless otherwise noted, expect mInterfaceMutex to be locked before use
mutable std::mutex mInterfaceMutex;
@@ -241,6 +248,7 @@
{
const std::string mProviderName;
const sp<hardware::camera::provider::V2_4::ICameraProvider> mInterface;
+ const metadata_vendor_id_t mProviderTagid;
ProviderInfo(const std::string &providerName,
sp<hardware::camera::provider::V2_4::ICameraProvider>& interface,
@@ -274,6 +282,7 @@
const std::string mName; // Full instance name
const std::string mId; // ID section of full name
const hardware::hidl_version mVersion;
+ const metadata_vendor_id_t mProviderTagid;
const hardware::camera::common::V1_0::CameraResourceCost mResourceCost;
@@ -287,10 +296,11 @@
return INVALID_OPERATION;
}
- DeviceInfo(const std::string& name, const std::string &id,
- const hardware::hidl_version& version,
+ DeviceInfo(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, const hardware::hidl_version& version,
const hardware::camera::common::V1_0::CameraResourceCost& resourceCost) :
- mName(name), mId(id), mVersion(version), mResourceCost(resourceCost),
+ mName(name), mId(id), mVersion(version), mProviderTagid(tagId),
+ mResourceCost(resourceCost),
mStatus(hardware::camera::common::V1_0::CameraDeviceStatus::PRESENT),
mHasFlashUnit(false) {}
virtual ~DeviceInfo();
@@ -312,8 +322,8 @@
virtual status_t setTorchMode(bool enabled) override;
virtual status_t getCameraInfo(hardware::CameraInfo *info) const override;
- DeviceInfo1(const std::string& name, const std::string &id,
- uint16_t minorVersion,
+ DeviceInfo1(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, uint16_t minorVersion,
const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
sp<InterfaceT> interface);
virtual ~DeviceInfo1();
@@ -331,8 +341,8 @@
virtual status_t getCameraCharacteristics(
CameraMetadata *characteristics) const override;
- DeviceInfo3(const std::string& name, const std::string &id,
- uint16_t minorVersion,
+ DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
+ const std::string &id, uint16_t minorVersion,
const hardware::camera::common::V1_0::CameraResourceCost& resourceCost,
sp<InterfaceT> interface);
virtual ~DeviceInfo3();
@@ -352,7 +362,8 @@
// right CameraProvider getCameraDeviceInterface_* method.
template<class DeviceInfoT>
std::unique_ptr<DeviceInfo> initializeDeviceInfo(const std::string &name,
- const std::string &id, uint16_t minorVersion) const;
+ const metadata_vendor_id_t tagId, const std::string &id,
+ uint16_t minorVersion) const;
// Helper for initializeDeviceInfo to use the right CameraProvider get method.
template<class InterfaceT>
@@ -365,6 +376,9 @@
// Parse device instance name for device version, type, and id.
static status_t parseDeviceName(const std::string& name,
uint16_t *major, uint16_t *minor, std::string *type, std::string *id);
+
+ // Generate vendor tag id
+ static metadata_vendor_id_t generateVendorTagId(const std::string &name);
};
// Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
index b52c0d8..0fe09d9 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.cpp
@@ -193,6 +193,36 @@
return hardware::Void();
}
+hardware::Return<void> CameraHardwareInterface::handleCallbackTimestampBatch(
+ DataCallbackMsg msgType,
+ const hardware::hidl_vec<hardware::camera::device::V1_0::HandleTimestampMessage>& messages) {
+ std::vector<android::HandleTimestampMessage> msgs;
+ msgs.reserve(messages.size());
+
+ for (const auto& hidl_msg : messages) {
+ if (mHidlMemPoolMap.count(hidl_msg.data) == 0) {
+ ALOGE("%s: memory pool ID %d not found", __FUNCTION__, hidl_msg.data);
+ return hardware::Void();
+ }
+ sp<CameraHeapMemory> mem(
+ static_cast<CameraHeapMemory *>(mHidlMemPoolMap.at(hidl_msg.data)->handle));
+
+ if (hidl_msg.bufferIndex >= mem->mNumBufs) {
+ ALOGE("%s: invalid buffer index %d, max allowed is %d", __FUNCTION__,
+ hidl_msg.bufferIndex, mem->mNumBufs);
+ return hardware::Void();
+ }
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*)
+ mem->mBuffers[hidl_msg.bufferIndex]->pointer();
+ md->pHandle = const_cast<native_handle_t*>(hidl_msg.frameData.getNativeHandle());
+
+ msgs.push_back({hidl_msg.timestamp, mem->mBuffers[hidl_msg.bufferIndex]});
+ }
+
+ mDataCbTimestampBatch((int32_t) msgType, msgs, mCbUser);
+ return hardware::Void();
+}
+
std::pair<bool, uint64_t> CameraHardwareInterface::getBufferId(
ANativeWindowBuffer* anb) {
std::lock_guard<std::mutex> lock(mBufferIdMapLock);
@@ -365,7 +395,7 @@
}
hardware::Return<Status>
-CameraHardwareInterface::setUsage(hardware::graphics::allocator::V2_0::ProducerUsage usage) {
+CameraHardwareInterface::setUsage(hardware::graphics::common::V1_0::BufferUsage usage) {
Status s = Status::INTERNAL_ERROR;
ANativeWindow *a = mPreviewWindow.get();
if (a == nullptr) {
@@ -468,11 +498,13 @@
void CameraHardwareInterface::setCallbacks(notify_callback notify_cb,
data_callback data_cb,
data_callback_timestamp data_cb_timestamp,
+ data_callback_timestamp_batch data_cb_timestamp_batch,
void* user)
{
mNotifyCb = notify_cb;
mDataCb = data_cb;
mDataCbTimestamp = data_cb_timestamp;
+ mDataCbTimestampBatch = data_cb_timestamp_batch;
mCbUser = user;
ALOGV("%s(%s)", __FUNCTION__, mName.string());
@@ -628,6 +660,44 @@
}
}
+void CameraHardwareInterface::releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames)
+{
+ ALOGV("%s(%s)", __FUNCTION__, mName.string());
+ size_t n = frames.size();
+ std::vector<VideoFrameMessage> msgs;
+ msgs.reserve(n);
+ for (auto& mem : frames) {
+ if (CC_LIKELY(mHidlDevice != nullptr)) {
+ ssize_t offset;
+ size_t size;
+ sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
+ if (size == sizeof(VideoNativeHandleMetadata)) {
+ uint32_t heapId = heap->getHeapID();
+ uint32_t bufferIndex = offset / size;
+ VideoNativeHandleMetadata* md = (VideoNativeHandleMetadata*) mem->pointer();
+ // Cache the handle here because md->pHandle may be overwritten by the HAL.
+ native_handle_t* nh = md->pHandle;
+ msgs.push_back({nh, heapId, bufferIndex});
+ } else {
+ ALOGE("%s only supports VideoNativeHandleMetadata mode", __FUNCTION__);
+ return;
+ }
+ } else {
+ ALOGE("Non HIDL mode do not support %s", __FUNCTION__);
+ return;
+ }
+ }
+
+ mHidlDevice->releaseRecordingFrameHandleBatch(msgs);
+
+ for (auto& msg : msgs) {
+ native_handle_t* nh = const_cast<native_handle_t*>(msg.frameData.getNativeHandle());
+ native_handle_close(nh);
+ native_handle_delete(nh);
+ }
+}
+
status_t CameraHardwareInterface::autoFocus()
{
ALOGV("%s(%s)", __FUNCTION__, mName.string());
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 88ab2e9..4bd879f 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -48,6 +48,15 @@
const sp<IMemory> &dataPtr,
void *user);
+struct HandleTimestampMessage {
+ nsecs_t timestamp;
+ const sp<IMemory> dataPtr;
+};
+
+typedef void (*data_callback_timestamp_batch)(
+ int32_t msgType,
+ const std::vector<HandleTimestampMessage>&, void* user);
+
/**
* CameraHardwareInterface.h defines the interface to the
* camera hardware abstraction layer, used for setting and getting
@@ -112,6 +121,7 @@
void setCallbacks(notify_callback notify_cb,
data_callback data_cb,
data_callback_timestamp data_cb_timestamp,
+ data_callback_timestamp_batch data_cb_timestamp_batch,
void* user);
/**
@@ -227,6 +237,20 @@
void releaseRecordingFrame(const sp<IMemory>& mem);
/**
+ * Release a batch of recording frames previously returned by
+ * CAMERA_MSG_VIDEO_FRAME. This method only supports frames that are
+ * stored as VideoNativeHandleMetadata.
+ *
+ * It is the camera HAL client's responsibility to release video recording
+ * frames sent out by the camera HAL before the camera HAL receives
+ * a call to disableMsgType(CAMERA_MSG_VIDEO_FRAME). After it receives
+ * that call, it is the camera HAL's responsibility to manage the
+ * life-cycle of the video recording frames.
+ */
+ void releaseRecordingFrameBatch(const std::vector<sp<IMemory>>& frames);
+
+ /**
* Start auto focus, the notification callback routine is called
* with CAMERA_MSG_FOCUS once when focusing is complete. autoFocus()
* will be called again if another auto focus is needed.
@@ -416,6 +440,10 @@
hardware::camera::device::V1_0::DataCallbackMsg msgType,
const hardware::hidl_handle& frameData, uint32_t data,
uint32_t bufferIndex, int64_t timestamp) override;
+ hardware::Return<void> handleCallbackTimestampBatch(
+ hardware::camera::device::V1_0::DataCallbackMsg msgType,
+ const hardware::hidl_vec<
+ hardware::camera::device::V1_0::HandleTimestampMessage>&) override;
/**
* Implementation of android::hardware::camera::device::V1_0::ICameraDevicePreviewCallback
@@ -433,7 +461,7 @@
hardware::Return<hardware::camera::common::V1_0::Status>
setCrop(int32_t left, int32_t top, int32_t right, int32_t bottom) override;
hardware::Return<hardware::camera::common::V1_0::Status>
- setUsage(hardware::graphics::allocator::V2_0::ProducerUsage usage) override;
+ setUsage(hardware::graphics::common::V1_0::BufferUsage usage) override;
hardware::Return<hardware::camera::common::V1_0::Status>
setSwapInterval(int32_t interval) override;
hardware::Return<void> getMinUndequeuedBufferCount(
@@ -450,9 +478,10 @@
struct camera_preview_window mHalPreviewWindow;
- notify_callback mNotifyCb;
- data_callback mDataCb;
- data_callback_timestamp mDataCbTimestamp;
+ notify_callback mNotifyCb;
+ data_callback mDataCb;
+ data_callback_timestamp mDataCbTimestamp;
+ data_callback_timestamp_batch mDataCbTimestampBatch;
void *mCbUser;
// Cached values for preview stream parameters
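CameraHardwareInterface.h above adds HandleTimestampMessage and the data_callback_timestamp_batch function-pointer type. As a hedged illustration only, here is what a receiver matching that signature might look like; the receiver itself and the delivery step are hypothetical and not part of this change, while the struct fields and releaseRecordingFrameBatch() come from this patch:

    // Hypothetical receiver for the data_callback_timestamp_batch type declared above;
    // assumes the surrounding CameraHardwareInterface.h declarations are in scope.
    static void dataCallbackTimestampBatch(int32_t msgType,
            const std::vector<HandleTimestampMessage>& msgs, void* user) {
        (void) msgType; (void) user;
        // Split the batch into the parallel timestamp/frame vectors used by the
        // batched binder callbacks elsewhere in this change.
        std::vector<nsecs_t> timestamps;
        std::vector<sp<IMemory>> frames;
        timestamps.reserve(msgs.size());
        frames.reserve(msgs.size());
        for (const auto& msg : msgs) {
            timestamps.push_back(msg.timestamp);
            frames.push_back(msg.dataPtr);
        }
        // ... deliver to the client; once every frame has been consumed, the whole
        // batch can be returned in one releaseRecordingFrameBatch(frames) call.
    }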
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 1de2edc..7d8d61e 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -74,7 +74,8 @@
mNextReprocessResultFrameNumber(0),
mNextShutterFrameNumber(0),
mNextReprocessShutterFrameNumber(0),
- mListener(NULL)
+ mListener(NULL),
+ mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
{
ATRACE_CALL();
camera3_callback_ops::notify = &sNotify;
@@ -202,6 +203,8 @@
// for now use 3_4 to keep legacy devices working
mDeviceVersion = CAMERA_DEVICE_API_VERSION_3_4;
mInterface = std::make_unique<HalInterface>(session);
+ std::string providerType;
+ mVendorTagId = manager->getProviderTagIdLocked(mId.string());
return initializeCommonLocked();
}
@@ -225,6 +228,8 @@
/** Create buffer manager */
mBufferManager = new Camera3BufferManager();
+ mTagMonitor.initialize(mVendorTagId);
+
bool aeLockAvailable = false;
camera_metadata_entry aeLockAvailableEntry = mDeviceInfo.find(
ANDROID_CONTROL_AE_LOCK_AVAILABLE);
@@ -495,7 +500,7 @@
return dataSpace;
}
-ConsumerUsageFlags Camera3Device::mapToConsumerUsage(
+BufferUsageFlags Camera3Device::mapToConsumerUsage(
uint32_t usage) {
return usage;
}
@@ -550,12 +555,12 @@
}
uint32_t Camera3Device::mapConsumerToFrameworkUsage(
- ConsumerUsageFlags usage) {
+ BufferUsageFlags usage) {
return usage;
}
uint32_t Camera3Device::mapProducerToFrameworkUsage(
- ProducerUsageFlags usage) {
+ BufferUsageFlags usage) {
return usage;
}
@@ -1399,15 +1404,6 @@
return OK;
}
-status_t Camera3Device::createReprocessStreamFromStream(int outputId, int *id) {
- ATRACE_CALL();
- (void)outputId; (void)id;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
-
status_t Camera3Device::getStreamInfo(int id,
uint32_t *width, uint32_t *height,
uint32_t *format, android_dataspace *dataSpace) {
@@ -1523,14 +1519,6 @@
return res;
}
-status_t Camera3Device::deleteReprocessStream(int id) {
- ATRACE_CALL();
- (void)id;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
status_t Camera3Device::configureStreams(int operatingMode) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
@@ -1604,6 +1592,7 @@
return res;
}
+ set_camera_metadata_vendor_id(rawRequest, mVendorTagId);
mRequestTemplateCache[templateId].acquire(rawRequest);
// Derive some new keys for backward compatibility
@@ -1856,15 +1845,6 @@
sizeof(trigger)/sizeof(trigger[0]));
}
-status_t Camera3Device::pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) {
- ATRACE_CALL();
- (void)reprocessStreamId; (void)buffer; (void)listener;
-
- CLOGE("Unimplemented");
- return INVALID_OPERATION;
-}
-
status_t Camera3Device::flush(int64_t *frameNumber) {
ATRACE_CALL();
ALOGV("%s: Camera %s: Flushing all requests", __FUNCTION__, mId.string());
@@ -2563,6 +2543,11 @@
const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
if (result == nullptr) return;
+ camera_metadata_t *meta = const_cast<camera_metadata_t *>(
+ result->mMetadata.getAndLock());
+ set_camera_metadata_vendor_id(meta, mVendorTagId);
+ result->mMetadata.unlock(meta);
+
if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
(int32_t*)&frameNumber, 1) != OK) {
SET_ERR("Failed to set frame number %d in metadata", frameNumber);
@@ -2934,6 +2919,13 @@
InFlightRequest &r = mInFlightMap.editValueAt(idx);
r.requestStatus = msg.error_code;
resultExtras = r.resultExtras;
+ if (hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT ==
+ errorCode) {
+ // In case of a missing result, check whether the buffers
+ // have already been returned. If they have, remove the
+ // in-flight request.
+ removeInFlightRequestIfReadyLocked(idx);
+ }
} else {
resultExtras.frameNumber = msg.frame_number;
ALOGE("Camera %s: %s: cannot find in-flight request on "
@@ -3154,7 +3146,9 @@
Stream &dst = requestedConfiguration.streams[i];
camera3_stream_t *src = config->streams[i];
- int streamId = Camera3Stream::cast(src)->getId();
+ Camera3Stream* cam3stream = Camera3Stream::cast(src);
+ cam3stream->setBufferFreedListener(this);
+ int streamId = cam3stream->getId();
StreamType streamType;
switch (src->stream_type) {
case CAMERA3_STREAM_OUTPUT:
@@ -3359,9 +3353,21 @@
wrapAsHidlRequest(requests[i], /*out*/&captureRequests[i], /*out*/&handlesCreated);
}
+ std::vector<device::V3_2::BufferCache> cachesToRemove;
+ {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+ for (auto& pair : mFreedBuffers) {
+ // The stream might have been removed since onBufferFreed
+ if (mBufferIdMaps.find(pair.first) != mBufferIdMaps.end()) {
+ cachesToRemove.push_back({pair.first, pair.second});
+ }
+ }
+ mFreedBuffers.clear();
+ }
+
common::V1_0::Status status = common::V1_0::Status::INTERNAL_ERROR;
*numRequestProcessed = 0;
- mHidlSession->processCaptureRequest(captureRequests,
+ mHidlSession->processCaptureRequest(captureRequests, cachesToRemove,
[&status, &numRequestProcessed] (auto s, uint32_t n) {
status = s;
*numRequestProcessed = n;
@@ -3469,12 +3475,40 @@
auto it = bIdMap.find(buf);
if (it == bIdMap.end()) {
bIdMap[buf] = mNextBufferId++;
+ ALOGV("stream %d now have %zu buffer caches, buf %p",
+ streamId, bIdMap.size(), buf);
return std::make_pair(true, mNextBufferId - 1);
} else {
return std::make_pair(false, it->second);
}
}
+void Camera3Device::HalInterface::onBufferFreed(
+ int streamId, const native_handle_t* handle) {
+ std::lock_guard<std::mutex> lock(mBufferIdMapLock);
+ uint64_t bufferId = BUFFER_ID_NO_BUFFER;
+ auto mapIt = mBufferIdMaps.find(streamId);
+ if (mapIt == mBufferIdMaps.end()) {
+ // streamId might be from a deleted stream here
+ ALOGI("%s: stream %d has been removed",
+ __FUNCTION__, streamId);
+ return;
+ }
+ BufferIdMap& bIdMap = mapIt->second;
+ auto it = bIdMap.find(handle);
+ if (it == bIdMap.end()) {
+ ALOGW("%s: cannot find buffer %p in stream %d",
+ __FUNCTION__, handle, streamId);
+ return;
+ } else {
+ bufferId = it->second;
+ bIdMap.erase(it);
+ ALOGV("%s: stream %d now have %zu buffer caches after removing buf %p",
+ __FUNCTION__, streamId, bIdMap.size(), handle);
+ }
+ mFreedBuffers.push_back(std::make_pair(streamId, bufferId));
+}
+
/**
* RequestThread inner class methods
*/
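The pieces above cooperate as follows: Camera3Stream reports a freed buffer through the new Camera3StreamBufferFreedListener, HalInterface::onBufferFreed() drops the cached buffer id and queues a (streamId, bufferId) pair, and the next processCaptureRequest() drains those pairs into the cachesToRemove argument sent to the HAL. A simplified, self-contained sketch of that bookkeeping, with stand-in types (the real code keys on buffer_handle_t and emits device::V3_2::BufferCache entries):

    #include <cstdint>
    #include <map>
    #include <mutex>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct BufferCacheBookkeeping {
        using BufferIdMap = std::unordered_map<const void*, uint64_t>;

        std::mutex mBufferIdMapLock;
        std::map<int, BufferIdMap> mBufferIdMaps;             // streamId -> handle -> bufferId
        std::vector<std::pair<int, uint64_t>> mFreedBuffers;  // pending cache removals
        uint64_t mNextBufferId = 1;

        // Assign (or look up) the cache id for a buffer, mirroring getBufferId() above.
        uint64_t getBufferId(int streamId, const void* handle) {
            std::lock_guard<std::mutex> lock(mBufferIdMapLock);
            BufferIdMap& bIdMap = mBufferIdMaps[streamId];
            auto it = bIdMap.find(handle);
            if (it == bIdMap.end()) {
                bIdMap[handle] = mNextBufferId++;
                return mNextBufferId - 1;
            }
            return it->second;
        }

        // Called when a stream frees a buffer: forget its cached id and remember
        // that the HAL-side cache entry must be invalidated.
        void onBufferFreed(int streamId, const void* handle) {
            std::lock_guard<std::mutex> lock(mBufferIdMapLock);
            auto mapIt = mBufferIdMaps.find(streamId);
            if (mapIt == mBufferIdMaps.end()) return;   // stream already deleted
            auto it = mapIt->second.find(handle);
            if (it == mapIt->second.end()) return;      // handle was never cached
            mFreedBuffers.emplace_back(streamId, it->second);
            mapIt->second.erase(it);
        }

        // Drained once per capture request and forwarded to the HAL, as the
        // cachesToRemove list is in processCaptureRequest() above.
        std::vector<std::pair<int, uint64_t>> drainFreedBuffers() {
            std::lock_guard<std::mutex> lock(mBufferIdMapLock);
            std::vector<std::pair<int, uint64_t>> out = std::move(mFreedBuffers);
            mFreedBuffers.clear();
            return out;
        }
    };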
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index d873b27..9c0210b 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -39,6 +39,7 @@
#include "device3/StatusTracker.h"
#include "device3/Camera3BufferManager.h"
#include "utils/TagMonitor.h"
+#include <camera_metadata_hidden.h>
/**
* Function pointer types with C calling convention to
@@ -125,7 +126,6 @@
status_t createInputStream(
uint32_t width, uint32_t height, int format,
int *id) override;
- status_t createReprocessStreamFromStream(int outputId, int *id) override;
status_t getStreamInfo(int id,
uint32_t *width, uint32_t *height,
@@ -133,7 +133,6 @@
status_t setStreamTransform(int id, int transform) override;
status_t deleteStream(int id) override;
- status_t deleteReprocessStream(int id) override;
status_t configureStreams(int operatingMode =
static_cast<int>(hardware::camera::device::V3_2::StreamConfigurationMode::NORMAL_MODE))
@@ -155,9 +154,6 @@
status_t triggerCancelAutofocus(uint32_t id) override;
status_t triggerPrecaptureMetering(uint32_t id) override;
- status_t pushReprocessBuffer(int reprocessStreamId,
- buffer_handle_t *buffer, wp<BufferReleasedListener> listener) override;
-
status_t flush(int64_t *lastFrameNumber = NULL) override;
status_t prepare(int streamId) override;
@@ -228,7 +224,7 @@
* Adapter for legacy HAL / HIDL HAL interface calls; calls either into legacy HALv3 or the
* HIDL HALv3 interfaces.
*/
- class HalInterface {
+ class HalInterface : public camera3::Camera3StreamBufferFreedListener {
public:
HalInterface(camera3_device_t *device);
HalInterface(sp<hardware::camera::device::V3_2::ICameraDeviceSession> &session);
@@ -326,6 +322,10 @@
// buffer_handle_t's FD won't change.
// return pair of (newlySeenBuffer?, bufferId)
std::pair<bool, uint64_t> getBufferId(const buffer_handle_t& buf, int streamId);
+
+ virtual void onBufferFreed(int streamId, const native_handle_t* handle) override;
+
+ std::vector<std::pair<int, uint64_t>> mFreedBuffers;
};
std::unique_ptr<HalInterface> mInterface;
@@ -598,7 +598,7 @@
static hardware::graphics::common::V1_0::PixelFormat mapToPixelFormat(int frameworkFormat);
static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
android_dataspace dataSpace);
- static hardware::camera::device::V3_2::ConsumerUsageFlags mapToConsumerUsage(uint32_t usage);
+ static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint32_t usage);
static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
camera3_stream_rotation_t rotation);
// Returns a negative error code if the passed-in operation mode is not valid.
@@ -607,9 +607,9 @@
static camera3_buffer_status_t mapHidlBufferStatus(hardware::camera::device::V3_2::BufferStatus status);
static int mapToFrameworkFormat(hardware::graphics::common::V1_0::PixelFormat pixelFormat);
static uint32_t mapConsumerToFrameworkUsage(
- hardware::camera::device::V3_2::ConsumerUsageFlags usage);
+ hardware::camera::device::V3_2::BufferUsageFlags usage);
static uint32_t mapProducerToFrameworkUsage(
- hardware::camera::device::V3_2::ProducerUsageFlags usage);
+ hardware::camera::device::V3_2::BufferUsageFlags usage);
struct RequestTrigger {
// Metadata tag number, e.g. android.control.aePrecaptureTrigger
@@ -1065,6 +1065,8 @@
void monitorMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const CameraMetadata& metadata);
+ metadata_vendor_id_t mVendorTagId;
+
/**
* Static callback forwarding methods from HAL to instance
*/
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 1469b74..4eb15ad 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -263,6 +263,8 @@
mConsumer->setName(String8::format("Camera3-InputStream-%d", mId));
mProducer = producer;
+
+ mConsumer->setBufferFreedListener(this);
}
res = mConsumer->setDefaultBufferSize(camera3_stream::width,
@@ -288,6 +290,17 @@
return OK;
}
+void Camera3InputStream::onBufferFreed(const wp<GraphicBuffer>& gb) {
+ const sp<GraphicBuffer> buffer = gb.promote();
+ if (buffer != nullptr) {
+ if (mBufferFreedListener != nullptr) {
+ mBufferFreedListener->onBufferFreed(mId, buffer->handle);
+ }
+ } else {
+ ALOGE("%s: GraphicBuffer is freed before onBufferFreed callback finishes!", __FUNCTION__);
+ }
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 9f3de10..8f5b431 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -34,7 +34,8 @@
* buffers by feeding them into the HAL, as well as releasing the buffers back
* once the HAL is done with them.
*/
-class Camera3InputStream : public Camera3IOStreamBase {
+class Camera3InputStream : public Camera3IOStreamBase,
+ public BufferItemConsumer::BufferFreedListener {
public:
/**
* Set up a stream for formats that have fixed size, such as RAW and YUV.
@@ -77,6 +78,11 @@
virtual status_t getEndpointUsage(uint32_t *usage) const;
+ /**
+ * BufferItemConsumer::BufferFreedListener interface
+ */
+ virtual void onBufferFreed(const wp<GraphicBuffer>&) override;
+
}; // class Camera3InputStream
}; // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 51dc20a..e46d55e 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -347,7 +347,9 @@
// Configure consumer-side ANativeWindow interface. The listener may be used
// to notify buffer manager (if it is used) of the returned buffers.
- res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA, /*listener*/mBufferReleasedListener);
+ res = mConsumer->connect(NATIVE_WINDOW_API_CAMERA,
+ /*listener*/mBufferReleasedListener,
+ /*reportBufferRemoval*/true);
if (res != OK) {
ALOGE("%s: Unable to connect to native window for stream %d",
__FUNCTION__, mId);
@@ -543,6 +545,14 @@
}
}
+ if (res == OK) {
+ std::vector<sp<GraphicBuffer>> removedBuffers;
+ res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+ if (res == OK) {
+ onBuffersRemovedLocked(removedBuffers);
+ }
+ }
+
return res;
}
@@ -686,6 +696,16 @@
}
}
+void Camera3OutputStream::onBuffersRemovedLocked(
+ const std::vector<sp<GraphicBuffer>>& removedBuffers) {
+ Camera3StreamBufferFreedListener* callback = mBufferFreedListener;
+ if (callback != nullptr) {
+ for (auto gb : removedBuffers) {
+ callback->onBufferFreed(mId, gb->handle);
+ }
+ }
+}
+
status_t Camera3OutputStream::detachBuffer(sp<GraphicBuffer>* buffer, int* fenceFd) {
Mutex::Autolock l(mLock);
@@ -718,7 +738,12 @@
}
}
- return OK;
+ std::vector<sp<GraphicBuffer>> removedBuffers;
+ res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+ if (res == OK) {
+ onBuffersRemovedLocked(removedBuffers);
+ }
+ return res;
}
status_t Camera3OutputStream::notifyBufferReleased(ANativeWindowBuffer* /*anwBuffer*/) {
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 24e4e05..86676e4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -263,6 +263,8 @@
virtual status_t getEndpointUsage(uint32_t *usage) const;
+ void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
+
}; // class Camera3OutputStream
} // namespace camera3
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 53a3168..2b1a899 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -812,6 +812,18 @@
}
}
+void Camera3Stream::setBufferFreedListener(
+ Camera3StreamBufferFreedListener* listener) {
+ Mutex::Autolock l(mLock);
+ // Only allow setting the listener during stream configuration, because the stream
+ // is guaranteed to be IDLE in that state, so setBufferFreedListener won't collide
+ // with onBufferFreed callbacks.
+ if (mState != STATE_IN_CONFIG && mState != STATE_IN_RECONFIG) {
+ ALOGE("%s: listener must be set during stream configuration!", __FUNCTION__);
+ return;
+ }
+ mBufferFreedListener = listener;
+}
+
}; // namespace camera3
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 56cb827..27ef86d 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -365,6 +365,11 @@
void removeBufferListener(
const sp<Camera3StreamBufferListener>& listener);
+
+ // Setting a listener will remove the previous listener (if one exists)
+ virtual void setBufferFreedListener(
+ Camera3StreamBufferFreedListener* listener) override;
+
/**
* Return if the buffer queue of the stream is abandoned.
*/
@@ -408,6 +413,8 @@
android_dataspace dataSpace, camera3_stream_rotation_t rotation,
int setId);
+ Camera3StreamBufferFreedListener* mBufferFreedListener;
+
/**
* Interface to be implemented by derived classes
*/
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
new file mode 100644
index 0000000..478a752
--- /dev/null
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferFreedListener.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+#define ANDROID_SERVERS_CAMERA3_STREAMBUFFERFREEDLISTENER_H
+
+#include <gui/Surface.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+namespace camera3 {
+
+class Camera3StreamBufferFreedListener {
+public:
+ // onBufferFreed is called when a buffer is no longer being managed
+ // by this stream. It will not be called when all buffers are freed
+ // due to stream disconnection.
+ //
+ // The input handle may be deleted after this callback returns, so
+ // dereferencing the handle after the callback is illegal and may
+ // lead to a crash.
+ //
+ // This callback is invoked while Camera3Stream's lock is held, so
+ // calling into other Camera3Stream APIs from within it will
+ // lead to deadlock.
+ virtual void onBufferFreed(int streamId, const native_handle_t* handle) = 0;
+
+ virtual ~Camera3StreamBufferFreedListener() {}
+};
+
+}; //namespace camera3
+}; //namespace android
+
+#endif
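To make the contract above concrete, here is a minimal, purely illustrative implementer of the new interface (in this patch the real implementer is Camera3Device::HalInterface):

    #define LOG_TAG "LoggingBufferFreedListener"
    #include <utils/Log.h>
    #include "Camera3StreamBufferFreedListener.h"

    namespace android {
    namespace camera3 {

    // Logs freed buffers and nothing else. It must be registered through
    // setBufferFreedListener() while the stream is being configured, and it must
    // not call back into Camera3Stream, since the stream's lock is held here.
    class LoggingBufferFreedListener : public Camera3StreamBufferFreedListener {
      public:
        void onBufferFreed(int streamId, const native_handle_t* handle) override {
            ALOGV("stream %d freed buffer handle %p", streamId, handle);
            // 'handle' may be deleted once this returns; do not keep or dereference it.
        }
    };

    } // namespace camera3
    } // namespace android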
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index f7b092f..37b7c36 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -19,6 +19,7 @@
#include <utils/RefBase.h>
#include "Camera3StreamBufferListener.h"
+#include "Camera3StreamBufferFreedListener.h"
struct camera3_stream_buffer;
@@ -287,6 +288,15 @@
wp<Camera3StreamBufferListener> listener) = 0;
virtual void removeBufferListener(
const sp<Camera3StreamBufferListener>& listener) = 0;
+
+ /**
+ * Setting a listener will remove the previous listener (if it exists).
+ * The listener may only be set during stream configuration, because the stream is
+ * guaranteed to be IDLE in that state, so setBufferFreedListener won't collide with
+ * onBufferFreed callbacks.
+ * The client is responsible for keeping the listener object alive throughout the
+ * lifecycle of this Camera3Stream.
+ */
+ virtual void setBufferFreedListener(Camera3StreamBufferFreedListener* listener) = 0;
};
} // namespace camera3
diff --git a/services/camera/libcameraservice/tests/Android.mk b/services/camera/libcameraservice/tests/Android.mk
index 179643b..37a05c2 100644
--- a/services/camera/libcameraservice/tests/Android.mk
+++ b/services/camera/libcameraservice/tests/Android.mk
@@ -22,6 +22,9 @@
libcameraservice \
libhidlbase \
liblog \
+ libhidltransport \
+ libcamera_client \
+ libcamera_metadata \
libutils \
android.hardware.camera.common@1.0 \
android.hardware.camera.provider@2.4 \
@@ -29,6 +32,7 @@
android.hardware.camera.device@3.2
LOCAL_C_INCLUDES += \
+ system/media/private/camera/include \
LOCAL_CFLAGS += -Wall -Wextra -Werror
diff --git a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
index eb934ba..c1d6e85 100644
--- a/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
+++ b/services/camera/libcameraservice/tests/CameraProviderManagerTest.cpp
@@ -20,38 +20,104 @@
#include "../common/CameraProviderManager.h"
#include <android/hidl/manager/1.0/IServiceManager.h>
#include <android/hidl/manager/1.0/IServiceNotification.h>
-
+#include <android/hardware/camera/device/3.2/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.2/ICameraDeviceSession.h>
+#include <camera_metadata_hidden.h>
#include <gtest/gtest.h>
using namespace android;
using namespace android::hardware::camera;
using android::hardware::camera::common::V1_0::Status;
+using android::hardware::camera::common::V1_0::VendorTag;
+using android::hardware::camera::common::V1_0::VendorTagSection;
+using android::hardware::camera::common::V1_0::CameraMetadataType;
+using android::hardware::camera::device::V3_2::ICameraDeviceCallback;
+using android::hardware::camera::device::V3_2::ICameraDeviceSession;
+
+/**
+ * Basic test implementation of a camera ver. 3.2 device interface
+ */
+struct TestDeviceInterface : public device::V3_2::ICameraDevice {
+ std::vector<hardware::hidl_string> mDeviceNames;
+ TestDeviceInterface(std::vector<hardware::hidl_string> deviceNames) :
+ mDeviceNames(deviceNames) {}
+ using getResourceCost_cb = std::function<void(
+ hardware::camera::common::V1_0::Status status,
+ const hardware::camera::common::V1_0::CameraResourceCost& resourceCost)>;
+ virtual ::android::hardware::Return<void> getResourceCost(
+ getResourceCost_cb _hidl_cb) override {
+ hardware::camera::common::V1_0::CameraResourceCost resourceCost = {100,
+ mDeviceNames};
+ _hidl_cb(Status::OK, resourceCost);
+ return hardware::Void();
+ }
+
+ using getCameraCharacteristics_cb = std::function<void(
+ hardware::camera::common::V1_0::Status status,
+ const hardware::hidl_vec<uint8_t>& cameraCharacteristics)>;
+ hardware::Return<void> getCameraCharacteristics(
+ getCameraCharacteristics_cb _hidl_cb) override {
+ hardware::hidl_vec<uint8_t> cameraCharacteristics;
+ _hidl_cb(Status::OK, cameraCharacteristics);
+ return hardware::Void();
+ }
+
+ hardware::Return<hardware::camera::common::V1_0::Status> setTorchMode(
+ ::android::hardware::camera::common::V1_0::TorchMode) override {
+ return Status::OK;
+ }
+
+ using open_cb = std::function<void(
+ ::android::hardware::camera::common::V1_0::Status status,
+ const ::android::sp<ICameraDeviceSession>& session)>;
+ hardware::Return<void> open(
+ const ::android::sp<ICameraDeviceCallback>&,
+ open_cb _hidl_cb) override {
+ sp<ICameraDeviceSession> deviceSession = nullptr;
+ _hidl_cb(Status::OK, deviceSession);
+ return hardware::Void();
+ }
+
+ hardware::Return<void> dumpState(
+ const ::android::hardware::hidl_handle&) override {
+ return hardware::Void();
+ }
+};
/**
* Basic test implementation of a camera provider
*/
struct TestICameraProvider : virtual public provider::V2_4::ICameraProvider {
- sp<provider::V2_4::ICameraProviderCallbacks> mCallbacks;
-
+ sp<provider::V2_4::ICameraProviderCallback> mCallbacks;
std::vector<hardware::hidl_string> mDeviceNames;
+ sp<device::V3_2::ICameraDevice> mDeviceInterface;
+ hardware::hidl_vec<common::V1_0::VendorTagSection> mVendorTagSections;
- TestICameraProvider() {
- mDeviceNames.push_back("device@3.2/test/0");
- mDeviceNames.push_back("device@1.0/test/0");
- mDeviceNames.push_back("device@3.2/test/1");
- }
+ TestICameraProvider(const std::vector<hardware::hidl_string> &devices,
+ const hardware::hidl_vec<common::V1_0::VendorTagSection> &vendorSection) :
+ mDeviceNames(devices),
+ mDeviceInterface(new TestDeviceInterface(devices)),
+ mVendorTagSections (vendorSection) {}
- virtual hardware::Return<Status> setCallbacks(
- const sp<provider::V2_4::ICameraProviderCallbacks>& callbacks) override {
+ virtual hardware::Return<Status> setCallback(
+ const sp<provider::V2_4::ICameraProviderCallback>& callbacks) override {
mCallbacks = callbacks;
return hardware::Return<Status>(Status::OK);
}
using getVendorTags_cb = std::function<void(Status status,
const hardware::hidl_vec<common::V1_0::VendorTagSection>& sections)>;
- virtual hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
- hardware::hidl_vec<common::V1_0::VendorTagSection> sections;
- _hidl_cb(Status::OK, sections);
+ hardware::Return<void> getVendorTags(getVendorTags_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, mVendorTagSections);
+ return hardware::Void();
+ }
+
+ using isSetTorchModeSupported_cb = std::function<void(
+ ::android::hardware::camera::common::V1_0::Status status,
+ bool support)>;
+ virtual ::android::hardware::Return<void> isSetTorchModeSupported(
+ isSetTorchModeSupported_cb _hidl_cb) override {
+ _hidl_cb(Status::OK, false);
return hardware::Void();
}
@@ -68,17 +134,17 @@
const hardware::hidl_string& cameraDeviceName,
getCameraDeviceInterface_V1_x_cb _hidl_cb) override {
(void) cameraDeviceName;
- _hidl_cb(Status::OK, nullptr);
+ _hidl_cb(Status::OK, nullptr); //TODO: impl. of ver. 1.0 device interface
+ // otherwise enumeration will fail.
return hardware::Void();
}
using getCameraDeviceInterface_V3_x_cb = std::function<void(Status status,
const sp<device::V3_2::ICameraDevice>& device)>;
virtual hardware::Return<void> getCameraDeviceInterface_V3_x(
- const hardware::hidl_string& cameraDeviceName,
+ const hardware::hidl_string&,
getCameraDeviceInterface_V3_x_cb _hidl_cb) override {
- (void) cameraDeviceName;
- _hidl_cb(Status::OK, nullptr);
+ _hidl_cb(Status::OK, mDeviceInterface);
return hardware::Void();
}
@@ -90,12 +156,13 @@
*/
struct TestInteractionProxy : public CameraProviderManager::ServiceInteractionProxy {
sp<hidl::manager::V1_0::IServiceNotification> mManagerNotificationInterface;
- const sp<TestICameraProvider> mTestCameraProvider;
+ sp<TestICameraProvider> mTestCameraProvider;
- TestInteractionProxy() :
- mTestCameraProvider(new TestICameraProvider()) {
-
+ TestInteractionProxy() {}
+ void setProvider(sp<TestICameraProvider> provider) {
+ mTestCameraProvider = provider;
}
+
std::string mLastRequestedServiceName;
virtual ~TestInteractionProxy() {}
@@ -116,13 +183,30 @@
};
-TEST(CameraProviderManagerTest, InitializeTest) {
+struct TestStatusListener : public CameraProviderManager::StatusListener {
+ ~TestStatusListener() {}
+ void onDeviceStatusChanged(const String8 &,
+ hardware::camera::common::V1_0::CameraDeviceStatus) override {}
+ void onTorchStatusChanged(const String8 &,
+ hardware::camera::common::V1_0::TorchModeStatus) override {}
+};
+
+TEST(CameraProviderManagerTest, InitializeTest) {
+ std::vector<hardware::hidl_string> deviceNames;
+ deviceNames.push_back("device@3.2/test/0");
+ deviceNames.push_back("device@1.0/test/0");
+ deviceNames.push_back("device@3.2/test/1");
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
status_t res;
sp<CameraProviderManager> providerManager = new CameraProviderManager();
- TestInteractionProxy serviceProxy{};
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+ serviceProxy.setProvider(provider);
- res = providerManager->initialize(&serviceProxy);
+ res = providerManager->initialize(statusListener, &serviceProxy);
ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
hardware::hidl_string legacyInstanceName = "legacy/0";
@@ -139,3 +223,145 @@
ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
"Incorrect instance requested from service manager";
}
+
+TEST(CameraProviderManagerTest, MultipleVendorTagTest) {
+ hardware::hidl_string sectionName = "VendorTestSection";
+ hardware::hidl_string tagName = "VendorTestTag";
+ uint32_t tagId = VENDOR_SECTION << 16;
+ hardware::hidl_vec<common::V1_0::VendorTagSection> vendorSection;
+ CameraMetadataType tagType = CameraMetadataType::BYTE;
+ vendorSection.resize(1);
+ vendorSection[0].sectionName = sectionName;
+ vendorSection[0].tags.resize(1);
+ vendorSection[0].tags[0].tagId = tagId;
+ vendorSection[0].tags[0].tagName = tagName;
+ vendorSection[0].tags[0].tagType = tagType;
+ std::vector<hardware::hidl_string> deviceNames = {"device@3.2/test/0"};
+
+ sp<CameraProviderManager> providerManager = new CameraProviderManager();
+ sp<TestStatusListener> statusListener = new TestStatusListener();
+ TestInteractionProxy serviceProxy;
+
+ sp<TestICameraProvider> provider = new TestICameraProvider(deviceNames,
+ vendorSection);
+ serviceProxy.setProvider(provider);
+
+ auto res = providerManager->initialize(statusListener, &serviceProxy);
+ ASSERT_EQ(res, OK) << "Unable to initialize provider manager";
+
+ hardware::hidl_string testProviderInstanceName = "test/0";
+ hardware::hidl_string testProviderFqInterfaceName =
+ "android.hardware.camera.provider@2.4::ICameraProvider";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName, testProviderInstanceName, false);
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName, testProviderInstanceName) <<
+ "Incorrect instance requested from service manager";
+
+ hardware::hidl_string sectionNameSecond = "SecondVendorTestSection";
+ hardware::hidl_string secondTagName = "SecondVendorTestTag";
+ CameraMetadataType secondTagType = CameraMetadataType::DOUBLE;
+ vendorSection[0].sectionName = sectionNameSecond;
+ vendorSection[0].tags[0].tagId = tagId;
+ vendorSection[0].tags[0].tagName = secondTagName;
+ vendorSection[0].tags[0].tagType = secondTagType;
+ deviceNames = {"device@3.2/test2/1"};
+
+ sp<TestICameraProvider> secondProvider = new TestICameraProvider(
+ deviceNames, vendorSection);
+ serviceProxy.setProvider(secondProvider);
+ hardware::hidl_string testProviderSecondInstanceName = "test2/0";
+ serviceProxy.mManagerNotificationInterface->onRegistration(
+ testProviderFqInterfaceName, testProviderSecondInstanceName, false);
+ ASSERT_EQ(serviceProxy.mLastRequestedServiceName,
+ testProviderSecondInstanceName) <<
+ "Incorrect instance requested from service manager";
+
+ ASSERT_EQ(NO_ERROR, providerManager->setUpVendorTags());
+ sp<VendorTagDescriptorCache> vendorCache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ ASSERT_NE(nullptr, vendorCache.get());
+
+ metadata_vendor_id_t vendorId = std::hash<std::string> {} (
+ testProviderInstanceName.c_str());
+ metadata_vendor_id_t vendorIdSecond = std::hash<std::string> {} (
+ testProviderSecondInstanceName.c_str());
+
+ hardware::hidl_string resultTag = vendorCache->getTagName(tagId, vendorId);
+ ASSERT_EQ(resultTag, tagName);
+
+ resultTag = vendorCache->getTagName(tagId, vendorIdSecond);
+ ASSERT_EQ(resultTag, secondTagName);
+
+ // Check whether we can create two separate CameraMetadata instances
+ // using different vendor tag providers.
+ camera_metadata *metaBuffer = allocate_camera_metadata(10, 20);
+ ASSERT_NE(nullptr, metaBuffer);
+ set_camera_metadata_vendor_id(metaBuffer, vendorId);
+ CameraMetadata metadata(metaBuffer);
+
+ uint8_t byteVal = 10;
+ ASSERT_TRUE(metadata.isEmpty());
+ ASSERT_EQ(OK, metadata.update(tagId, &byteVal, 1));
+ ASSERT_FALSE(metadata.isEmpty());
+ ASSERT_TRUE(metadata.exists(tagId));
+
+ metaBuffer = allocate_camera_metadata(10, 20);
+ ASSERT_NE(nullptr, metaBuffer);
+ set_camera_metadata_vendor_id(metaBuffer, vendorIdSecond);
+ CameraMetadata secondMetadata(metaBuffer);
+
+ ASSERT_TRUE(secondMetadata.isEmpty());
+ double doubleVal = 1.0f;
+ ASSERT_EQ(OK, secondMetadata.update(tagId, &doubleVal, 1));
+ ASSERT_FALSE(secondMetadata.isEmpty());
+ ASSERT_TRUE(secondMetadata.exists(tagId));
+
+ // Check whether CameraMetadata copying works as expected
+ CameraMetadata metadataCopy(metadata);
+ ASSERT_FALSE(metadataCopy.isEmpty());
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+ ASSERT_EQ(OK, metadataCopy.update(tagId, &byteVal, 1));
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+
+ // Check whether values are as expected
+ camera_metadata_entry_t entry = metadata.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(byteVal, entry.data.u8[0]);
+ entry = secondMetadata.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+ // Swap and erase
+ secondMetadata.swap(metadataCopy);
+ ASSERT_TRUE(metadataCopy.exists(tagId));
+ ASSERT_TRUE(secondMetadata.exists(tagId));
+ ASSERT_EQ(OK, secondMetadata.erase(tagId));
+ ASSERT_TRUE(secondMetadata.isEmpty());
+ doubleVal = 0.0f;
+ ASSERT_EQ(OK, metadataCopy.update(tagId, &doubleVal, 1));
+ entry = metadataCopy.find(tagId);
+ ASSERT_EQ(1u, entry.count);
+ ASSERT_EQ(doubleVal, entry.data.d[0]);
+
+ // Append
+ uint8_t sceneMode = ANDROID_CONTROL_SCENE_MODE_ACTION;
+ secondMetadata.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+ // Appending from two different vendor tag providers is not supported!
+ ASSERT_NE(OK, metadataCopy.append(secondMetadata));
+ ASSERT_EQ(OK, metadataCopy.erase(tagId));
+ metadataCopy.update(ANDROID_CONTROL_SCENE_MODE, &sceneMode, 1);
+ // However, appending from the same vendor tag provider should be fine
+ ASSERT_EQ(OK, metadata.append(secondMetadata));
+ // Appending from metadata without a vendor tag provider should be supported
+ CameraMetadata regularMetadata(10, 20);
+ uint8_t controlMode = ANDROID_CONTROL_MODE_AUTO;
+ regularMetadata.update(ANDROID_CONTROL_MODE, &controlMode, 1);
+ ASSERT_EQ(OK, secondMetadata.append(regularMetadata));
+ ASSERT_EQ(2u, secondMetadata.entryCount());
+ ASSERT_EQ(2u, metadata.entryCount());
+
+ // Dump
+ metadata.dump(1, 2);
+ metadataCopy.dump(1, 2);
+ secondMetadata.dump(1, 2);
+}
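The essence of MultipleVendorTagTest is that a camera_metadata buffer is stamped with its provider's vendor id, so the same numeric tag resolves against the right provider's descriptor. A small sketch distilling that flow; the helper names are illustrative, but every call appears in the test above:

    #include <camera/CameraMetadata.h>
    #include <camera/VendorTagDescriptor.h>
    #include <camera_metadata_hidden.h>
    #include <hidl/HidlSupport.h>

    using namespace android;

    // Allocate a CameraMetadata bound to one provider's vendor tags.
    static CameraMetadata makeProviderMetadata(metadata_vendor_id_t vendorId) {
        camera_metadata_t* buf = allocate_camera_metadata(/*entryCapacity*/ 10,
                                                          /*dataCapacity*/ 20);
        set_camera_metadata_vendor_id(buf, vendorId); // stamp the owning provider
        return CameraMetadata(buf);                   // CameraMetadata takes ownership
    }

    // Resolve a vendor tag's name against a specific provider; the same tagId can
    // yield a different name (and type) for a different vendor id.
    static hardware::hidl_string lookupTagName(uint32_t tagId,
            metadata_vendor_id_t vendorId) {
        sp<VendorTagDescriptorCache> cache =
                VendorTagDescriptorCache::getGlobalVendorTagCache();
        if (cache == nullptr) return hardware::hidl_string();
        hardware::hidl_string name = cache->getTagName(tagId, vendorId);
        return name;
    }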
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index f1b65bd..dec97d7 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -23,12 +23,14 @@
#include <inttypes.h>
#include <utils/Log.h>
#include <camera/VendorTagDescriptor.h>
+#include <camera_metadata_hidden.h>
namespace android {
TagMonitor::TagMonitor():
mMonitoringEnabled(false),
- mMonitoringEvents(kMaxMonitorEvents)
+ mMonitoringEvents(kMaxMonitorEvents),
+ mVendorTagId(CAMERA_METADATA_INVALID_VENDOR_ID)
{}
const char* TagMonitor::k3aTags =
@@ -55,6 +57,13 @@
sp<VendorTagDescriptor> vTags =
VendorTagDescriptor::getGlobalVendorTagDescriptor();
+ if ((nullptr == vTags.get()) || (0 >= vTags->getTagCount())) {
+ sp<VendorTagDescriptorCache> cache =
+ VendorTagDescriptorCache::getGlobalVendorTagCache();
+ if (cache.get()) {
+ cache->getVendorTagDescriptor(mVendorTagId, &vTags);
+ }
+ }
bool gotTag = false;
@@ -104,6 +113,15 @@
camera_metadata_ro_entry entry = metadata.find(tag);
CameraMetadata &lastValues = (source == REQUEST) ?
mLastMonitoredRequestValues : mLastMonitoredResultValues;
+ if (lastValues.isEmpty()) {
+ lastValues = CameraMetadata(mMonitoredTagList.size());
+ const camera_metadata_t *metaBuffer =
+ lastValues.getAndLock();
+ set_camera_metadata_vendor_id(
+ const_cast<camera_metadata_t *> (metaBuffer), mVendorTagId);
+ lastValues.unlock(metaBuffer);
+ }
+
camera_metadata_entry lastEntry = lastValues.find(tag);
if (entry.count > 0) {
@@ -129,16 +147,21 @@
}
if (isDifferent) {
- ALOGV("%s: Tag %s changed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+ ALOGV("%s: Tag %s changed", __FUNCTION__,
+ get_local_camera_metadata_tag_name_vendor_id(
+ tag, mVendorTagId));
lastValues.update(entry);
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
}
} else if (lastEntry.count > 0) {
// Value has been removed
- ALOGV("%s: Tag %s removed", __FUNCTION__, get_camera_metadata_tag_name(tag));
+ ALOGV("%s: Tag %s removed", __FUNCTION__,
+ get_local_camera_metadata_tag_name_vendor_id(
+ tag, mVendorTagId));
lastValues.erase(tag);
entry.tag = tag;
- entry.type = get_camera_metadata_tag_type(tag);
+ entry.type = get_local_camera_metadata_tag_type_vendor_id(tag,
+ mVendorTagId);
entry.count = 0;
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry);
}
@@ -152,8 +175,10 @@
dprintf(fd, " Tag monitoring enabled for tags:\n");
for (uint32_t tag : mMonitoredTagList) {
dprintf(fd, " %s.%s\n",
- get_camera_metadata_section_name(tag),
- get_camera_metadata_tag_name(tag));
+ get_local_camera_metadata_section_name_vendor_id(tag,
+ mVendorTagId),
+ get_local_camera_metadata_tag_name_vendor_id(tag,
+ mVendorTagId));
}
} else {
dprintf(fd, " Tag monitoring disabled (enable with -m <name1,..,nameN>)\n");
@@ -166,8 +191,10 @@
event.frameNumber, event.timestamp,
indentation,
event.source == REQUEST ? "REQ:" : "RES:",
- get_camera_metadata_section_name(event.tag),
- get_camera_metadata_tag_name(event.tag));
+ get_local_camera_metadata_section_name_vendor_id(event.tag,
+ mVendorTagId),
+ get_local_camera_metadata_tag_name_vendor_id(event.tag,
+ mVendorTagId));
if (event.newData.size() == 0) {
dprintf(fd, " (Removed)\n");
} else {
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index d7aa419..7155314 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -27,6 +27,7 @@
#include <media/RingBuffer.h>
#include <system/camera_metadata.h>
+#include <system/camera_vendor_tags.h>
#include <camera/CameraMetadata.h>
namespace android {
@@ -44,6 +45,8 @@
TagMonitor();
+ void initialize(metadata_vendor_id_t id) { mVendorTagId = id; }
+
// Parse tag name list (comma-separated) and if valid, enable monitoring
// If invalid, do nothing.
// Recognizes "3a" as a shortcut for enabling tracking 3A state, mode, and
@@ -100,6 +103,7 @@
// 3A fields to use with the "3a" option
static const char *k3aTags;
+ metadata_vendor_id_t mVendorTagId;
};
} // namespace android
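TagMonitor now carries the owning provider's vendor tag id, handed over through initialize() from Camera3Device, because the lazily created lastValues cache in monitorMetadata() has to be stamped with that id before vendor tags can be updated or printed. A short sketch of just the stamping step used in several places in this patch (the helper name is illustrative):

    #include <camera/CameraMetadata.h>
    #include <camera_metadata_hidden.h>

    using namespace android;

    // Stamp a CameraMetadata's underlying buffer with a provider's vendor id,
    // the same getAndLock / set_camera_metadata_vendor_id / unlock pattern that
    // monitorMetadata() and Camera3Device use above.
    static void stampVendorId(CameraMetadata& meta, metadata_vendor_id_t vendorTagId) {
        const camera_metadata_t* buf = meta.getAndLock();
        set_camera_metadata_vendor_id(const_cast<camera_metadata_t*>(buf), vendorTagId);
        meta.unlock(buf);
    }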
diff --git a/services/mediaanalytics/Android.mk b/services/mediaanalytics/Android.mk
index ef49df4..f7197af 100644
--- a/services/mediaanalytics/Android.mk
+++ b/services/mediaanalytics/Android.mk
@@ -18,6 +18,7 @@
libgui \
libmedia \
libmediautils \
+ libmediametrics \
libstagefright_foundation \
libutils
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index 38717b5..3a4546b 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -54,9 +54,7 @@
::android::hardware::configureRpcThreadpool(64, false);
sp<ProcessState> proc(ProcessState::self());
- int32_t trebleOmx = property_get_int32("persist.media.treble_omx", -1);
- if ((trebleOmx == 1) || ((trebleOmx == -1) &&
- property_get_bool("persist.hal.binderization", 0))) {
+ if (property_get_bool("persist.media.treble_omx", true)) {
using namespace ::android::hardware::media::omx::V1_0;
sp<IOmx> omx = new implementation::Omx();
if (omx == nullptr) {
diff --git a/services/mediadrm/Android.mk b/services/mediadrm/Android.mk
index 87fddd4..1d5fa07 100644
--- a/services/mediadrm/Android.mk
+++ b/services/mediadrm/Android.mk
@@ -40,7 +40,13 @@
endif
LOCAL_MODULE:= mediadrmserver
+
+# TODO: Some legacy DRM plugins only support 32-bit. They need to be migrated to
+# 64-bit. (b/18948909) Once all of a device's legacy DRM plugins support 64-bit,
+# that device can turn on ENABLE_MEDIADRM_64 to build this service as 64-bit.
+ifneq ($(ENABLE_MEDIADRM_64), true)
LOCAL_32_BIT_ONLY := true
+endif
LOCAL_INIT_RC := mediadrmserver.rc