Merge "stagefright: fix ABitReader integer over/underflow" into nyc-dev
diff --git a/camera/Android.mk b/camera/Android.mk
index de23953..1a3382f 100644
--- a/camera/Android.mk
+++ b/camera/Android.mk
@@ -18,7 +18,24 @@
LOCAL_PATH := $(CAMERA_CLIENT_LOCAL_PATH)
-LOCAL_SRC_FILES:= \
+LOCAL_AIDL_INCLUDES := \
+ frameworks/av/camera/aidl \
+ frameworks/base/core/java \
+ frameworks/native/aidl/gui
+
+# AIDL files for camera interfaces
+# The headers for these interfaces will be available to any modules that
+# include libcamera_client, at the path "aidl/package/path/BnFoo.h"
+
+LOCAL_SRC_FILES := \
+ aidl/android/hardware/ICameraService.aidl \
+ aidl/android/hardware/ICameraServiceListener.aidl \
+ aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl \
+ aidl/android/hardware/camera2/ICameraDeviceUser.aidl
+
+# Source for camera interface parcelables, and manually-written interfaces
+
+LOCAL_SRC_FILES += \
Camera.cpp \
CameraMetadata.cpp \
CameraParameters.cpp \
@@ -26,15 +43,12 @@
CameraParameters2.cpp \
ICamera.cpp \
ICameraClient.cpp \
- ICameraService.cpp \
- ICameraServiceListener.cpp \
ICameraServiceProxy.cpp \
ICameraRecordingProxy.cpp \
ICameraRecordingProxyListener.cpp \
- camera2/ICameraDeviceUser.cpp \
- camera2/ICameraDeviceCallbacks.cpp \
camera2/CaptureRequest.cpp \
camera2/OutputConfiguration.cpp \
+ camera2/SubmitInfo.cpp \
CameraBase.cpp \
CameraUtils.cpp \
VendorTagDescriptor.cpp
@@ -53,6 +67,13 @@
system/media/camera/include \
system/media/private/camera/include \
frameworks/native/include/media/openmax \
+ frameworks/av/include/camera
+
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ system/media/camera/include \
+ frameworks/av/include/camera
+
+LOCAL_CFLAGS += -Werror -Wall -Wextra
LOCAL_MODULE:= libcamera_client
diff --git a/camera/Camera.cpp b/camera/Camera.cpp
index 1289348..c52e581 100644
--- a/camera/Camera.cpp
+++ b/camera/Camera.cpp
@@ -24,10 +24,10 @@
#include <binder/IServiceManager.h>
#include <binder/IMemory.h>
-#include <camera/Camera.h>
-#include <camera/ICameraRecordingProxyListener.h>
-#include <camera/ICameraService.h>
-#include <camera/ICamera.h>
+#include <Camera.h>
+#include <ICameraRecordingProxyListener.h>
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/ICamera.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
@@ -40,10 +40,10 @@
}
CameraTraits<Camera>::TCamConnectService CameraTraits<Camera>::fnConnectService =
- &ICameraService::connect;
+ &::android::hardware::ICameraService::connect;
// construct a camera client from an existing camera remote
-sp<Camera> Camera::create(const sp<ICamera>& camera)
+sp<Camera> Camera::create(const sp<::android::hardware::ICamera>& camera)
{
ALOGV("create");
if (camera == 0) {
@@ -84,21 +84,51 @@
{
ALOGV("%s: connect legacy camera device", __FUNCTION__);
sp<Camera> c = new Camera(cameraId);
- sp<ICameraClient> cl = c;
+ sp<::android::hardware::ICameraClient> cl = c;
status_t status = NO_ERROR;
- const sp<ICameraService>& cs = CameraBaseT::getCameraService();
+ const sp<::android::hardware::ICameraService>& cs = CameraBaseT::getCameraService();
- if (cs != 0) {
- status = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
- clientUid, /*out*/c->mCamera);
+ binder::Status ret;
+ if (cs != nullptr) {
+ ret = cs.get()->connectLegacy(cl, cameraId, halVersion, clientPackageName,
+ clientUid, /*out*/&(c->mCamera));
}
- if (status == OK && c->mCamera != 0) {
+ if (ret.isOk() && c->mCamera != nullptr) {
IInterface::asBinder(c->mCamera)->linkToDeath(c);
c->mStatus = NO_ERROR;
camera = c;
} else {
- ALOGW("An error occurred while connecting to camera %d: %d (%s)",
- cameraId, status, strerror(-status));
+ switch(ret.serviceSpecificErrorCode()) {
+ case hardware::ICameraService::ERROR_DISCONNECTED:
+ status = -ENODEV;
+ break;
+ case hardware::ICameraService::ERROR_CAMERA_IN_USE:
+ status = -EBUSY;
+ break;
+ case hardware::ICameraService::ERROR_INVALID_OPERATION:
+ status = -EINVAL;
+ break;
+ case hardware::ICameraService::ERROR_MAX_CAMERAS_IN_USE:
+ status = -EUSERS;
+ break;
+ case hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT:
+ status = BAD_VALUE;
+ break;
+ case hardware::ICameraService::ERROR_DEPRECATED_HAL:
+ status = -EOPNOTSUPP;
+ break;
+ case hardware::ICameraService::ERROR_DISABLED:
+ status = -EACCES;
+ break;
+ case hardware::ICameraService::ERROR_PERMISSION_DENIED:
+ status = PERMISSION_DENIED;
+ break;
+ default:
+ status = -EINVAL;
+ ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
+                        (cs == nullptr) ? "Service not available" : ret.toString8().string());
+ break;
+ }
c.clear();
}
return status;
@@ -107,21 +137,21 @@
status_t Camera::reconnect()
{
ALOGV("reconnect");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->connect(this);
}
status_t Camera::lock()
{
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->lock();
}
status_t Camera::unlock()
{
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->unlock();
}
@@ -130,7 +160,7 @@
status_t Camera::setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setPreviewTarget(%p)", bufferProducer.get());
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
ALOGD_IF(bufferProducer == 0, "app passed NULL surface");
return c->setPreviewTarget(bufferProducer);
@@ -139,7 +169,7 @@
status_t Camera::setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer)
{
ALOGV("setVideoTarget(%p)", bufferProducer.get());
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
ALOGD_IF(bufferProducer == 0, "app passed NULL video surface");
return c->setVideoTarget(bufferProducer);
@@ -149,7 +179,7 @@
status_t Camera::startPreview()
{
ALOGV("startPreview");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->startPreview();
}
@@ -157,7 +187,7 @@
status_t Camera::setVideoBufferMode(int32_t videoBufferMode)
{
ALOGV("setVideoBufferMode: %d", videoBufferMode);
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->setVideoBufferMode(videoBufferMode);
}
@@ -166,7 +196,7 @@
status_t Camera::startRecording()
{
ALOGV("startRecording");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->startRecording();
}
@@ -175,7 +205,7 @@
void Camera::stopPreview()
{
ALOGV("stopPreview");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return;
c->stopPreview();
}
@@ -188,7 +218,7 @@
Mutex::Autolock _l(mLock);
mRecordingProxyListener.clear();
}
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return;
c->stopRecording();
}
@@ -197,7 +227,7 @@
void Camera::releaseRecordingFrame(const sp<IMemory>& mem)
{
ALOGV("releaseRecordingFrame");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return;
c->releaseRecordingFrame(mem);
}
@@ -206,7 +236,7 @@
bool Camera::previewEnabled()
{
ALOGV("previewEnabled");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return false;
return c->previewEnabled();
}
@@ -215,7 +245,7 @@
bool Camera::recordingEnabled()
{
ALOGV("recordingEnabled");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return false;
return c->recordingEnabled();
}
@@ -223,7 +253,7 @@
status_t Camera::autoFocus()
{
ALOGV("autoFocus");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->autoFocus();
}
@@ -231,7 +261,7 @@
status_t Camera::cancelAutoFocus()
{
ALOGV("cancelAutoFocus");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->cancelAutoFocus();
}
@@ -240,7 +270,7 @@
status_t Camera::takePicture(int msgType)
{
ALOGV("takePicture: 0x%x", msgType);
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->takePicture(msgType);
}
@@ -249,7 +279,7 @@
status_t Camera::setParameters(const String8& params)
{
ALOGV("setParameters");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->setParameters(params);
}
@@ -259,7 +289,7 @@
{
ALOGV("getParameters");
String8 params;
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c != 0) params = mCamera->getParameters();
return params;
}
@@ -268,7 +298,7 @@
status_t Camera::sendCommand(int32_t cmd, int32_t arg1, int32_t arg2)
{
ALOGV("sendCommand");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->sendCommand(cmd, arg1, arg2);
}
@@ -288,7 +318,7 @@
void Camera::setPreviewCallbackFlags(int flag)
{
ALOGV("setPreviewCallbackFlags");
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return;
mCamera->setPreviewCallbackFlag(flag);
}
@@ -296,7 +326,7 @@
status_t Camera::setPreviewCallbackTarget(
const sp<IGraphicBufferProducer>& callbackProducer)
{
- sp <ICamera> c = mCamera;
+ sp <::android::hardware::ICamera> c = mCamera;
if (c == 0) return NO_INIT;
return c->setPreviewCallbackTarget(callbackProducer);
}
diff --git a/camera/CameraBase.cpp b/camera/CameraBase.cpp
index 9ee7ae5..15d7715 100644
--- a/camera/CameraBase.cpp
+++ b/camera/CameraBase.cpp
@@ -21,12 +21,13 @@
#include <utils/threads.h>
#include <utils/Mutex.h>
+#include <android/hardware/ICameraService.h>
+
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <binder/IMemory.h>
#include <camera/CameraBase.h>
-#include <camera/ICameraService.h>
// needed to instantiate
#include <camera/Camera.h>
@@ -35,8 +36,28 @@
namespace android {
+namespace hardware {
+
+status_t CameraInfo::writeToParcel(Parcel* parcel) const {
+ status_t res;
+ res = parcel->writeInt32(facing);
+ if (res != OK) return res;
+ res = parcel->writeInt32(orientation);
+ return res;
+}
+
+status_t CameraInfo::readFromParcel(const Parcel* parcel) {
+ status_t res;
+ res = parcel->readInt32(&facing);
+ if (res != OK) return res;
+ res = parcel->readInt32(&orientation);
+ return res;
+}
+
+}
+
namespace {
- sp<ICameraService> gCameraService;
+ sp<::android::hardware::ICameraService> gCameraService;
const int kCameraServicePollDelay = 500000; // 0.5s
const char* kCameraServiceName = "media.camera";
@@ -65,7 +86,7 @@
// establish binder interface to camera service
template <typename TCam, typename TCamTraits>
-const sp<ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
+const sp<::android::hardware::ICameraService>& CameraBase<TCam, TCamTraits>::getCameraService()
{
Mutex::Autolock _l(gLock);
if (gCameraService.get() == 0) {
@@ -83,7 +104,7 @@
gDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(gDeathNotifier);
- gCameraService = interface_cast<ICameraService>(binder);
+ gCameraService = interface_cast<::android::hardware::ICameraService>(binder);
}
ALOGE_IF(gCameraService == 0, "no CameraService!?");
return gCameraService;
@@ -97,19 +118,20 @@
ALOGV("%s: connect", __FUNCTION__);
sp<TCam> c = new TCam(cameraId);
sp<TCamCallbacks> cl = c;
- status_t status = NO_ERROR;
- const sp<ICameraService>& cs = getCameraService();
+ const sp<::android::hardware::ICameraService>& cs = getCameraService();
- if (cs != 0) {
+ binder::Status ret;
+ if (cs != nullptr) {
TCamConnectService fnConnectService = TCamTraits::fnConnectService;
- status = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
- clientPid, /*out*/ c->mCamera);
+ ret = (cs.get()->*fnConnectService)(cl, cameraId, clientPackageName, clientUid,
+ clientPid, /*out*/ &c->mCamera);
}
- if (status == OK && c->mCamera != 0) {
+ if (ret.isOk() && c->mCamera != nullptr) {
IInterface::asBinder(c->mCamera)->linkToDeath(c);
c->mStatus = NO_ERROR;
} else {
- ALOGW("An error occurred while connecting to camera: %d", cameraId);
+ ALOGW("An error occurred while connecting to camera %d: %s", cameraId,
+            (cs == nullptr) ? "Service not available" : ret.toString8().string());
c.clear();
}
return c;
@@ -182,38 +204,50 @@
template <typename TCam, typename TCamTraits>
int CameraBase<TCam, TCamTraits>::getNumberOfCameras() {
- const sp<ICameraService> cs = getCameraService();
+ const sp<::android::hardware::ICameraService> cs = getCameraService();
if (!cs.get()) {
// as required by the public Java APIs
return 0;
}
- return cs->getNumberOfCameras();
+ int32_t count;
+ binder::Status res = cs->getNumberOfCameras(
+ ::android::hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
+ &count);
+ if (!res.isOk()) {
+ ALOGE("Error reading number of cameras: %s",
+ res.toString8().string());
+ count = 0;
+ }
+ return count;
}
// this can be in BaseCamera but it should be an instance method
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::getCameraInfo(int cameraId,
- struct CameraInfo* cameraInfo) {
- const sp<ICameraService>& cs = getCameraService();
+ struct hardware::CameraInfo* cameraInfo) {
+ const sp<::android::hardware::ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
- return cs->getCameraInfo(cameraId, cameraInfo);
+ binder::Status res = cs->getCameraInfo(cameraId, cameraInfo);
+ return res.isOk() ? OK : res.serviceSpecificErrorCode();
}
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::addServiceListener(
- const sp<ICameraServiceListener>& listener) {
- const sp<ICameraService>& cs = getCameraService();
+ const sp<::android::hardware::ICameraServiceListener>& listener) {
+ const sp<::android::hardware::ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
- return cs->addListener(listener);
+ binder::Status res = cs->addListener(listener);
+ return res.isOk() ? OK : res.serviceSpecificErrorCode();
}
template <typename TCam, typename TCamTraits>
status_t CameraBase<TCam, TCamTraits>::removeServiceListener(
- const sp<ICameraServiceListener>& listener) {
- const sp<ICameraService>& cs = getCameraService();
+ const sp<::android::hardware::ICameraServiceListener>& listener) {
+ const sp<::android::hardware::ICameraService>& cs = getCameraService();
if (cs == 0) return UNKNOWN_ERROR;
- return cs->removeListener(listener);
+ binder::Status res = cs->removeListener(listener);
+ return res.isOk() ? OK : res.serviceSpecificErrorCode();
}
template class CameraBase<Camera>;
diff --git a/camera/CameraMetadata.cpp b/camera/CameraMetadata.cpp
index 46bcc1d..c78fc5d 100644
--- a/camera/CameraMetadata.cpp
+++ b/camera/CameraMetadata.cpp
@@ -79,7 +79,7 @@
return mBuffer;
}
-status_t CameraMetadata::unlock(const camera_metadata_t *buffer) {
+status_t CameraMetadata::unlock(const camera_metadata_t *buffer) const {
if (!mLocked) {
ALOGE("%s: Can't unlock a non-locked CameraMetadata!", __FUNCTION__);
return INVALID_OPERATION;
@@ -621,7 +621,7 @@
return res;
}
-status_t CameraMetadata::readFromParcel(Parcel *parcel) {
+status_t CameraMetadata::readFromParcel(const Parcel *parcel) {
ALOGV("%s: parcel = %p", __FUNCTION__, parcel);
diff --git a/camera/CameraParameters2.cpp b/camera/CameraParameters2.cpp
index 378afeb..c29233c 100644
--- a/camera/CameraParameters2.cpp
+++ b/camera/CameraParameters2.cpp
@@ -351,7 +351,7 @@
void CameraParameters2::dump() const
{
- ALOGD("dump: mMap.size = %d", mMap.size());
+ ALOGD("dump: mMap.size = %zu", mMap.size());
for (size_t i = 0; i < mMap.size(); i++) {
String8 k, v;
k = mMap.keyAt(i);
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index 4e36160..0a447e7 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -26,7 +26,7 @@
return requestId >= 0;
}
-status_t CaptureResultExtras::readFromParcel(Parcel *parcel) {
+status_t CaptureResultExtras::readFromParcel(const Parcel *parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
@@ -38,6 +38,7 @@
parcel->readInt32(&precaptureTriggerId);
parcel->readInt64(&frameNumber);
parcel->readInt32(&partialResultCount);
+ parcel->readInt32(&errorStreamId);
return OK;
}
@@ -54,6 +55,7 @@
parcel->writeInt32(precaptureTriggerId);
parcel->writeInt64(frameNumber);
parcel->writeInt32(partialResultCount);
+ parcel->writeInt32(errorStreamId);
return OK;
}
diff --git a/camera/ICamera.cpp b/camera/ICamera.cpp
index 1dd8912..37b0a10 100644
--- a/camera/ICamera.cpp
+++ b/camera/ICamera.cpp
@@ -22,12 +22,14 @@
#include <sys/types.h>
#include <binder/Parcel.h>
#include <camera/CameraUtils.h>
-#include <camera/ICamera.h>
+#include <android/hardware/ICamera.h>
+#include <android/hardware/ICameraClient.h>
#include <gui/IGraphicBufferProducer.h>
#include <gui/Surface.h>
#include <media/hardware/HardwareAPI.h>
namespace android {
+namespace hardware {
enum {
DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
@@ -63,13 +65,14 @@
}
// disconnect from camera service
- void disconnect()
+ binder::Status disconnect()
{
ALOGV("disconnect");
Parcel data, reply;
data.writeInterfaceToken(ICamera::getInterfaceDescriptor());
remote()->transact(DISCONNECT, data, &reply);
reply.readExceptionCode();
+ return binder::Status::ok();
}
// pass the buffered IGraphicBufferProducer to the camera service
@@ -467,4 +470,5 @@
// ----------------------------------------------------------------------------
-}; // namespace android
+} // namespace hardware
+} // namespace android
diff --git a/camera/ICameraClient.cpp b/camera/ICameraClient.cpp
index 4282f9a..d058138 100644
--- a/camera/ICameraClient.cpp
+++ b/camera/ICameraClient.cpp
@@ -21,10 +21,11 @@
#include <stdint.h>
#include <sys/types.h>
#include <camera/CameraUtils.h>
-#include <camera/ICameraClient.h>
+#include <android/hardware/ICameraClient.h>
#include <media/hardware/HardwareAPI.h>
namespace android {
+namespace hardware {
enum {
NOTIFY_CALLBACK = IBinder::FIRST_CALL_TRANSACTION,
@@ -150,5 +151,5 @@
// ----------------------------------------------------------------------------
-}; // namespace android
-
+} // namespace hardware
+} // namespace android
diff --git a/camera/ICameraService.cpp b/camera/ICameraService.cpp
deleted file mode 100644
index 4a042a6..0000000
--- a/camera/ICameraService.cpp
+++ /dev/null
@@ -1,536 +0,0 @@
-/*
-**
-** Copyright 2008, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#define LOG_TAG "BpCameraService"
-#include <utils/Log.h>
-#include <utils/Errors.h>
-#include <utils/String16.h>
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-
-#include <camera/ICameraService.h>
-#include <camera/ICameraServiceListener.h>
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
-#include <camera/CameraMetadata.h>
-#include <camera/VendorTagDescriptor.h>
-
-namespace android {
-
-namespace {
-
-enum {
- EX_SECURITY = -1,
- EX_BAD_PARCELABLE = -2,
- EX_ILLEGAL_ARGUMENT = -3,
- EX_NULL_POINTER = -4,
- EX_ILLEGAL_STATE = -5,
- EX_HAS_REPLY_HEADER = -128, // special; see below
-};
-
-static bool readExceptionCode(Parcel& reply) {
- int32_t exceptionCode = reply.readExceptionCode();
-
- if (exceptionCode != 0) {
- const char* errorMsg;
- switch(exceptionCode) {
- case EX_SECURITY:
- errorMsg = "Security";
- break;
- case EX_BAD_PARCELABLE:
- errorMsg = "BadParcelable";
- break;
- case EX_NULL_POINTER:
- errorMsg = "NullPointer";
- break;
- case EX_ILLEGAL_STATE:
- errorMsg = "IllegalState";
- break;
- // Binder should be handling this code inside Parcel::readException
- // but lets have a to-string here anyway just in case.
- case EX_HAS_REPLY_HEADER:
- errorMsg = "HasReplyHeader";
- break;
- default:
- errorMsg = "Unknown";
- }
-
- ALOGE("Binder transmission error %s (%d)", errorMsg, exceptionCode);
- return true;
- }
-
- return false;
-}
-
-};
-
-class BpCameraService: public BpInterface<ICameraService>
-{
-public:
- BpCameraService(const sp<IBinder>& impl)
- : BpInterface<ICameraService>(impl)
- {
- }
-
- // get number of cameras available that support standard camera operations
- virtual int32_t getNumberOfCameras()
- {
- return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
- }
-
- // get number of cameras available of a given type
- virtual int32_t getNumberOfCameras(int type)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeInt32(type);
- remote()->transact(BnCameraService::GET_NUMBER_OF_CAMERAS, data, &reply);
-
- if (readExceptionCode(reply)) return 0;
- return reply.readInt32();
- }
-
- // get information about a camera
- virtual status_t getCameraInfo(int cameraId,
- struct CameraInfo* cameraInfo) {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeInt32(cameraId);
- remote()->transact(BnCameraService::GET_CAMERA_INFO, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- status_t result = reply.readInt32();
- if (reply.readInt32() != 0) {
- cameraInfo->facing = reply.readInt32();
- cameraInfo->orientation = reply.readInt32();
- }
- return result;
- }
-
- // get camera characteristics (static metadata)
- virtual status_t getCameraCharacteristics(int cameraId,
- CameraMetadata* cameraInfo) {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeInt32(cameraId);
- remote()->transact(BnCameraService::GET_CAMERA_CHARACTERISTICS, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- status_t result = reply.readInt32();
-
- CameraMetadata out;
- if (reply.readInt32() != 0) {
- out.readFromParcel(&reply);
- }
-
- if (cameraInfo != NULL) {
- cameraInfo->swap(out);
- }
-
- return result;
- }
-
- // Get enumeration and description of vendor tags for camera
- virtual status_t getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- remote()->transact(BnCameraService::GET_CAMERA_VENDOR_TAG_DESCRIPTOR, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- status_t result = reply.readInt32();
-
- if (reply.readInt32() != 0) {
- sp<VendorTagDescriptor> d;
- if (VendorTagDescriptor::createFromParcel(&reply, /*out*/d) == OK) {
- desc = d;
- }
- }
- return result;
- }
-
- // connect to camera service (android.hardware.Camera)
- virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
- const String16 &clientPackageName, int clientUid, int clientPid,
- /*out*/
- sp<ICamera>& device)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(cameraClient));
- data.writeInt32(cameraId);
- data.writeString16(clientPackageName);
- data.writeInt32(clientUid);
- data.writeInt32(clientPid);
-
- status_t status;
- status = remote()->transact(BnCameraService::CONNECT, data, &reply);
- if (status != OK) return status;
-
- if (readExceptionCode(reply)) return -EPROTO;
- status = reply.readInt32();
- if (reply.readInt32() != 0) {
- device = interface_cast<ICamera>(reply.readStrongBinder());
- }
- return status;
- }
-
- // connect to camera service (android.hardware.Camera)
- virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
- int halVersion,
- const String16 &clientPackageName, int clientUid,
- /*out*/sp<ICamera>& device)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(cameraClient));
- data.writeInt32(cameraId);
- data.writeInt32(halVersion);
- data.writeString16(clientPackageName);
- data.writeInt32(clientUid);
-
- status_t status;
- status = remote()->transact(BnCameraService::CONNECT_LEGACY, data, &reply);
- if (status != OK) return status;
-
- if (readExceptionCode(reply)) return -EPROTO;
- status = reply.readInt32();
- if (reply.readInt32() != 0) {
- device = interface_cast<ICamera>(reply.readStrongBinder());
- }
- return status;
- }
-
- virtual status_t setTorchMode(const String16& cameraId, bool enabled,
- const sp<IBinder>& clientBinder)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeString16(cameraId);
- data.writeInt32(enabled ? 1 : 0);
- data.writeStrongBinder(clientBinder);
- remote()->transact(BnCameraService::SET_TORCH_MODE, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- return reply.readInt32();
- }
-
- // connect to camera service (android.hardware.camera2.CameraDevice)
- virtual status_t connectDevice(
- const sp<ICameraDeviceCallbacks>& cameraCb,
- int cameraId,
- const String16& clientPackageName,
- int clientUid,
- /*out*/
- sp<ICameraDeviceUser>& device)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(cameraCb));
- data.writeInt32(cameraId);
- data.writeString16(clientPackageName);
- data.writeInt32(clientUid);
-
- status_t status;
- status = remote()->transact(BnCameraService::CONNECT_DEVICE, data, &reply);
- if (status != OK) return status;
-
- if (readExceptionCode(reply)) return -EPROTO;
- status = reply.readInt32();
- if (reply.readInt32() != 0) {
- device = interface_cast<ICameraDeviceUser>(reply.readStrongBinder());
- }
- return status;
- }
-
- virtual status_t addListener(const sp<ICameraServiceListener>& listener)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(listener));
- remote()->transact(BnCameraService::ADD_LISTENER, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- return reply.readInt32();
- }
-
- virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeStrongBinder(IInterface::asBinder(listener));
- remote()->transact(BnCameraService::REMOVE_LISTENER, data, &reply);
-
- if (readExceptionCode(reply)) return -EPROTO;
- return reply.readInt32();
- }
-
- virtual status_t getLegacyParameters(int cameraId, String16* parameters) {
- if (parameters == NULL) {
- ALOGE("%s: parameters must not be null", __FUNCTION__);
- return BAD_VALUE;
- }
-
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
-
- data.writeInt32(cameraId);
- remote()->transact(BnCameraService::GET_LEGACY_PARAMETERS, data, &reply);
- if (readExceptionCode(reply)) return -EPROTO;
-
- status_t res = data.readInt32();
- int32_t length = data.readInt32(); // -1 means null
- if (length > 0) {
- *parameters = data.readString16();
- } else {
- *parameters = String16();
- }
-
- return res;
- }
-
- virtual status_t supportsCameraApi(int cameraId, int apiVersion) {
- Parcel data, reply;
-
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeInt32(cameraId);
- data.writeInt32(apiVersion);
- remote()->transact(BnCameraService::SUPPORTS_CAMERA_API, data, &reply);
- if (readExceptionCode(reply)) return -EPROTO;
-
- status_t res = data.readInt32();
- return res;
- }
-
- virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t len) {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraService::getInterfaceDescriptor());
- data.writeInt32(eventId);
- data.writeInt32Array(len, args);
- remote()->transact(BnCameraService::NOTIFY_SYSTEM_EVENT, data, &reply,
- IBinder::FLAG_ONEWAY);
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraService, "android.hardware.ICameraService");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraService::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch(code) {
- case GET_NUMBER_OF_CAMERAS: {
- CHECK_INTERFACE(ICameraService, data, reply);
- reply->writeNoException();
- reply->writeInt32(getNumberOfCameras(data.readInt32()));
- return NO_ERROR;
- } break;
- case GET_CAMERA_INFO: {
- CHECK_INTERFACE(ICameraService, data, reply);
- CameraInfo cameraInfo = CameraInfo();
- memset(&cameraInfo, 0, sizeof(cameraInfo));
- status_t result = getCameraInfo(data.readInt32(), &cameraInfo);
- reply->writeNoException();
- reply->writeInt32(result);
-
- // Fake a parcelable object here
- reply->writeInt32(1); // means the parcelable is included
- reply->writeInt32(cameraInfo.facing);
- reply->writeInt32(cameraInfo.orientation);
- return NO_ERROR;
- } break;
- case GET_CAMERA_CHARACTERISTICS: {
- CHECK_INTERFACE(ICameraService, data, reply);
- CameraMetadata info;
- status_t result = getCameraCharacteristics(data.readInt32(), &info);
- reply->writeNoException();
- reply->writeInt32(result);
-
- // out-variables are after exception and return value
- reply->writeInt32(1); // means the parcelable is included
- info.writeToParcel(reply);
- return NO_ERROR;
- } break;
- case GET_CAMERA_VENDOR_TAG_DESCRIPTOR: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<VendorTagDescriptor> d;
- status_t result = getCameraVendorTagDescriptor(d);
- reply->writeNoException();
- reply->writeInt32(result);
-
- // out-variables are after exception and return value
- if (d == NULL) {
- reply->writeInt32(0);
- } else {
- reply->writeInt32(1); // means the parcelable is included
- d->writeToParcel(reply);
- }
- return NO_ERROR;
- } break;
- case CONNECT: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<ICameraClient> cameraClient =
- interface_cast<ICameraClient>(data.readStrongBinder());
- int32_t cameraId = data.readInt32();
- const String16 clientName = data.readString16();
- int32_t clientUid = data.readInt32();
- int32_t clientPid = data.readInt32();
- sp<ICamera> camera;
- status_t status = connect(cameraClient, cameraId,
- clientName, clientUid, clientPid, /*out*/camera);
- reply->writeNoException();
- reply->writeInt32(status);
- if (camera != NULL) {
- reply->writeInt32(1);
- reply->writeStrongBinder(IInterface::asBinder(camera));
- } else {
- reply->writeInt32(0);
- }
- return NO_ERROR;
- } break;
- case CONNECT_DEVICE: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<ICameraDeviceCallbacks> cameraClient =
- interface_cast<ICameraDeviceCallbacks>(data.readStrongBinder());
- int32_t cameraId = data.readInt32();
- const String16 clientName = data.readString16();
- int32_t clientUid = data.readInt32();
- sp<ICameraDeviceUser> camera;
- status_t status = connectDevice(cameraClient, cameraId,
- clientName, clientUid, /*out*/camera);
- reply->writeNoException();
- reply->writeInt32(status);
- if (camera != NULL) {
- reply->writeInt32(1);
- reply->writeStrongBinder(IInterface::asBinder(camera));
- } else {
- reply->writeInt32(0);
- }
- return NO_ERROR;
- } break;
- case ADD_LISTENER: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<ICameraServiceListener> listener =
- interface_cast<ICameraServiceListener>(data.readStrongBinder());
- reply->writeNoException();
- reply->writeInt32(addListener(listener));
- return NO_ERROR;
- } break;
- case REMOVE_LISTENER: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<ICameraServiceListener> listener =
- interface_cast<ICameraServiceListener>(data.readStrongBinder());
- reply->writeNoException();
- reply->writeInt32(removeListener(listener));
- return NO_ERROR;
- } break;
- case GET_LEGACY_PARAMETERS: {
- CHECK_INTERFACE(ICameraService, data, reply);
- int cameraId = data.readInt32();
- String16 parameters;
-
- reply->writeNoException();
- // return value
- reply->writeInt32(getLegacyParameters(cameraId, ¶meters));
- // out parameters
- reply->writeInt32(1); // parameters is always available
- reply->writeString16(parameters);
- return NO_ERROR;
- } break;
- case SUPPORTS_CAMERA_API: {
- CHECK_INTERFACE(ICameraService, data, reply);
- int cameraId = data.readInt32();
- int apiVersion = data.readInt32();
-
- reply->writeNoException();
- // return value
- reply->writeInt32(supportsCameraApi(cameraId, apiVersion));
- return NO_ERROR;
- } break;
- case CONNECT_LEGACY: {
- CHECK_INTERFACE(ICameraService, data, reply);
- sp<ICameraClient> cameraClient =
- interface_cast<ICameraClient>(data.readStrongBinder());
- int32_t cameraId = data.readInt32();
- int32_t halVersion = data.readInt32();
- const String16 clientName = data.readString16();
- int32_t clientUid = data.readInt32();
- sp<ICamera> camera;
- status_t status = connectLegacy(cameraClient, cameraId, halVersion,
- clientName, clientUid, /*out*/camera);
- reply->writeNoException();
- reply->writeInt32(status);
- if (camera != NULL) {
- reply->writeInt32(1);
- reply->writeStrongBinder(IInterface::asBinder(camera));
- } else {
- reply->writeInt32(0);
- }
- return NO_ERROR;
- } break;
- case SET_TORCH_MODE: {
- CHECK_INTERFACE(ICameraService, data, reply);
- String16 cameraId = data.readString16();
- bool enabled = data.readInt32() != 0 ? true : false;
- const sp<IBinder> clientBinder = data.readStrongBinder();
- status_t status = setTorchMode(cameraId, enabled, clientBinder);
- reply->writeNoException();
- reply->writeInt32(status);
- return NO_ERROR;
- } break;
- case NOTIFY_SYSTEM_EVENT: {
- CHECK_INTERFACE(ICameraService, data, reply);
- int32_t eventId = data.readInt32();
- int32_t len = data.readInt32();
- if (len < 0) {
- ALOGE("%s: Received poorly formatted length in binder request: notifySystemEvent.",
- __FUNCTION__);
- return FAILED_TRANSACTION;
- }
- if (len > 512) {
- ALOGE("%s: Length %" PRIi32 " too long in binder request: notifySystemEvent.",
- __FUNCTION__, len);
- return FAILED_TRANSACTION;
- }
- int32_t events[len];
- memset(events, 0, sizeof(int32_t) * len);
- status_t status = data.read(events, sizeof(int32_t) * len);
- if (status != NO_ERROR) {
- ALOGE("%s: Received poorly formatted binder request: notifySystemEvent.",
- __FUNCTION__);
- return FAILED_TRANSACTION;
- }
- notifySystemEvent(eventId, events, len);
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/ICameraServiceListener.cpp b/camera/ICameraServiceListener.cpp
deleted file mode 100644
index 0010325..0000000
--- a/camera/ICameraServiceListener.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <binder/IPCThreadState.h>
-#include <binder/IServiceManager.h>
-
-#include <camera/ICameraServiceListener.h>
-
-namespace android {
-
-namespace {
- enum {
- STATUS_CHANGED = IBinder::FIRST_CALL_TRANSACTION,
- TORCH_STATUS_CHANGED,
- };
-}; // namespace anonymous
-
-class BpCameraServiceListener: public BpInterface<ICameraServiceListener>
-{
-
-public:
- BpCameraServiceListener(const sp<IBinder>& impl)
- : BpInterface<ICameraServiceListener>(impl)
- {
- }
-
- virtual void onStatusChanged(Status status, int32_t cameraId)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
-
- data.writeInt32(static_cast<int32_t>(status));
- data.writeInt32(cameraId);
-
- remote()->transact(STATUS_CHANGED,
- data,
- &reply,
- IBinder::FLAG_ONEWAY);
- }
-
- virtual void onTorchStatusChanged(TorchStatus status, const String16 &cameraId)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraServiceListener::getInterfaceDescriptor());
-
- data.writeInt32(static_cast<int32_t>(status));
- data.writeString16(cameraId);
-
- remote()->transact(TORCH_STATUS_CHANGED,
- data,
- &reply,
- IBinder::FLAG_ONEWAY);
- }
-};
-
-IMPLEMENT_META_INTERFACE(CameraServiceListener, "android.hardware.ICameraServiceListener");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraServiceListener::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
- uint32_t flags) {
- switch(code) {
- case STATUS_CHANGED: {
- CHECK_INTERFACE(ICameraServiceListener, data, reply);
-
- Status status = static_cast<Status>(data.readInt32());
- int32_t cameraId = data.readInt32();
-
- onStatusChanged(status, cameraId);
-
- return NO_ERROR;
- } break;
- case TORCH_STATUS_CHANGED: {
- CHECK_INTERFACE(ICameraServiceListener, data, reply);
-
- TorchStatus status = static_cast<TorchStatus>(data.readInt32());
- String16 cameraId = data.readString16();
-
- onTorchStatusChanged(status, cameraId);
-
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index dce313a..5538da9 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -46,7 +46,9 @@
static Mutex sLock;
static sp<VendorTagDescriptor> sGlobalVendorTagDescriptor;
-VendorTagDescriptor::VendorTagDescriptor() {}
+namespace hardware {
+namespace camera2 {
+namespace params {
VendorTagDescriptor::~VendorTagDescriptor() {
size_t len = mReverseMapping.size();
@@ -55,90 +57,46 @@
}
}
-status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
- /*out*/
- sp<VendorTagDescriptor>& descriptor) {
- if (vOps == NULL) {
- ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
- return BAD_VALUE;
- }
-
- int tagCount = vOps->get_tag_count(vOps);
- if (tagCount < 0 || tagCount > INT32_MAX) {
- ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
- return BAD_VALUE;
- }
-
- Vector<uint32_t> tagArray;
- LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
- "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
-
- vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
-
- sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
- desc->mTagCount = tagCount;
-
- SortedVector<String8> sections;
- KeyedVector<uint32_t, String8> tagToSectionMap;
-
- for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
- uint32_t tag = tagArray[i];
- if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
- ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
- return BAD_VALUE;
- }
- const char *tagName = vOps->get_tag_name(vOps, tag);
- if (tagName == NULL) {
- ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
- return BAD_VALUE;
- }
- desc->mTagToNameMap.add(tag, String8(tagName));
- const char *sectionName = vOps->get_section_name(vOps, tag);
- if (sectionName == NULL) {
- ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
- return BAD_VALUE;
- }
-
- String8 sectionString(sectionName);
-
- sections.add(sectionString);
- tagToSectionMap.add(tag, sectionString);
-
- int tagType = vOps->get_tag_type(vOps, tag);
- if (tagType < 0 || tagType >= NUM_TYPES) {
- ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
- return BAD_VALUE;
- }
- desc->mTagToTypeMap.add(tag, tagType);
- }
-
- desc->mSections = sections;
-
- for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
- uint32_t tag = tagArray[i];
- String8 sectionString = tagToSectionMap.valueFor(tag);
-
- // Set up tag to section index map
- ssize_t index = sections.indexOf(sectionString);
- LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
- desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
-
- // Set up reverse mapping
- ssize_t reverseIndex = -1;
- if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
- KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
- reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
- }
- desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
- }
-
- descriptor = desc;
- return OK;
+VendorTagDescriptor::VendorTagDescriptor() :
+ mTagCount(0),
+ mVendorOps() {
}
-status_t VendorTagDescriptor::createFromParcel(const Parcel* parcel,
- /*out*/
- sp<VendorTagDescriptor>& descriptor) {
+VendorTagDescriptor::VendorTagDescriptor(const VendorTagDescriptor& src) {
+ copyFrom(src);
+}
+
+VendorTagDescriptor& VendorTagDescriptor::operator=(const VendorTagDescriptor& rhs) {
+ copyFrom(rhs);
+ return *this;
+}
+
+void VendorTagDescriptor::copyFrom(const VendorTagDescriptor& src) {
+ if (this == &src) return;
+
+ size_t len = mReverseMapping.size();
+ for (size_t i = 0; i < len; ++i) {
+ delete mReverseMapping[i];
+ }
+ mReverseMapping.clear();
+
+ len = src.mReverseMapping.size();
+ // Have to copy KeyedVectors inside mReverseMapping
+ for (size_t i = 0; i < len; ++i) {
+ KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+ *nameMapper = *(src.mReverseMapping.valueAt(i));
+ mReverseMapping.add(src.mReverseMapping.keyAt(i), nameMapper);
+ }
+ // Everything else is simple
+ mTagToNameMap = src.mTagToNameMap;
+ mTagToSectionMap = src.mTagToSectionMap;
+ mTagToTypeMap = src.mTagToTypeMap;
+ mSections = src.mSections;
+ mTagCount = src.mTagCount;
+ mVendorOps = src.mVendorOps;
+}
+
+status_t VendorTagDescriptor::readFromParcel(const Parcel* parcel) {
status_t res = OK;
if (parcel == NULL) {
ALOGE("%s: parcel argument was NULL.", __FUNCTION__);
@@ -156,8 +114,7 @@
return BAD_VALUE;
}
- sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
- desc->mTagCount = tagCount;
+ mTagCount = tagCount;
uint32_t tag, sectionIndex;
uint32_t maxSectionIndex = 0;
@@ -197,9 +154,9 @@
maxSectionIndex = (maxSectionIndex >= sectionIndex) ? maxSectionIndex : sectionIndex;
allTags.add(tag);
- desc->mTagToNameMap.add(tag, tagName);
- desc->mTagToSectionMap.add(tag, sectionIndex);
- desc->mTagToTypeMap.add(tag, tagType);
+ mTagToNameMap.add(tag, tagName);
+ mTagToSectionMap.add(tag, sectionIndex);
+ mTagToTypeMap.add(tag, tagType);
}
if (res != OK) {
@@ -217,7 +174,7 @@
__FUNCTION__, sectionCount, (maxSectionIndex + 1));
return BAD_VALUE;
}
- LOG_ALWAYS_FATAL_IF(desc->mSections.setCapacity(sectionCount) <= 0,
+ LOG_ALWAYS_FATAL_IF(mSections.setCapacity(sectionCount) <= 0,
"Vector capacity must be positive");
for (size_t i = 0; i < sectionCount; ++i) {
String8 sectionName = parcel->readString8();
@@ -226,7 +183,7 @@
__FUNCTION__, i);
return NOT_ENOUGH_DATA;
}
- desc->mSections.add(sectionName);
+ mSections.add(sectionName);
}
}
@@ -235,17 +192,16 @@
// Set up reverse mapping
for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
uint32_t tag = allTags[i];
- String8 sectionString = desc->mSections[desc->mTagToSectionMap.valueFor(tag)];
+ String8 sectionString = mSections[mTagToSectionMap.valueFor(tag)];
ssize_t reverseIndex = -1;
- if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+ if ((reverseIndex = mReverseMapping.indexOfKey(sectionString)) < 0) {
KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
- reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+ reverseIndex = mReverseMapping.add(sectionString, nameMapper);
}
- desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+ mReverseMapping[reverseIndex]->add(mTagToNameMap.valueFor(tag), tag);
}
- descriptor = desc;
return res;
}
@@ -377,6 +333,92 @@
}
+} // namespace params
+} // namespace camera2
+} // namespace hardware
+
+
+status_t VendorTagDescriptor::createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+ /*out*/
+ sp<VendorTagDescriptor>& descriptor) {
+ if (vOps == NULL) {
+ ALOGE("%s: vendor_tag_ops argument was NULL.", __FUNCTION__);
+ return BAD_VALUE;
+ }
+
+ int tagCount = vOps->get_tag_count(vOps);
+ if (tagCount < 0 || tagCount > INT32_MAX) {
+ ALOGE("%s: tag count %d from vendor ops is invalid.", __FUNCTION__, tagCount);
+ return BAD_VALUE;
+ }
+
+ Vector<uint32_t> tagArray;
+ LOG_ALWAYS_FATAL_IF(tagArray.resize(tagCount) != tagCount,
+ "%s: too many (%u) vendor tags defined.", __FUNCTION__, tagCount);
+
+ vOps->get_all_tags(vOps, /*out*/tagArray.editArray());
+
+ sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+ desc->mTagCount = tagCount;
+
+ SortedVector<String8> sections;
+ KeyedVector<uint32_t, String8> tagToSectionMap;
+
+ for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+ uint32_t tag = tagArray[i];
+ if (tag < CAMERA_METADATA_VENDOR_TAG_BOUNDARY) {
+ ALOGE("%s: vendor tag %d not in vendor tag section.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+ const char *tagName = vOps->get_tag_name(vOps, tag);
+ if (tagName == NULL) {
+ ALOGE("%s: no tag name defined for vendor tag %d.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+ desc->mTagToNameMap.add(tag, String8(tagName));
+ const char *sectionName = vOps->get_section_name(vOps, tag);
+ if (sectionName == NULL) {
+ ALOGE("%s: no section name defined for vendor tag %d.", __FUNCTION__, tag);
+ return BAD_VALUE;
+ }
+
+ String8 sectionString(sectionName);
+
+ sections.add(sectionString);
+ tagToSectionMap.add(tag, sectionString);
+
+ int tagType = vOps->get_tag_type(vOps, tag);
+ if (tagType < 0 || tagType >= NUM_TYPES) {
+ ALOGE("%s: tag type %d from vendor ops does not exist.", __FUNCTION__, tagType);
+ return BAD_VALUE;
+ }
+ desc->mTagToTypeMap.add(tag, tagType);
+ }
+
+ desc->mSections = sections;
+
+ for (size_t i = 0; i < static_cast<size_t>(tagCount); ++i) {
+ uint32_t tag = tagArray[i];
+ String8 sectionString = tagToSectionMap.valueFor(tag);
+
+ // Set up tag to section index map
+ ssize_t index = sections.indexOf(sectionString);
+ LOG_ALWAYS_FATAL_IF(index < 0, "index %zd must be non-negative", index);
+ desc->mTagToSectionMap.add(tag, static_cast<uint32_t>(index));
+
+ // Set up reverse mapping
+ ssize_t reverseIndex = -1;
+ if ((reverseIndex = desc->mReverseMapping.indexOfKey(sectionString)) < 0) {
+ KeyedVector<String8, uint32_t>* nameMapper = new KeyedVector<String8, uint32_t>();
+ reverseIndex = desc->mReverseMapping.add(sectionString, nameMapper);
+ }
+ desc->mReverseMapping[reverseIndex]->add(desc->mTagToNameMap.valueFor(tag), tag);
+ }
+
+ descriptor = desc;
+ return OK;
+}
+
status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
status_t res = OK;
Mutex::Autolock al(sLock);
diff --git a/camera/aidl/android/hardware/CameraInfo.aidl b/camera/aidl/android/hardware/CameraInfo.aidl
new file mode 100644
index 0000000..c6a3a61
--- /dev/null
+++ b/camera/aidl/android/hardware/CameraInfo.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+parcelable CameraInfo cpp_header "camera/CameraBase.h";
diff --git a/camera/aidl/android/hardware/ICamera.aidl b/camera/aidl/android/hardware/ICamera.aidl
new file mode 100644
index 0000000..f9db842
--- /dev/null
+++ b/camera/aidl/android/hardware/ICamera.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+interface ICamera
+{
+ /**
+ * Only one call exposed, for ICameraService testing purposes
+ *
+ * Keep up-to-date with frameworks/av/include/camera/ICamera.h
+ */
+ void disconnect();
+}
diff --git a/camera/aidl/android/hardware/ICameraClient.aidl b/camera/aidl/android/hardware/ICameraClient.aidl
new file mode 100644
index 0000000..808edee
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraClient.aidl
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+interface ICameraClient
+{
+ // For now, empty because there is a manual implementation
+}
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
new file mode 100644
index 0000000..e94fd0c
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+import android.hardware.ICamera;
+import android.hardware.ICameraClient;
+import android.hardware.camera2.ICameraDeviceUser;
+import android.hardware.camera2.ICameraDeviceCallbacks;
+import android.hardware.camera2.params.VendorTagDescriptor;
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.ICameraServiceListener;
+import android.hardware.CameraInfo;
+
+/**
+ * Binder interface for the native camera service running in mediaserver.
+ *
+ * @hide
+ */
+interface ICameraService
+{
+ /**
+ * All camera service and device Binder calls may return a
+ * ServiceSpecificException with the following error codes
+ */
+ const int ERROR_PERMISSION_DENIED = 1;
+ const int ERROR_ALREADY_EXISTS = 2;
+ const int ERROR_ILLEGAL_ARGUMENT = 3;
+ const int ERROR_DISCONNECTED = 4;
+ const int ERROR_TIMED_OUT = 5;
+ const int ERROR_DISABLED = 6;
+ const int ERROR_CAMERA_IN_USE = 7;
+ const int ERROR_MAX_CAMERAS_IN_USE = 8;
+ const int ERROR_DEPRECATED_HAL = 9;
+ const int ERROR_INVALID_OPERATION = 10;
+
+ /**
+ * Types for getNumberOfCameras
+ */
+ const int CAMERA_TYPE_BACKWARD_COMPATIBLE = 0;
+ const int CAMERA_TYPE_ALL = 1;
+
+ /**
+ * Return the number of camera devices available in the system
+ */
+ int getNumberOfCameras(int type);
+
+ /**
+ * Fetch basic camera information for a camera device
+ */
+ CameraInfo getCameraInfo(int cameraId);
+
+ /**
+ * Default UID/PID values for non-privileged callers of
+ * connect(), connectDevice(), and connectLegacy()
+ */
+ const int USE_CALLING_UID = -1;
+ const int USE_CALLING_PID = -1;
+
+ /**
+ * Open a camera device through the old camera API
+ */
+ ICamera connect(ICameraClient client,
+ int cameraId,
+ String opPackageName,
+ int clientUid, int clientPid);
+
+ /**
+ * Open a camera device through the new camera API
+ * Only supported for device HAL versions >= 3.2
+ */
+ ICameraDeviceUser connectDevice(ICameraDeviceCallbacks callbacks,
+ int cameraId,
+ String opPackageName,
+ int clientUid);
+
+ /**
+ * halVersion constant for connectLegacy
+ */
+ const int CAMERA_HAL_API_VERSION_UNSPECIFIED = -1;
+
+ /**
+ * Open a camera device in legacy mode, if supported by the camera module HAL.
+ */
+ ICamera connectLegacy(ICameraClient client,
+ int cameraId,
+ int halVersion,
+ String opPackageName,
+ int clientUid);
+
+ /**
+ * Add/remove listeners for changes to camera device and flashlight state
+ */
+ void addListener(ICameraServiceListener listener);
+ void removeListener(ICameraServiceListener listener);
+
+ /**
+ * Read the static camera metadata for a camera device.
+ * Only supported for device HAL versions >= 3.2
+ */
+ CameraMetadataNative getCameraCharacteristics(int cameraId);
+
+ /**
+ * Read in the vendor tag descriptors from the camera module HAL.
+ * Intended to be used by the native code of CameraMetadataNative to correctly
+ * interpret camera metadata with vendor tags.
+ */
+ VendorTagDescriptor getCameraVendorTagDescriptor();
+
+ /**
+ * Read the legacy camera1 parameters into a String
+ */
+ String getLegacyParameters(int cameraId);
+
+ /**
+ * apiVersion constants for supportsCameraApi
+ */
+ const int API_VERSION_1 = 1;
+ const int API_VERSION_2 = 2;
+
+ // Determines if a particular API version is supported directly
+ boolean supportsCameraApi(int cameraId, int apiVersion);
+
+ void setTorchMode(String CameraId, boolean enabled, IBinder clientBinder);
+
+ /**
+ * Notify the camera service of a system event. Should only be called from system_server.
+ *
+ * Callers require the android.permission.CAMERA_SEND_SYSTEM_EVENTS permission.
+ */
+ const int EVENT_NONE = 0;
+ const int EVENT_USER_SWITCHED = 1;
+ oneway void notifySystemEvent(int eventId, in int[] args);
+}
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
new file mode 100644
index 0000000..4e2a8c7
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/** @hide */
+interface ICameraServiceListener
+{
+
+ /**
+ * Initial status will be transmitted with onStatusChanged immediately
+ * after this listener is added to the service listener list.
+ *
+ * Allowed transitions:
+ *
+ * (Any) -> NOT_PRESENT
+ * NOT_PRESENT -> PRESENT
+ * NOT_PRESENT -> ENUMERATING
+ * ENUMERATING -> PRESENT
+ * PRESENT -> NOT_AVAILABLE
+ * NOT_AVAILABLE -> PRESENT
+ *
+ * A state will never immediately transition back to itself.
+ *
+ * The enums must match the values in
+ * include/hardware/camera_common.h when applicable
+ */
+ // Device physically unplugged
+ const int STATUS_NOT_PRESENT = 0;
+ // Device physically has been plugged in and the camera can be used exclusively
+ const int STATUS_PRESENT = 1;
+ // Device physically has been plugged in but it will not be connect-able until enumeration is
+ // complete
+ const int STATUS_ENUMERATING = 2;
+ // Camera is in use by another app and cannot be used exclusively
+ const int STATUS_NOT_AVAILABLE = -2;
+
+ // Use to initialize variables only
+ const int STATUS_UNKNOWN = -1;
+
+ oneway void onStatusChanged(int status, int cameraId);
+
+ /**
+ * The torch mode status of a camera.
+ *
+ * Initial status will be transmitted with onTorchStatusChanged immediately
+ * after this listener is added to the service listener list.
+ *
+ * The enums must match the values in
+ * include/hardware/camera_common.h
+ */
+ // The camera's torch mode has become not available to use via
+ // setTorchMode().
+ const int TORCH_STATUS_NOT_AVAILABLE = 0;
+ // The camera's torch mode is off and available to be turned on via
+ // setTorchMode().
+ const int TORCH_STATUS_AVAILABLE_OFF = 1;
+ // The camera's torch mode is on and available to be turned off via
+ // setTorchMode().
+ const int TORCH_STATUS_AVAILABLE_ON = 2;
+
+ // Use to initialize variables only
+ const int TORCH_STATUS_UNKNOWN = -1;
+
+ oneway void onTorchStatusChanged(int status, String cameraId);
+}
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
new file mode 100644
index 0000000..0e654d5
--- /dev/null
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware;
+
+/**
+ * Binder interface for the camera service proxy running in system_server.
+ *
+ * Keep in sync with frameworks/av/include/camera/ICameraServiceProxy.h
+ *
+ * @hide
+ */
+interface ICameraServiceProxy
+{
+ /**
+ * Ping the service proxy to update the valid users for the camera service.
+ */
+ oneway void pingForUserUpdate();
+
+ /**
+ * Update the status of a camera device
+ */
+ oneway void notifyCameraState(String cameraId, int newCameraState);
+}
diff --git a/camera/aidl/android/hardware/camera2/CaptureRequest.aidl b/camera/aidl/android/hardware/camera2/CaptureRequest.aidl
new file mode 100644
index 0000000..9931fc7
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/CaptureRequest.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+/** @hide */
+parcelable CaptureRequest cpp_header "camera/camera2/CaptureRequest.h";
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
new file mode 100644
index 0000000..ab57db5
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceCallbacks.aidl
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.camera2.impl.CaptureResultExtras;
+
+/** @hide */
+interface ICameraDeviceCallbacks
+{
+ // Error codes for onDeviceError
+ const int ERROR_CAMERA_INVALID_ERROR = -1; // To indicate all invalid error codes
+ const int ERROR_CAMERA_DISCONNECTED = 0;
+ const int ERROR_CAMERA_DEVICE = 1;
+ const int ERROR_CAMERA_SERVICE = 2;
+ const int ERROR_CAMERA_REQUEST = 3;
+ const int ERROR_CAMERA_RESULT = 4;
+ const int ERROR_CAMERA_BUFFER = 5;
+
+ oneway void onDeviceError(int errorCode, in CaptureResultExtras resultExtras);
+ oneway void onDeviceIdle();
+ oneway void onCaptureStarted(in CaptureResultExtras resultExtras, long timestamp);
+ oneway void onResultReceived(in CameraMetadataNative result,
+ in CaptureResultExtras resultExtras);
+ oneway void onPrepared(int streamId);
+}
diff --git a/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
new file mode 100644
index 0000000..250f15e
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/ICameraDeviceUser.aidl
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2;
+
+import android.hardware.camera2.CaptureRequest;
+import android.hardware.camera2.impl.CameraMetadataNative;
+import android.hardware.camera2.params.OutputConfiguration;
+import android.hardware.camera2.utils.SubmitInfo;
+import android.view.Surface;
+
+/** @hide */
+interface ICameraDeviceUser
+{
+ void disconnect();
+
+ const int NO_IN_FLIGHT_REPEATING_FRAMES = -1;
+
+ SubmitInfo submitRequest(in CaptureRequest request, boolean streaming);
+ SubmitInfo submitRequestList(in CaptureRequest[] requestList, boolean streaming);
+
+ /**
+ * Cancel the repeating request specified by requestId
+ * Returns the frame number of the last frame that will be produced from this
+ * repeating request, or NO_IN_FLIGHT_REPEATING_FRAMES if no frames were produced
+ * by this repeating request
+ */
+ long cancelRequest(int requestId);
+
+ /**
+ * Begin the device configuration.
+ *
+ * <p>
+ * beginConfigure must be called before any call to deleteStream, createStream,
+ * or endConfigure. It is not valid to call this when the device is not idle.
+ * <p>
+ */
+ void beginConfigure();
+
+ /**
+ * End the device configuration.
+ *
+ * <p>
+ * endConfigure must be called after stream configuration is complete (i.e. after
+ * a call to beginConfigure and subsequent createStream/deleteStream calls). This
+ * must be called before any requests can be submitted.
+ * <p>
+ */
+ void endConfigure(boolean isConstrainedHighSpeed);
+
+ void deleteStream(int streamId);
+
+ /**
+ * Create an output stream
+ *
+ * <p>Create an output stream based on the given output configuration</p>
+ *
+ * @param outputConfiguration size, format, and other parameters for the stream
+ * @return new stream ID
+ */
+ int createStream(in OutputConfiguration outputConfiguration);
+
+ /**
+ * Create an input stream
+ *
+ * <p>Create an input stream of width, height, and format</p>
+ *
+ * @param width Width of the input buffers
+ * @param height Height of the input buffers
+ * @param format Format of the input buffers. One of HAL_PIXEL_FORMAT_*.
+ *
+ * @return new stream ID
+ */
+ int createInputStream(int width, int height, int format);
+
+ /**
+ * Get the surface of the input stream.
+ *
+ * <p>It's valid to call this method only after a stream configuration is completed
+ * successfully and the stream configuration includes an input stream.</p>
+ *
+ * @param surface An output argument for the surface of the input stream buffer queue.
+ */
+ Surface getInputSurface();
+
+ // Keep in sync with public API in
+ // frameworks/base/core/java/android/hardware/camera2/CameraDevice.java
+ const int TEMPLATE_PREVIEW = 1;
+ const int TEMPLATE_STILL_CAPTURE = 2;
+ const int TEMPLATE_RECORD = 3;
+ const int TEMPLATE_VIDEO_SNAPSHOT = 4;
+ const int TEMPLATE_ZERO_SHUTTER_LAG = 5;
+ const int TEMPLATE_MANUAL = 6;
+
+ CameraMetadataNative createDefaultRequest(int templateId);
+
+ CameraMetadataNative getCameraInfo();
+
+ void waitUntilIdle();
+
+ long flush();
+
+ void prepare(int streamId);
+
+ void tearDown(int streamId);
+
+ void prepare2(int maxCount, int streamId);
+}
diff --git a/camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl b/camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl
new file mode 100644
index 0000000..507f575
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/impl/CameraMetadataNative.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.impl;
+
+/** @hide */
+parcelable CameraMetadataNative cpp_header "camera/CameraMetadata.h";
diff --git a/camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl b/camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl
new file mode 100644
index 0000000..5f47eda
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/impl/CaptureResultExtras.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.impl;
+
+/** @hide */
+parcelable CaptureResultExtras cpp_header "camera/CaptureResult.h";
diff --git a/camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl b/camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl
new file mode 100644
index 0000000..a8ad832
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/OutputConfiguration.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable OutputConfiguration cpp_header "camera/camera2/OutputConfiguration.h";
diff --git a/camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl
new file mode 100644
index 0000000..9ee4263
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/params/VendorTagDescriptor.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.params;
+
+/** @hide */
+parcelable VendorTagDescriptor cpp_header "camera/VendorTagDescriptor.h";
diff --git a/camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl b/camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl
new file mode 100644
index 0000000..57531ad
--- /dev/null
+++ b/camera/aidl/android/hardware/camera2/utils/SubmitInfo.aidl
@@ -0,0 +1,20 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.hardware.camera2.utils;
+
+/** @hide */
+parcelable SubmitInfo cpp_header "camera/camera2/SubmitInfo.h";
diff --git a/camera/camera2/CaptureRequest.cpp b/camera/camera2/CaptureRequest.cpp
index 4217bc6..fb43708 100644
--- a/camera/camera2/CaptureRequest.cpp
+++ b/camera/camera2/CaptureRequest.cpp
@@ -25,8 +25,10 @@
#include <gui/Surface.h>
namespace android {
+namespace hardware {
+namespace camera2 {
-status_t CaptureRequest::readFromParcel(Parcel* parcel) {
+status_t CaptureRequest::readFromParcel(const Parcel* parcel) {
if (parcel == NULL) {
ALOGE("%s: Null parcel", __FUNCTION__);
return BAD_VALUE;
@@ -130,4 +132,6 @@
return OK;
}
-}; // namespace android
+} // namespace camera2
+} // namespace hardware
+} // namespace android
diff --git a/camera/camera2/ICameraDeviceCallbacks.cpp b/camera/camera2/ICameraDeviceCallbacks.cpp
deleted file mode 100644
index f599879..0000000
--- a/camera/camera2/ICameraDeviceCallbacks.cpp
+++ /dev/null
@@ -1,190 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ICameraDeviceCallbacks"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-
-#include <binder/Parcel.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <utils/Mutex.h>
-
-#include <camera/camera2/ICameraDeviceCallbacks.h>
-#include "camera/CameraMetadata.h"
-#include "camera/CaptureResult.h"
-
-namespace android {
-
-enum {
- CAMERA_ERROR = IBinder::FIRST_CALL_TRANSACTION,
- CAMERA_IDLE,
- CAPTURE_STARTED,
- RESULT_RECEIVED,
- PREPARED
-};
-
-class BpCameraDeviceCallbacks: public BpInterface<ICameraDeviceCallbacks>
-{
-public:
- BpCameraDeviceCallbacks(const sp<IBinder>& impl)
- : BpInterface<ICameraDeviceCallbacks>(impl)
- {
- }
-
- void onDeviceError(CameraErrorCode errorCode, const CaptureResultExtras& resultExtras)
- {
- ALOGV("onDeviceError");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- data.writeInt32(static_cast<int32_t>(errorCode));
- data.writeInt32(1); // to mark presence of CaptureResultExtras object
- resultExtras.writeToParcel(&data);
- remote()->transact(CAMERA_ERROR, data, &reply, IBinder::FLAG_ONEWAY);
- data.writeNoException();
- }
-
- void onDeviceIdle()
- {
- ALOGV("onDeviceIdle");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- remote()->transact(CAMERA_IDLE, data, &reply, IBinder::FLAG_ONEWAY);
- data.writeNoException();
- }
-
- void onCaptureStarted(const CaptureResultExtras& result, int64_t timestamp)
- {
- ALOGV("onCaptureStarted");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- data.writeInt32(1); // to mark presence of CaptureResultExtras object
- result.writeToParcel(&data);
- data.writeInt64(timestamp);
- remote()->transact(CAPTURE_STARTED, data, &reply, IBinder::FLAG_ONEWAY);
- data.writeNoException();
- }
-
- void onResultReceived(const CameraMetadata& metadata,
- const CaptureResultExtras& resultExtras) {
- ALOGV("onResultReceived");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- data.writeInt32(1); // to mark presence of metadata object
- metadata.writeToParcel(&data);
- data.writeInt32(1); // to mark presence of CaptureResult object
- resultExtras.writeToParcel(&data);
- remote()->transact(RESULT_RECEIVED, data, &reply, IBinder::FLAG_ONEWAY);
- data.writeNoException();
- }
-
- void onPrepared(int streamId)
- {
- ALOGV("onPrepared");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceCallbacks::getInterfaceDescriptor());
- data.writeInt32(streamId);
- remote()->transact(PREPARED, data, &reply, IBinder::FLAG_ONEWAY);
- data.writeNoException();
- }
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraDeviceCallbacks,
- "android.hardware.camera2.ICameraDeviceCallbacks");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraDeviceCallbacks::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- ALOGV("onTransact - code = %d", code);
- switch(code) {
- case CAMERA_ERROR: {
- ALOGV("onDeviceError");
- CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- CameraErrorCode errorCode =
- static_cast<CameraErrorCode>(data.readInt32());
- CaptureResultExtras resultExtras;
- if (data.readInt32() != 0) {
- resultExtras.readFromParcel(const_cast<Parcel*>(&data));
- } else {
- ALOGE("No CaptureResultExtras object is present!");
- }
- onDeviceError(errorCode, resultExtras);
- data.readExceptionCode();
- return NO_ERROR;
- } break;
- case CAMERA_IDLE: {
- ALOGV("onDeviceIdle");
- CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- onDeviceIdle();
- data.readExceptionCode();
- return NO_ERROR;
- } break;
- case CAPTURE_STARTED: {
- ALOGV("onCaptureStarted");
- CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- CaptureResultExtras result;
- if (data.readInt32() != 0) {
- result.readFromParcel(const_cast<Parcel*>(&data));
- } else {
- ALOGE("No CaptureResultExtras object is present in result!");
- }
- int64_t timestamp = data.readInt64();
- onCaptureStarted(result, timestamp);
- data.readExceptionCode();
- return NO_ERROR;
- } break;
- case RESULT_RECEIVED: {
- ALOGV("onResultReceived");
- CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- CameraMetadata metadata;
- if (data.readInt32() != 0) {
- metadata.readFromParcel(const_cast<Parcel*>(&data));
- } else {
- ALOGW("No metadata object is present in result");
- }
- CaptureResultExtras resultExtras;
- if (data.readInt32() != 0) {
- resultExtras.readFromParcel(const_cast<Parcel*>(&data));
- } else {
- ALOGW("No capture result extras object is present in result");
- }
- onResultReceived(metadata, resultExtras);
- data.readExceptionCode();
- return NO_ERROR;
- } break;
- case PREPARED: {
- ALOGV("onPrepared");
- CHECK_INTERFACE(ICameraDeviceCallbacks, data, reply);
- CaptureResultExtras result;
- int streamId = data.readInt32();
- onPrepared(streamId);
- data.readExceptionCode();
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/camera2/ICameraDeviceUser.cpp b/camera/camera2/ICameraDeviceUser.cpp
deleted file mode 100644
index 2a9fd2b..0000000
--- a/camera/camera2/ICameraDeviceUser.cpp
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
-**
-** Copyright 2013, The Android Open Source Project
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-*/
-
-// #define LOG_NDEBUG 0
-#define LOG_TAG "ICameraDeviceUser"
-#include <utils/Log.h>
-#include <stdint.h>
-#include <sys/types.h>
-#include <binder/Parcel.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <gui/IGraphicBufferProducer.h>
-#include <gui/Surface.h>
-#include <camera/CameraMetadata.h>
-#include <camera/camera2/CaptureRequest.h>
-#include <camera/camera2/OutputConfiguration.h>
-
-namespace android {
-
-typedef Parcel::WritableBlob WritableBlob;
-typedef Parcel::ReadableBlob ReadableBlob;
-
-enum {
- DISCONNECT = IBinder::FIRST_CALL_TRANSACTION,
- SUBMIT_REQUEST,
- SUBMIT_REQUEST_LIST,
- CANCEL_REQUEST,
- BEGIN_CONFIGURE,
- END_CONFIGURE,
- DELETE_STREAM,
- CREATE_STREAM,
- CREATE_INPUT_STREAM,
- GET_INPUT_SURFACE,
- CREATE_DEFAULT_REQUEST,
- GET_CAMERA_INFO,
- WAIT_UNTIL_IDLE,
- FLUSH,
- PREPARE,
- TEAR_DOWN,
- PREPARE2
-};
-
-namespace {
- // Read empty strings without printing a false error message.
- String16 readMaybeEmptyString16(const Parcel& parcel) {
- size_t len;
- const char16_t* str = parcel.readString16Inplace(&len);
- if (str != NULL) {
- return String16(str, len);
- } else {
- return String16();
- }
- }
-};
-
-class BpCameraDeviceUser : public BpInterface<ICameraDeviceUser>
-{
-public:
- BpCameraDeviceUser(const sp<IBinder>& impl)
- : BpInterface<ICameraDeviceUser>(impl)
- {
- }
-
- // disconnect from camera service
- void disconnect()
- {
- ALOGV("disconnect");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- remote()->transact(DISCONNECT, data, &reply);
- reply.readExceptionCode();
- }
-
- virtual int submitRequest(sp<CaptureRequest> request, bool repeating,
- int64_t *lastFrameNumber)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
- // arg0 = CaptureRequest
- if (request != 0) {
- data.writeInt32(1);
- request->writeToParcel(&data);
- } else {
- data.writeInt32(0);
- }
-
- // arg1 = streaming (bool)
- data.writeInt32(repeating);
-
- remote()->transact(SUBMIT_REQUEST, data, &reply);
-
- reply.readExceptionCode();
- status_t res = reply.readInt32();
-
- status_t resFrameNumber = BAD_VALUE;
- if (reply.readInt32() != 0) {
- if (lastFrameNumber != NULL) {
- resFrameNumber = reply.readInt64(lastFrameNumber);
- }
- }
-
- if (res < 0 || (resFrameNumber != NO_ERROR)) {
- res = FAILED_TRANSACTION;
- }
- return res;
- }
-
- virtual int submitRequestList(List<sp<CaptureRequest> > requestList, bool repeating,
- int64_t *lastFrameNumber)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
- data.writeInt32(requestList.size());
-
- for (List<sp<CaptureRequest> >::iterator it = requestList.begin();
- it != requestList.end(); ++it) {
- sp<CaptureRequest> request = *it;
- if (request != 0) {
- data.writeInt32(1);
- if (request->writeToParcel(&data) != OK) {
- return BAD_VALUE;
- }
- } else {
- data.writeInt32(0);
- }
- }
-
- data.writeInt32(repeating);
-
- remote()->transact(SUBMIT_REQUEST_LIST, data, &reply);
-
- reply.readExceptionCode();
- status_t res = reply.readInt32();
-
- status_t resFrameNumber = BAD_VALUE;
- if (reply.readInt32() != 0) {
- if (lastFrameNumber != NULL) {
- resFrameNumber = reply.readInt64(lastFrameNumber);
- }
- }
- if (res < 0 || (resFrameNumber != NO_ERROR)) {
- res = FAILED_TRANSACTION;
- }
- return res;
- }
-
- virtual status_t cancelRequest(int requestId, int64_t *lastFrameNumber)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(requestId);
-
- remote()->transact(CANCEL_REQUEST, data, &reply);
-
- reply.readExceptionCode();
- status_t res = reply.readInt32();
-
- status_t resFrameNumber = BAD_VALUE;
- if (reply.readInt32() != 0) {
- if (lastFrameNumber != NULL) {
- resFrameNumber = reply.readInt64(lastFrameNumber);
- }
- }
- if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
- res = FAILED_TRANSACTION;
- }
- return res;
- }
-
- virtual status_t beginConfigure()
- {
- ALOGV("beginConfigure");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- remote()->transact(BEGIN_CONFIGURE, data, &reply);
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t endConfigure(bool isConstrainedHighSpeed)
- {
- ALOGV("endConfigure");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(isConstrainedHighSpeed);
-
- remote()->transact(END_CONFIGURE, data, &reply);
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t deleteStream(int streamId)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(streamId);
-
- remote()->transact(DELETE_STREAM, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t createStream(const OutputConfiguration& outputConfiguration)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- if (outputConfiguration.getGraphicBufferProducer() != NULL) {
- data.writeInt32(1); // marker that OutputConfiguration is not null. Mimic aidl behavior
- outputConfiguration.writeToParcel(data);
- } else {
- data.writeInt32(0);
- }
- remote()->transact(CREATE_STREAM, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t createInputStream(int width, int height, int format)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(width);
- data.writeInt32(height);
- data.writeInt32(format);
-
- remote()->transact(CREATE_INPUT_STREAM, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- // get the buffer producer of the input stream
- virtual status_t getInputBufferProducer(
- sp<IGraphicBufferProducer> *producer) {
- if (producer == NULL) {
- return BAD_VALUE;
- }
-
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
-
- remote()->transact(GET_INPUT_SURFACE, data, &reply);
-
- reply.readExceptionCode();
- status_t result = reply.readInt32() ;
- if (result != OK) {
- return result;
- }
-
- sp<IGraphicBufferProducer> bp = NULL;
- if (reply.readInt32() != 0) {
- String16 name = readMaybeEmptyString16(reply);
- bp = interface_cast<IGraphicBufferProducer>(
- reply.readStrongBinder());
- }
-
- *producer = bp;
-
- return *producer == NULL ? INVALID_OPERATION : OK;
- }
-
- // Create a request object from a template.
- virtual status_t createDefaultRequest(int templateId,
- /*out*/
- CameraMetadata* request)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(templateId);
- remote()->transact(CREATE_DEFAULT_REQUEST, data, &reply);
-
- reply.readExceptionCode();
- status_t result = reply.readInt32();
-
- CameraMetadata out;
- if (reply.readInt32() != 0) {
- out.readFromParcel(&reply);
- }
-
- if (request != NULL) {
- request->swap(out);
- }
- return result;
- }
-
-
- virtual status_t getCameraInfo(CameraMetadata* info)
- {
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- remote()->transact(GET_CAMERA_INFO, data, &reply);
-
- reply.readExceptionCode();
- status_t result = reply.readInt32();
-
- CameraMetadata out;
- if (reply.readInt32() != 0) {
- out.readFromParcel(&reply);
- }
-
- if (info != NULL) {
- info->swap(out);
- }
-
- return result;
- }
-
- virtual status_t waitUntilIdle()
- {
- ALOGV("waitUntilIdle");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- remote()->transact(WAIT_UNTIL_IDLE, data, &reply);
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t flush(int64_t *lastFrameNumber)
- {
- ALOGV("flush");
- Parcel data, reply;
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- remote()->transact(FLUSH, data, &reply);
- reply.readExceptionCode();
- status_t res = reply.readInt32();
-
- status_t resFrameNumber = BAD_VALUE;
- if (reply.readInt32() != 0) {
- if (lastFrameNumber != NULL) {
- resFrameNumber = reply.readInt64(lastFrameNumber);
- }
- }
- if ((res != NO_ERROR) || (resFrameNumber != NO_ERROR)) {
- res = FAILED_TRANSACTION;
- }
- return res;
- }
-
- virtual status_t prepare(int streamId)
- {
- ALOGV("prepare");
- Parcel data, reply;
-
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(streamId);
-
- remote()->transact(PREPARE, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t prepare2(int maxCount, int streamId)
- {
- ALOGV("prepare2");
- Parcel data, reply;
-
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(maxCount);
- data.writeInt32(streamId);
-
- remote()->transact(PREPARE2, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
- virtual status_t tearDown(int streamId)
- {
- ALOGV("tearDown");
- Parcel data, reply;
-
- data.writeInterfaceToken(ICameraDeviceUser::getInterfaceDescriptor());
- data.writeInt32(streamId);
-
- remote()->transact(TEAR_DOWN, data, &reply);
-
- reply.readExceptionCode();
- return reply.readInt32();
- }
-
-private:
-
-
-};
-
-IMPLEMENT_META_INTERFACE(CameraDeviceUser,
- "android.hardware.camera2.ICameraDeviceUser");
-
-// ----------------------------------------------------------------------
-
-status_t BnCameraDeviceUser::onTransact(
- uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
-{
- switch(code) {
- case DISCONNECT: {
- ALOGV("DISCONNECT");
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- disconnect();
- reply->writeNoException();
- return NO_ERROR;
- } break;
- case SUBMIT_REQUEST: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- // arg0 = request
- sp<CaptureRequest> request;
- if (data.readInt32() != 0) {
- request = new CaptureRequest();
- request->readFromParcel(const_cast<Parcel*>(&data));
- }
-
- // arg1 = streaming (bool)
- bool repeating = data.readInt32();
-
- // return code: requestId (int32)
- reply->writeNoException();
- int64_t lastFrameNumber = -1;
- reply->writeInt32(submitRequest(request, repeating, &lastFrameNumber));
- reply->writeInt32(1);
- reply->writeInt64(lastFrameNumber);
-
- return NO_ERROR;
- } break;
- case SUBMIT_REQUEST_LIST: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- List<sp<CaptureRequest> > requestList;
- int requestListSize = data.readInt32();
- for (int i = 0; i < requestListSize; i++) {
- if (data.readInt32() != 0) {
- sp<CaptureRequest> request = new CaptureRequest();
- if (request->readFromParcel(const_cast<Parcel*>(&data)) != OK) {
- return BAD_VALUE;
- }
- requestList.push_back(request);
- } else {
- sp<CaptureRequest> request = 0;
- requestList.push_back(request);
- ALOGE("A request is missing. Sending in null request.");
- }
- }
-
- bool repeating = data.readInt32();
-
- reply->writeNoException();
- int64_t lastFrameNumber = -1;
- reply->writeInt32(submitRequestList(requestList, repeating, &lastFrameNumber));
- reply->writeInt32(1);
- reply->writeInt64(lastFrameNumber);
-
- return NO_ERROR;
- } break;
- case CANCEL_REQUEST: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int requestId = data.readInt32();
- reply->writeNoException();
- int64_t lastFrameNumber = -1;
- reply->writeInt32(cancelRequest(requestId, &lastFrameNumber));
- reply->writeInt32(1);
- reply->writeInt64(lastFrameNumber);
- return NO_ERROR;
- } break;
- case DELETE_STREAM: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int streamId = data.readInt32();
- reply->writeNoException();
- reply->writeInt32(deleteStream(streamId));
- return NO_ERROR;
- } break;
- case CREATE_STREAM: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- status_t ret = BAD_VALUE;
- if (data.readInt32() != 0) {
- OutputConfiguration outputConfiguration(data);
- ret = createStream(outputConfiguration);
- } else {
- ALOGE("%s: cannot take an empty OutputConfiguration", __FUNCTION__);
- }
-
- reply->writeNoException();
- ALOGV("%s: CREATE_STREAM: write noException", __FUNCTION__);
- reply->writeInt32(ret);
- ALOGV("%s: CREATE_STREAM: write ret = %d", __FUNCTION__, ret);
-
- return NO_ERROR;
- } break;
- case CREATE_INPUT_STREAM: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int width, height, format;
-
- width = data.readInt32();
- height = data.readInt32();
- format = data.readInt32();
- status_t ret = createInputStream(width, height, format);
-
- reply->writeNoException();
- reply->writeInt32(ret);
- return NO_ERROR;
-
- } break;
- case GET_INPUT_SURFACE: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- sp<IGraphicBufferProducer> bp;
- status_t ret = getInputBufferProducer(&bp);
- sp<IBinder> b(IInterface::asBinder(ret == OK ? bp : NULL));
-
- reply->writeNoException();
- reply->writeInt32(ret);
- reply->writeInt32(1);
- reply->writeString16(String16("camera input")); // name of surface
- reply->writeStrongBinder(b);
-
- return NO_ERROR;
- } break;
- case CREATE_DEFAULT_REQUEST: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- int templateId = data.readInt32();
-
- CameraMetadata request;
- status_t ret;
- ret = createDefaultRequest(templateId, &request);
-
- reply->writeNoException();
- reply->writeInt32(ret);
-
- // out-variables are after exception and return value
- reply->writeInt32(1); // to mark presence of metadata object
- request.writeToParcel(const_cast<Parcel*>(reply));
-
- return NO_ERROR;
- } break;
- case GET_CAMERA_INFO: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
-
- CameraMetadata info;
- status_t ret;
- ret = getCameraInfo(&info);
-
- reply->writeNoException();
- reply->writeInt32(ret);
-
- // out-variables are after exception and return value
- reply->writeInt32(1); // to mark presence of metadata object
- info.writeToParcel(reply);
-
- return NO_ERROR;
- } break;
- case WAIT_UNTIL_IDLE: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- reply->writeNoException();
- reply->writeInt32(waitUntilIdle());
- return NO_ERROR;
- } break;
- case FLUSH: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- reply->writeNoException();
- int64_t lastFrameNumber = -1;
- reply->writeInt32(flush(&lastFrameNumber));
- reply->writeInt32(1);
- reply->writeInt64(lastFrameNumber);
- return NO_ERROR;
- }
- case BEGIN_CONFIGURE: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- reply->writeNoException();
- reply->writeInt32(beginConfigure());
- return NO_ERROR;
- } break;
- case END_CONFIGURE: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- bool isConstrainedHighSpeed = data.readInt32();
- reply->writeNoException();
- reply->writeInt32(endConfigure(isConstrainedHighSpeed));
- return NO_ERROR;
- } break;
- case PREPARE: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int streamId = data.readInt32();
- reply->writeNoException();
- reply->writeInt32(prepare(streamId));
- return NO_ERROR;
- } break;
- case TEAR_DOWN: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int streamId = data.readInt32();
- reply->writeNoException();
- reply->writeInt32(tearDown(streamId));
- return NO_ERROR;
- } break;
- case PREPARE2: {
- CHECK_INTERFACE(ICameraDeviceUser, data, reply);
- int maxCount = data.readInt32();
- int streamId = data.readInt32();
- reply->writeNoException();
- reply->writeInt32(prepare2(maxCount, streamId));
- return NO_ERROR;
- } break;
- default:
- return BBinder::onTransact(code, data, reply, flags);
- }
-}
-
-// ----------------------------------------------------------------------------
-
-}; // namespace android
diff --git a/camera/camera2/OutputConfiguration.cpp b/camera/camera2/OutputConfiguration.cpp
index 3505154..3247d0d 100644
--- a/camera/camera2/OutputConfiguration.cpp
+++ b/camera/camera2/OutputConfiguration.cpp
@@ -16,9 +16,12 @@
*/
#define LOG_TAG "OutputConfiguration"
+//#define LOG_NDEBUG 0
+
#include <utils/Log.h>
#include <camera/camera2/OutputConfiguration.h>
+#include <gui/Surface.h>
#include <binder/Parcel.h>
namespace android {
@@ -27,17 +30,6 @@
const int OutputConfiguration::INVALID_ROTATION = -1;
const int OutputConfiguration::INVALID_SET_ID = -1;
-// Read empty strings without printing a false error message.
-String16 OutputConfiguration::readMaybeEmptyString16(const Parcel& parcel) {
- size_t len;
- const char16_t* str = parcel.readString16Inplace(&len);
- if (str != NULL) {
- return String16(str, len);
- } else {
- return String16();
- }
-}
-
sp<IGraphicBufferProducer> OutputConfiguration::getGraphicBufferProducer() const {
return mGbp;
}
@@ -50,33 +42,48 @@
return mSurfaceSetID;
}
-OutputConfiguration::OutputConfiguration(const Parcel& parcel) {
- status_t err;
+OutputConfiguration::OutputConfiguration() :
+ mRotation(INVALID_ROTATION),
+ mSurfaceSetID(INVALID_SET_ID) {
+}
+
+OutputConfiguration::OutputConfiguration(const Parcel& parcel) :
+ mRotation(INVALID_ROTATION),
+ mSurfaceSetID(INVALID_SET_ID) {
+ readFromParcel(&parcel);
+}
+
+status_t OutputConfiguration::readFromParcel(const Parcel* parcel) {
+ status_t err = OK;
int rotation = 0;
- if ((err = parcel.readInt32(&rotation)) != OK) {
+
+ if (parcel == nullptr) return BAD_VALUE;
+
+ if ((err = parcel->readInt32(&rotation)) != OK) {
ALOGE("%s: Failed to read rotation from parcel", __FUNCTION__);
- mGbp = NULL;
- mRotation = INVALID_ROTATION;
- return;
+ return err;
}
int setID = INVALID_SET_ID;
- if ((err = parcel.readInt32(&setID)) != OK) {
+ if ((err = parcel->readInt32(&setID)) != OK) {
ALOGE("%s: Failed to read surface set ID from parcel", __FUNCTION__);
- mGbp = NULL;
- mSurfaceSetID = INVALID_SET_ID;
- return;
+ return err;
}
- String16 name = readMaybeEmptyString16(parcel);
- const sp<IGraphicBufferProducer>& gbp =
- interface_cast<IGraphicBufferProducer>(parcel.readStrongBinder());
- mGbp = gbp;
+ view::Surface surfaceShim;
+ if ((err = surfaceShim.readFromParcel(parcel)) != OK) {
+ ALOGE("%s: Failed to read surface from parcel", __FUNCTION__);
+ return err;
+ }
+
+ mGbp = surfaceShim.graphicBufferProducer;
mRotation = rotation;
mSurfaceSetID = setID;
- ALOGV("%s: OutputConfiguration: bp = %p, name = %s", __FUNCTION__,
- gbp.get(), String8(name).string());
+ ALOGV("%s: OutputConfiguration: bp = %p, name = %s, rotation = %d, setId = %d", __FUNCTION__,
+ mGbp.get(), String8(surfaceShim.name).string(), mRotation, mSurfaceSetID);
+
+ return err;
}
OutputConfiguration::OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
@@ -86,16 +93,25 @@
mSurfaceSetID = surfaceSetID;
}
-status_t OutputConfiguration::writeToParcel(Parcel& parcel) const {
+status_t OutputConfiguration::writeToParcel(Parcel* parcel) const {
- parcel.writeInt32(mRotation);
- parcel.writeInt32(mSurfaceSetID);
- parcel.writeString16(String16("unknown_name")); // name of surface
- sp<IBinder> b(IInterface::asBinder(mGbp));
- parcel.writeStrongBinder(b);
+ if (parcel == nullptr) return BAD_VALUE;
+ status_t err = OK;
+
+ err = parcel->writeInt32(mRotation);
+ if (err != OK) return err;
+
+ err = parcel->writeInt32(mSurfaceSetID);
+ if (err != OK) return err;
+
+ view::Surface surfaceShim;
+ surfaceShim.name = String16("unknown_name"); // name of surface
+ surfaceShim.graphicBufferProducer = mGbp;
+
+ err = surfaceShim.writeToParcel(parcel);
+ if (err != OK) return err;
return OK;
}
}; // namespace android
-
diff --git a/camera/camera2/SubmitInfo.cpp b/camera/camera2/SubmitInfo.cpp
new file mode 100644
index 0000000..d739c79
--- /dev/null
+++ b/camera/camera2/SubmitInfo.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#include "camera/camera2/SubmitInfo.h"
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace utils {
+
+status_t SubmitInfo::writeToParcel(Parcel *parcel) const {
+ status_t res;
+ if (parcel == nullptr) return BAD_VALUE;
+
+ res = parcel->writeInt32(mRequestId);
+ if (res != OK) return res;
+
+ res = parcel->writeInt64(mLastFrameNumber);
+ return res;
+}
+
+status_t SubmitInfo::readFromParcel(const Parcel *parcel) {
+ status_t res;
+ if (parcel == nullptr) return BAD_VALUE;
+
+ res = parcel->readInt32(&mRequestId);
+ if (res != OK) return res;
+
+ res = parcel->readInt64(&mLastFrameNumber);
+ return res;
+}
+
+} // namespace utils
+} // namespace camera2
+} // namespace hardware
+} // namespace android
diff --git a/camera/cameraserver/Android.mk b/camera/cameraserver/Android.mk
index 4d8339c..7e36c5e 100644
--- a/camera/cameraserver/Android.mk
+++ b/camera/cameraserver/Android.mk
@@ -23,15 +23,14 @@
libcameraservice \
libcutils \
libutils \
- libbinder
-
-LOCAL_C_INCLUDES := \
- frameworks/av/services/camera/libcameraservice \
- system/media/camera/include
+ libbinder \
+ libcamera_client
LOCAL_MODULE:= cameraserver
LOCAL_32_BIT_ONLY := true
+LOCAL_CFLAGS += -Wall -Wextra -Werror -Wno-unused-parameter
+
LOCAL_INIT_RC := cameraserver.rc
include $(BUILD_EXECUTABLE)
diff --git a/camera/ndk/Android.mk b/camera/ndk/Android.mk
index 8e84e40..40dbeef 100644
--- a/camera/ndk/Android.mk
+++ b/camera/ndk/Android.mk
@@ -34,11 +34,11 @@
LOCAL_MODULE:= libcamera2ndk
LOCAL_C_INCLUDES := \
- system/media/camera/include \
frameworks/av/include/camera/ndk \
- frameworks/av/include/ndk \
+ frameworks/av/include/ndk
LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
+LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_SHARED_LIBRARIES := \
libbinder \
@@ -49,6 +49,7 @@
libcamera_client \
libstagefright_foundation \
libcutils \
+ libcamera_metadata
LOCAL_CLANG := true
diff --git a/camera/ndk/NdkCameraManager.cpp b/camera/ndk/NdkCameraManager.cpp
index 7d9f84b..ff15263 100644
--- a/camera/ndk/NdkCameraManager.cpp
+++ b/camera/ndk/NdkCameraManager.cpp
@@ -24,6 +24,8 @@
#include <NdkCameraManager.h>
#include "impl/ACameraManager.h"
+using namespace android;
+
EXPORT
ACameraManager* ACameraManager_create() {
ATRACE_CALL();
diff --git a/camera/ndk/NdkCameraMetadata.cpp b/camera/ndk/NdkCameraMetadata.cpp
index 18718d3..85fe75b 100644
--- a/camera/ndk/NdkCameraMetadata.cpp
+++ b/camera/ndk/NdkCameraMetadata.cpp
@@ -39,6 +39,18 @@
}
EXPORT
+camera_status_t ACameraMetadata_getAllTags(
+ const ACameraMetadata* acm, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) {
+ ATRACE_CALL();
+ if (acm == nullptr || numTags == nullptr || tags == nullptr) {
+ ALOGE("%s: invalid argument! metadata %p, numTags %p, tags %p",
+ __FUNCTION__, acm, numTags, tags);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return acm->getTags(numTags, tags);
+}
+
+EXPORT
ACameraMetadata* ACameraMetadata_copy(const ACameraMetadata* src) {
ATRACE_CALL();
if (src == nullptr) {
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index 4fee09c..77b9a33 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -87,6 +87,18 @@
return req->settings->getConstEntry(tag, entry);
}
+EXPORT
+camera_status_t ACaptureRequest_getAllTags(
+ const ACaptureRequest* req, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) {
+ ATRACE_CALL();
+ if (req == nullptr || numTags == nullptr || tags == nullptr) {
+ ALOGE("%s: invalid argument! request %p, numTags %p, tags %p",
+ __FUNCTION__, req, numTags, tags);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return req->settings->getTags(numTags, tags);
+}
+
#define SET_ENTRY(NAME,NDK_TYPE) \
EXPORT \
camera_status_t ACaptureRequest_setEntry_##NAME( \
diff --git a/camera/ndk/impl/ACameraCaptureSession.cpp b/camera/ndk/impl/ACameraCaptureSession.cpp
index 7f1b75d..b741e46 100644
--- a/camera/ndk/impl/ACameraCaptureSession.cpp
+++ b/camera/ndk/impl/ACameraCaptureSession.cpp
@@ -39,6 +39,15 @@
void
ACameraCaptureSession::closeByApp() {
+ {
+ Mutex::Autolock _l(mSessionLock);
+ if (mClosedByApp) {
+ // Do not close twice
+ return;
+ }
+ mClosedByApp = true;
+ }
+
sp<CameraDevice> dev = getDeviceSp();
if (dev != nullptr) {
dev->lockDeviceForSessionOps();
diff --git a/camera/ndk/impl/ACameraCaptureSession.h b/camera/ndk/impl/ACameraCaptureSession.h
index 1db1b21..f20b324 100644
--- a/camera/ndk/impl/ACameraCaptureSession.h
+++ b/camera/ndk/impl/ACameraCaptureSession.h
@@ -103,6 +103,7 @@
const ACameraCaptureSession_stateCallbacks mUserSessionCallback;
const wp<CameraDevice> mDevice;
bool mIsClosed = false;
+ bool mClosedByApp = false;
bool mIdle = true;
Mutex mSessionLock;
};
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index 5f89fa3..6bca692 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -20,6 +20,8 @@
#include <vector>
#include <utility>
#include <inttypes.h>
+#include <android/hardware/ICameraService.h>
+#include <camera2/SubmitInfo.h>
#include <gui/Surface.h>
#include "ACameraDevice.h"
#include "ACameraMetadata.h"
@@ -62,15 +64,20 @@
// Setup looper thread to perfrom device callbacks to app
mCbLooper = new ALooper;
mCbLooper->setName("C2N-dev-looper");
- status_t ret = mCbLooper->start(
+ status_t err = mCbLooper->start(
/*runOnCallingThread*/false,
/*canCallJava*/ true,
PRIORITY_DEFAULT);
+ if (err != OK) {
+ ALOGE("%s: Unable to start camera device callback looper: %s (%d)",
+ __FUNCTION__, strerror(-err), err);
+ setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
+ }
mHandler = new CallbackHandler();
mCbLooper->registerHandler(mHandler);
- CameraMetadata metadata = mChars->mData;
- camera_metadata_entry entry = metadata.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
+ const CameraMetadata& metadata = mChars->getInternalData();
+ camera_metadata_ro_entry entry = metadata.find(ANDROID_REQUEST_PARTIAL_RESULT_COUNT);
if (entry.count != 1) {
ALOGW("%s: bad count %zu for partial result count", __FUNCTION__, entry.count);
mPartialResultCount = 1;
@@ -117,13 +124,14 @@
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
CameraMetadata rawRequest;
- status_t remoteRet = mRemote->createDefaultRequest(templateId, &rawRequest);
- if (remoteRet == BAD_VALUE) {
+ binder::Status remoteRet = mRemote->createDefaultRequest(templateId, &rawRequest);
+ if (remoteRet.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
ALOGW("Create capture request failed! template %d is not supported on this device",
templateId);
return ACAMERA_ERROR_UNSUPPORTED;
- } else if (remoteRet != OK) {
- ALOGE("Create capture request failed! error %d", remoteRet);
+ } else if (!remoteRet.isOk()) {
+ ALOGE("Create capture request failed: %s", remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
ACaptureRequest* outReq = new ACaptureRequest();
@@ -159,8 +167,6 @@
ACameraCaptureSession* newSession = new ACameraCaptureSession(
mNextSessionId++, outputs, callbacks, this);
- bool configureSucceeded = (ret == ACAMERA_OK);
-
// set new session as current session
newSession->incStrong((void *) ACameraDevice_createCaptureSession);
mCurrentSession = newSession;
@@ -201,8 +207,8 @@
return ret;
}
- // Form List/Vector of capture request
- List<sp<CaptureRequest> > requestList;
+ // Form two vectors of capture request, one for internal tracking
+ std::vector<hardware::camera2::CaptureRequest> requestList;
Vector<sp<CaptureRequest> > requestsV;
requestsV.setCapacity(numRequests);
for (int i = 0; i < numRequests; i++) {
@@ -216,7 +222,7 @@
ALOGE("Capture request without output target cannot be submitted!");
return ACAMERA_ERROR_INVALID_PARAMETER;
}
- requestList.push_back(req);
+ requestList.push_back(*(req.get()));
requestsV.push_back(req);
}
@@ -228,10 +234,11 @@
}
}
- int sequenceId;
- int64_t lastFrameNumber;
-
- sequenceId = mRemote->submitRequestList(requestList, isRepeating, &lastFrameNumber);
+ binder::Status remoteRet;
+ hardware::camera2::utils::SubmitInfo info;
+ remoteRet = mRemote->submitRequestList(requestList, isRepeating, &info);
+ int sequenceId = info.mRequestId;
+ int64_t lastFrameNumber = info.mLastFrameNumber;
if (sequenceId < 0) {
ALOGE("Camera %s submit request remote failure: ret %d", getId(), sequenceId);
return ACAMERA_ERROR_UNKNOWN;
@@ -272,7 +279,7 @@
const ACaptureRequest* request, /*out*/sp<CaptureRequest>& outReq) {
camera_status_t ret;
sp<CaptureRequest> req(new CaptureRequest());
- req->mMetadata = request->settings->mData;
+ req->mMetadata = request->settings->getInternalData();
req->mIsReprocess = false; // NDK does not support reprocessing yet
for (auto outputTarget : request->targets->mOutputs) {
@@ -371,9 +378,9 @@
mRepeatingSequenceId = REQUEST_ID_NONE;
int64_t lastFrameNumber;
- status_t remoteRet = mRemote->cancelRequest(repeatingSequenceId, &lastFrameNumber);
- if (remoteRet != OK) {
- ALOGE("Stop repeating request fails in remote! ret %d", remoteRet);
+ binder::Status remoteRet = mRemote->cancelRequest(repeatingSequenceId, &lastFrameNumber);
+ if (!remoteRet.isOk()) {
+ ALOGE("Stop repeating request fails in remote: %s", remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
checkRepeatingSequenceCompleteLocked(repeatingSequenceId, lastFrameNumber);
@@ -394,9 +401,9 @@
return ACAMERA_ERROR_INVALID_OPERATION;
}
- status_t remoteRet = mRemote->waitUntilIdle();
- if (remoteRet != OK) {
- ALOGE("Camera device %s waitUntilIdle failed! ret %d", getId(), remoteRet);
+ binder::Status remoteRet = mRemote->waitUntilIdle();
+ if (!remoteRet.isOk()) {
+ ALOGE("Camera device %s waitUntilIdle failed: %s", getId(), remoteRet.toString8().string());
// TODO: define a function to convert status_t -> camera_status_t
return ACAMERA_ERROR_UNKNOWN;
}
@@ -415,7 +422,7 @@
}
int value;
int err = (*anw->query)(anw, NATIVE_WINDOW_CONCRETE_TYPE, &value);
- if (value != NATIVE_WINDOW_SURFACE) {
+ if (err != OK || value != NATIVE_WINDOW_SURFACE) {
ALOGE("Error: ANativeWindow is not backed by Surface!");
return ACAMERA_ERROR_INVALID_PARAMETER;
}
@@ -433,7 +440,7 @@
}
int value;
int err = (*anw->query)(anw, NATIVE_WINDOW_CONCRETE_TYPE, &value);
- if (value != NATIVE_WINDOW_SURFACE) {
+ if (err != OK || value != NATIVE_WINDOW_SURFACE) {
ALOGE("Error: ANativeWindow is not backed by Surface!");
return ACAMERA_ERROR_INVALID_PARAMETER;
}
@@ -449,7 +456,6 @@
outputs = &emptyOutput;
}
- bool success = false;
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
return ret;
@@ -508,17 +514,18 @@
}
mIdle = true;
- status_t remoteRet = mRemote->beginConfigure();
- if (remoteRet != ACAMERA_OK) {
- ALOGE("Camera device %s begin configure failed, ret %d", getId(), remoteRet);
+ binder::Status remoteRet = mRemote->beginConfigure();
+ if (!remoteRet.isOk()) {
+ ALOGE("Camera device %s begin configure failed: %s", getId(), remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
// delete to-be-deleted streams
for (auto streamId : deleteList) {
remoteRet = mRemote->deleteStream(streamId);
- if (remoteRet != ACAMERA_OK) {
- ALOGE("Camera device %s fails to remove stream %d", getId(), streamId);
+ if (!remoteRet.isOk()) {
+ ALOGE("Camera device %s failed to remove stream %d: %s", getId(), streamId,
+ remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
mConfiguredOutputs.erase(streamId);
@@ -526,21 +533,23 @@
// add new streams
for (auto outConfig : addSet) {
- remoteRet = mRemote->createStream(outConfig);
- if (remoteRet < 0) {
- ALOGE("Camera device %s fails to create stream", getId());
+ int streamId;
+ remoteRet = mRemote->createStream(outConfig, &streamId);
+ if (!remoteRet.isOk()) {
+ ALOGE("Camera device %s failed to create stream: %s", getId(),
+ remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
- int streamId = remoteRet; // Weird, right?
mConfiguredOutputs.insert(std::make_pair(streamId, outConfig));
}
- remoteRet = mRemote->endConfigure();
- if (remoteRet == BAD_VALUE) {
- ALOGE("Camera device %s cannnot support app output configuration", getId());
+ remoteRet = mRemote->endConfigure(/*isConstrainedHighSpeed*/ false);
+ if (remoteRet.serviceSpecificErrorCode() == hardware::ICameraService::ERROR_ILLEGAL_ARGUMENT) {
+ ALOGE("Camera device %s cannnot support app output configuration: %s", getId(),
+ remoteRet.toString8().string());
return ACAMERA_ERROR_STREAM_CONFIGURE_FAIL;
- } else if (remoteRet != ACAMERA_OK) {
- ALOGE("Camera device %s end configure failed, ret %d", getId(), remoteRet);
+ } else if (!remoteRet.isOk()) {
+ ALOGE("Camera device %s end configure failed: %s", getId(), remoteRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN;
}
@@ -548,7 +557,7 @@
}
void
-CameraDevice::setRemoteDevice(sp<ICameraDeviceUser> remote) {
+CameraDevice::setRemoteDevice(sp<hardware::camera2::ICameraDeviceUser> remote) {
Mutex::Autolock _l(mDeviceLock);
mRemote = remote;
}
@@ -615,14 +624,14 @@
void
CameraDevice::onCaptureErrorLocked(
- ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ int32_t errorCode,
const CaptureResultExtras& resultExtras) {
int sequenceId = resultExtras.requestId;
int64_t frameNumber = resultExtras.frameNumber;
int32_t burstId = resultExtras.burstId;
// No way to report buffer error now
- if (errorCode == ICameraDeviceCallbacks::CameraErrorCode::ERROR_CAMERA_BUFFER) {
+ if (errorCode == hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER) {
ALOGE("Camera %s Lost output buffer for frame %" PRId64,
getId(), frameNumber);
return;
@@ -646,7 +655,7 @@
failure->reason = CAPTURE_FAILURE_REASON_ERROR;
failure->sequenceId = sequenceId;
failure->wasImageCaptured = (errorCode ==
- ICameraDeviceCallbacks::CameraErrorCode::ERROR_CAMERA_RESULT);
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT);
sp<AMessage> msg = new AMessage(kWhatCaptureFail, mHandler);
msg->setPointer(kContextKey, cbh.mCallbacks.context);
@@ -999,21 +1008,21 @@
/**
* Camera service callback implementation
*/
-void
+binder::Status
CameraDevice::ServiceCallback::onDeviceError(
- CameraErrorCode errorCode,
+ int32_t errorCode,
const CaptureResultExtras& resultExtras) {
ALOGD("Device error received, code %d, frame number %" PRId64 ", request ID %d, subseq ID %d",
errorCode, resultExtras.frameNumber, resultExtras.requestId, resultExtras.burstId);
-
+ binder::Status ret = binder::Status::ok();
sp<CameraDevice> dev = mDevice.promote();
if (dev == nullptr) {
- return; // device has been closed
+ return ret; // device has been closed
}
Mutex::Autolock _l(dev->mDeviceLock);
if (dev->mRemote == nullptr) {
- return; // device has been closed
+ return ret; // device has been closed
}
switch (errorCode) {
case ERROR_CAMERA_DISCONNECTED:
@@ -1061,24 +1070,26 @@
dev->onCaptureErrorLocked(errorCode, resultExtras);
break;
}
+ return ret;
}
-void
+binder::Status
CameraDevice::ServiceCallback::onDeviceIdle() {
ALOGV("Camera is now idle");
+ binder::Status ret = binder::Status::ok();
sp<CameraDevice> dev = mDevice.promote();
if (dev == nullptr) {
- return; // device has been closed
+ return ret; // device has been closed
}
Mutex::Autolock _l(dev->mDeviceLock);
if (dev->isClosed() || dev->mRemote == nullptr) {
- return;
+ return ret;
}
if (dev->mIdle) {
// Already in idle state. Possibly other thread did waitUntilIdle
- return;
+ return ret;
}
if (dev->mCurrentSession != nullptr) {
@@ -1086,7 +1097,7 @@
if (dev->mBusySession != dev->mCurrentSession) {
ALOGE("Current session != busy session");
dev->setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
- return;
+ return ret;
}
sp<AMessage> msg = new AMessage(kWhatSessionStateCb, dev->mHandler);
msg->setPointer(kContextKey, dev->mBusySession->mUserSessionCallback.context);
@@ -1098,23 +1109,25 @@
msg->post();
}
dev->mIdle = true;
+ return ret;
}
-void
+binder::Status
CameraDevice::ServiceCallback::onCaptureStarted(
const CaptureResultExtras& resultExtras,
int64_t timestamp) {
+ binder::Status ret = binder::Status::ok();
+
sp<CameraDevice> dev = mDevice.promote();
if (dev == nullptr) {
- return; // device has been closed
+ return ret; // device has been closed
}
Mutex::Autolock _l(dev->mDeviceLock);
if (dev->isClosed() || dev->mRemote == nullptr) {
- return;
+ return ret;
}
int sequenceId = resultExtras.requestId;
- int64_t frameNumber = resultExtras.frameNumber;
int32_t burstId = resultExtras.burstId;
auto it = dev->mSequenceCallbackMap.find(sequenceId);
@@ -1136,15 +1149,18 @@
msg->setInt64(kTimeStampKey, timestamp);
msg->post();
}
+ return ret;
}
-void
+binder::Status
CameraDevice::ServiceCallback::onResultReceived(
const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) {
+ binder::Status ret = binder::Status::ok();
+
sp<CameraDevice> dev = mDevice.promote();
if (dev == nullptr) {
- return; // device has been closed
+ return ret; // device has been closed
}
int sequenceId = resultExtras.requestId;
int64_t frameNumber = resultExtras.frameNumber;
@@ -1157,7 +1173,7 @@
Mutex::Autolock _l(dev->mDeviceLock);
if (dev->mRemote == nullptr) {
- return; // device has been disconnected
+ return ret; // device has been disconnected
}
if (dev->isClosed()) {
@@ -1165,7 +1181,7 @@
dev->mFrameNumberTracker.updateTracker(frameNumber, /*isError*/false);
}
// early return to avoid callback sent to closed devices
- return;
+ return ret;
}
CameraMetadata metadataCopy = metadata;
@@ -1201,12 +1217,14 @@
dev->mFrameNumberTracker.updateTracker(frameNumber, /*isError*/false);
dev->checkAndFireSequenceCompleteLocked();
}
+
+ return ret;
}
-void
+binder::Status
CameraDevice::ServiceCallback::onPrepared(int) {
// Prepare not yet implemented in NDK
- return;
+ return binder::Status::ok();
}
} // namespace android
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index b73e621..46243b9 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -26,20 +26,18 @@
#include <utils/List.h>
#include <utils/Vector.h>
+#include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AMessage.h>
#include <camera/CaptureResult.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
-#include <camera/camera2/ICameraDeviceUser.h>
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/CaptureRequest.h>
#include <NdkCameraDevice.h>
#include "ACameraMetadata.h"
-using namespace android;
-
namespace android {
// Wrap ACameraCaptureFailure so it can be ref-counter
@@ -64,24 +62,26 @@
/*out*/ACameraCaptureSession** session);
// Callbacks from camera service
- class ServiceCallback : public BnCameraDeviceCallbacks {
+ class ServiceCallback : public hardware::camera2::BnCameraDeviceCallbacks {
public:
ServiceCallback(CameraDevice* device) : mDevice(device) {}
- void onDeviceError(CameraErrorCode errorCode,
+ binder::Status onDeviceError(int32_t errorCode,
const CaptureResultExtras& resultExtras) override;
- void onDeviceIdle() override;
- void onCaptureStarted(const CaptureResultExtras& resultExtras,
+ binder::Status onDeviceIdle() override;
+ binder::Status onCaptureStarted(const CaptureResultExtras& resultExtras,
int64_t timestamp) override;
- void onResultReceived(const CameraMetadata& metadata,
+ binder::Status onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) override;
- void onPrepared(int streamId) override;
+ binder::Status onPrepared(int streamId) override;
private:
const wp<CameraDevice> mDevice;
};
- inline sp<ICameraDeviceCallbacks> getServiceCallback() { return mServiceCallback; };
+ inline sp<hardware::camera2::ICameraDeviceCallbacks> getServiceCallback() {
+ return mServiceCallback;
+ };
// Camera device is only functional after remote being set
- void setRemoteDevice(sp<ICameraDeviceUser> remote);
+ void setRemoteDevice(sp<hardware::camera2::ICameraDeviceUser> remote);
inline ACameraDevice* getWrapper() const { return mWrapper; };
@@ -155,14 +155,14 @@
bool mInError;
camera_status_t mError;
void onCaptureErrorLocked(
- ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ int32_t errorCode,
const CaptureResultExtras& resultExtras);
bool mIdle;
// This will avoid a busy session being deleted before it's back to idle state
sp<ACameraCaptureSession> mBusySession;
- sp<ICameraDeviceUser> mRemote;
+ sp<hardware::camera2::ICameraDeviceUser> mRemote;
// Looper thread to handle callback to app
sp<ALooper> mCbLooper;
@@ -294,17 +294,17 @@
/***********************
* Device interal APIs *
***********************/
- inline sp<ICameraDeviceCallbacks> getServiceCallback() {
+ inline android::sp<android::hardware::camera2::ICameraDeviceCallbacks> getServiceCallback() {
return mDevice->getServiceCallback();
};
// Camera device is only functional after remote being set
- inline void setRemoteDevice(sp<ICameraDeviceUser> remote) {
+ inline void setRemoteDevice(android::sp<android::hardware::camera2::ICameraDeviceUser> remote) {
mDevice->setRemoteDevice(remote);
}
private:
- sp<CameraDevice> mDevice;
+ android::sp<android::CameraDevice> mDevice;
};
#endif // _ACAMERA_DEVICE_H
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index ed5c3ba..24d5282 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -71,7 +71,7 @@
mCameraService.clear();
}
-sp<ICameraService> CameraManagerGlobal::getCameraService() {
+sp<hardware::ICameraService> CameraManagerGlobal::getCameraService() {
Mutex::Autolock _l(mLock);
if (mCameraService.get() == nullptr) {
sp<IServiceManager> sm = defaultServiceManager();
@@ -88,16 +88,22 @@
mDeathNotifier = new DeathNotifier(this);
}
binder->linkToDeath(mDeathNotifier);
- mCameraService = interface_cast<ICameraService>(binder);
+ mCameraService = interface_cast<hardware::ICameraService>(binder);
// Setup looper thread to perfrom availiability callbacks
if (mCbLooper == nullptr) {
mCbLooper = new ALooper;
mCbLooper->setName("C2N-mgr-looper");
- status_t ret = mCbLooper->start(
+ status_t err = mCbLooper->start(
/*runOnCallingThread*/false,
/*canCallJava*/ true,
PRIORITY_DEFAULT);
+ if (err != OK) {
+ ALOGE("%s: Unable to start camera service listener looper: %s (%d)",
+ __FUNCTION__, strerror(-err), err);
+ mCbLooper.clear();
+ return nullptr;
+ }
if (mHandler == nullptr) {
mHandler = new CallbackHandler();
}
@@ -111,22 +117,23 @@
mCameraService->addListener(mCameraServiceListener);
// setup vendor tags
- sp<VendorTagDescriptor> desc;
- status_t ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc);
+ sp<VendorTagDescriptor> desc = new VendorTagDescriptor();
+ binder::Status ret = mCameraService->getCameraVendorTagDescriptor(/*out*/desc.get());
- if (ret == OK) {
- ret = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
- if (ret != OK) {
+ if (ret.isOk()) {
+ status_t err = VendorTagDescriptor::setAsGlobalVendorTagDescriptor(desc);
+ if (err != OK) {
ALOGE("%s: Failed to set vendor tag descriptors, received error %s (%d)",
- __FUNCTION__, strerror(-ret), ret);
+ __FUNCTION__, strerror(-err), err);
}
- } else if (ret == -EOPNOTSUPP) {
+ } else if (ret.serviceSpecificErrorCode() ==
+ hardware::ICameraService::ERROR_DEPRECATED_HAL) {
ALOGW("%s: Camera HAL too old; does not support vendor tags",
__FUNCTION__);
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
} else {
- ALOGE("%s: Failed to get vendor tag descriptors, received error %s (%d)",
- __FUNCTION__, strerror(-ret), ret);
+ ALOGE("%s: Failed to get vendor tag descriptors: %s",
+ __FUNCTION__, ret.toString8().string());
}
}
ALOGE_IF(mCameraService == nullptr, "no CameraService!?");
@@ -142,7 +149,7 @@
for (auto pair : cm->mDeviceStatusMap) {
int32_t cameraId = pair.first;
cm->onStatusChangedLocked(
- ICameraServiceListener::STATUS_NOT_PRESENT, cameraId);
+ CameraServiceListener::STATUS_NOT_PRESENT, cameraId);
}
cm->mCameraService.clear();
// TODO: consider adding re-connect call here?
@@ -158,7 +165,7 @@
if (pair.second) {
for (auto pair : mDeviceStatusMap) {
int32_t cameraId = pair.first;
- Status status = pair.second;
+ int32_t status = pair.second;
sp<AMessage> msg = new AMessage(kWhatSendSingleCallback, mHandler);
ACameraManager_AvailabilityCallback cb = isStatusAvailable(status) ?
@@ -178,21 +185,21 @@
mCallbacks.erase(cb);
}
-bool CameraManagerGlobal::validStatus(Status status) {
+bool CameraManagerGlobal::validStatus(int32_t status) {
switch (status) {
- case ICameraServiceListener::STATUS_NOT_PRESENT:
- case ICameraServiceListener::STATUS_PRESENT:
- case ICameraServiceListener::STATUS_ENUMERATING:
- case ICameraServiceListener::STATUS_NOT_AVAILABLE:
+ case hardware::ICameraServiceListener::STATUS_NOT_PRESENT:
+ case hardware::ICameraServiceListener::STATUS_PRESENT:
+ case hardware::ICameraServiceListener::STATUS_ENUMERATING:
+ case hardware::ICameraServiceListener::STATUS_NOT_AVAILABLE:
return true;
default:
return false;
}
}
-bool CameraManagerGlobal::isStatusAvailable(Status status) {
+bool CameraManagerGlobal::isStatusAvailable(int32_t status) {
switch (status) {
- case ICameraServiceListener::STATUS_PRESENT:
+ case hardware::ICameraServiceListener::STATUS_PRESENT:
return true;
default:
return false;
@@ -239,31 +246,32 @@
}
}
-void CameraManagerGlobal::CameraServiceListener::onStatusChanged(
- Status status, int32_t cameraId) {
+binder::Status CameraManagerGlobal::CameraServiceListener::onStatusChanged(
+ int32_t status, int32_t cameraId) {
sp<CameraManagerGlobal> cm = mCameraManager.promote();
- if (cm == nullptr) {
+ if (cm != nullptr) {
+ cm->onStatusChanged(status, cameraId);
+ } else {
ALOGE("Cannot deliver status change. Global camera manager died");
- return;
}
- cm->onStatusChanged(status, cameraId);
+ return binder::Status::ok();
}
void CameraManagerGlobal::onStatusChanged(
- Status status, int32_t cameraId) {
+ int32_t status, int32_t cameraId) {
Mutex::Autolock _l(mLock);
onStatusChangedLocked(status, cameraId);
}
void CameraManagerGlobal::onStatusChangedLocked(
- Status status, int32_t cameraId) {
+ int32_t status, int32_t cameraId) {
if (!validStatus(status)) {
ALOGE("%s: Invalid status %d", __FUNCTION__, status);
return;
}
bool firstStatus = (mDeviceStatusMap.count(cameraId) == 0);
- Status oldStatus = firstStatus ?
+ int32_t oldStatus = firstStatus ?
status : // first status
mDeviceStatusMap[cameraId];
@@ -296,19 +304,28 @@
if (mCachedCameraIdList.numCameras == kCameraIdListNotInit) {
int numCameras = 0;
Vector<char *> cameraIds;
- sp<ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+ sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
if (cs == nullptr) {
ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
// Get number of cameras
- int numAllCameras = cs->getNumberOfCameras(ICameraService::CAMERA_TYPE_ALL);
+ int numAllCameras = 0;
+ binder::Status serviceRet = cs->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL,
+ &numAllCameras);
+ if (!serviceRet.isOk()) {
+ ALOGE("%s: Error getting camera count: %s", __FUNCTION__,
+ serviceRet.toString8().string());
+ numAllCameras = 0;
+ }
// Filter API2 compatible cameras and push to cameraIds
for (int i = 0; i < numAllCameras; i++) {
// TODO: Only suppot HALs that supports API2 directly now
- status_t camera2Support = cs->supportsCameraApi(i, ICameraService::API_VERSION_2);
+ bool camera2Support = false;
+ serviceRet = cs->supportsCameraApi(i, hardware::ICameraService::API_VERSION_2,
+ &camera2Support);
char buf[kMaxCameraIdLen];
- if (camera2Support == OK) {
+ if (camera2Support) {
numCameras++;
mCameraIds.insert(i);
snprintf(buf, sizeof(buf), "%d", i);
@@ -401,15 +418,16 @@
ALOGE("%s: Camera ID %s does not exist!", __FUNCTION__, cameraIdStr);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
- sp<ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+ sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
if (cs == nullptr) {
ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
CameraMetadata rawMetadata;
- status_t serviceRet = cs->getCameraCharacteristics(cameraId, &rawMetadata);
- if (serviceRet != OK) {
- ALOGE("Get camera characteristics from camera service failed! Err %d", ret);
+ binder::Status serviceRet = cs->getCameraCharacteristics(cameraId, &rawMetadata);
+ if (!serviceRet.isOk()) {
+ ALOGE("Get camera characteristics from camera service failed: %s",
+ serviceRet.toString8().string());
return ACAMERA_ERROR_UNKNOWN; // should not reach here
}
@@ -436,24 +454,23 @@
ACameraDevice* device = new ACameraDevice(cameraId, callback, std::move(chars));
- sp<ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
+ sp<hardware::ICameraService> cs = CameraManagerGlobal::getInstance().getCameraService();
if (cs == nullptr) {
ALOGE("%s: Cannot reach camera service!", __FUNCTION__);
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
int id = atoi(cameraId);
- sp<ICameraDeviceCallbacks> callbacks = device->getServiceCallback();
- sp<ICameraDeviceUser> deviceRemote;
+ sp<hardware::camera2::ICameraDeviceCallbacks> callbacks = device->getServiceCallback();
+ sp<hardware::camera2::ICameraDeviceUser> deviceRemote;
// No way to get package name from native.
// Send a zero length package name and let camera service figure it out from UID
- status_t serviceRet = cs->connectDevice(
+ binder::Status serviceRet = cs->connectDevice(
callbacks, id, String16(""),
- ICameraService::USE_CALLING_UID, /*out*/deviceRemote);
+ hardware::ICameraService::USE_CALLING_UID, /*out*/&deviceRemote);
- if (serviceRet != OK) {
- ALOGE("%s: connect camera device failed! err %d", __FUNCTION__, serviceRet);
- // TODO: generate better error message here
+ if (!serviceRet.isOk()) {
+ ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
delete device;
return ACAMERA_ERROR_CAMERA_DISCONNECTED;
}
@@ -476,4 +493,3 @@
delete[] mCachedCameraIdList.cameraIds;
}
}
-
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index b68685d..3f2262f 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -19,9 +19,9 @@
#include "NdkCameraManager.h"
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/BnCameraServiceListener.h>
#include <camera/CameraMetadata.h>
-#include <camera/ICameraService.h>
-#include <camera/ICameraServiceListener.h>
#include <binder/IServiceManager.h>
#include <utils/StrongPointer.h>
#include <utils/Mutex.h>
@@ -33,8 +33,6 @@
#include <set>
#include <map>
-using namespace android;
-
namespace android {
/**
@@ -47,7 +45,7 @@
class CameraManagerGlobal final : public RefBase {
public:
static CameraManagerGlobal& getInstance();
- sp<ICameraService> getCameraService();
+ sp<hardware::ICameraService> getCameraService();
void registerAvailabilityCallback(
const ACameraManager_AvailabilityCallbacks *callback);
@@ -55,7 +53,7 @@
const ACameraManager_AvailabilityCallbacks *callback);
private:
- sp<ICameraService> mCameraService;
+ sp<hardware::ICameraService> mCameraService;
const int kCameraServicePollDelay = 500000; // 0.5s
const char* kCameraServiceName = "media.camera";
Mutex mLock;
@@ -71,13 +69,16 @@
};
sp<DeathNotifier> mDeathNotifier;
- class CameraServiceListener final : public BnCameraServiceListener {
+ class CameraServiceListener final : public hardware::BnCameraServiceListener {
public:
CameraServiceListener(CameraManagerGlobal* cm) : mCameraManager(cm) {}
- virtual void onStatusChanged(Status status, int32_t cameraId);
+ virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId);
// Torch API not implemented yet
- virtual void onTorchStatusChanged(TorchStatus, const String16&) {};
+ virtual binder::Status onTorchStatusChanged(int32_t, const String16&) {
+ return binder::Status::ok();
+ }
+
private:
const wp<CameraManagerGlobal> mCameraManager;
};
@@ -132,15 +133,14 @@
sp<CallbackHandler> mHandler;
sp<ALooper> mCbLooper; // Looper thread where callbacks actually happen on
- typedef ICameraServiceListener::Status Status;
- void onStatusChanged(Status status, int32_t cameraId);
- void onStatusChangedLocked(Status status, int32_t cameraId);
+ void onStatusChanged(int32_t status, int32_t cameraId);
+ void onStatusChangedLocked(int32_t status, int32_t cameraId);
// Utils for status
- static bool validStatus(Status status);
- static bool isStatusAvailable(Status status);
+ static bool validStatus(int32_t status);
+ static bool isStatusAvailable(int32_t status);
// Map camera_id -> status
- std::map<int32_t, Status> mDeviceStatusMap;
+ std::map<int32_t, int32_t> mDeviceStatusMap;
// For the singleton instance
static Mutex sLock;
@@ -158,7 +158,7 @@
struct ACameraManager {
ACameraManager() :
mCachedCameraIdList({kCameraIdListNotInit, nullptr}),
- mGlobalManager(&(CameraManagerGlobal::getInstance())) {}
+ mGlobalManager(&(android::CameraManagerGlobal::getInstance())) {}
~ACameraManager();
camera_status_t getCameraIdList(ACameraIdList** cameraIdList);
static void deleteCameraIdList(ACameraIdList* cameraIdList);
@@ -175,10 +175,10 @@
enum {
kCameraIdListNotInit = -1
};
- Mutex mLock;
+ android::Mutex mLock;
std::set<int> mCameraIds; // Init by getOrCreateCameraIdListLocked
ACameraIdList mCachedCameraIdList; // Init by getOrCreateCameraIdListLocked
- sp<CameraManagerGlobal> mGlobalManager;
+ android::sp<android::CameraManagerGlobal> mGlobalManager;
};
#endif //_ACAMERA_MANAGER_H
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index fbc8d19..8366ade 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -162,6 +162,8 @@
return ACAMERA_ERROR_INVALID_PARAMETER;
}
+ Mutex::Autolock _l(mLock);
+
camera_metadata_ro_entry rawEntry = mData.find(tag);
if (rawEntry.count == 0) {
ALOGE("%s: cannot find metadata tag %d", __FUNCTION__, tag);
@@ -204,6 +206,38 @@
return updateImpl<camera_metadata_rational_t>(tag, count, data);
}
+camera_status_t
+ACameraMetadata::getTags(/*out*/int32_t* numTags,
+ /*out*/const uint32_t** tags) const {
+ Mutex::Autolock _l(mLock);
+ if (mTags.size() == 0) {
+ size_t entry_count = mData.entryCount();
+ mTags.setCapacity(entry_count);
+ const camera_metadata_t* rawMetadata = mData.getAndLock();
+ for (size_t i = 0; i < entry_count; i++) {
+ camera_metadata_ro_entry_t entry;
+ int ret = get_camera_metadata_ro_entry(rawMetadata, i, &entry);
+ if (ret != 0) {
+ ALOGE("%s: error reading metadata index %zu", __FUNCTION__, i);
+ return ACAMERA_ERROR_UNKNOWN;
+ }
+ // Hide system key from users
+ if (sSystemTags.count(entry.tag) == 0) {
+ mTags.push_back(entry.tag);
+ }
+ }
+ mData.unlock(rawMetadata);
+ }
+
+ *numTags = mTags.size();
+ *tags = mTags.array();
+ return ACAMERA_OK;
+}
+
+const CameraMetadata&
+ACameraMetadata::getInternalData() {
+ return mData;
+}
// TODO: some of key below should be hidden from user
// ex: ACAMERA_REQUEST_ID and ACAMERA_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR
@@ -286,6 +320,49 @@
}
}
+// System tags that should be hidden from users
+std::unordered_set<uint32_t> ACameraMetadata::sSystemTags ({
+ ANDROID_CONTROL_SCENE_MODE_OVERRIDES,
+ ANDROID_CONTROL_AE_PRECAPTURE_ID,
+ ANDROID_CONTROL_AF_TRIGGER_ID,
+ ANDROID_DEMOSAIC_MODE,
+ ANDROID_EDGE_STRENGTH,
+ ANDROID_FLASH_FIRING_POWER,
+ ANDROID_FLASH_FIRING_TIME,
+ ANDROID_FLASH_COLOR_TEMPERATURE,
+ ANDROID_FLASH_MAX_ENERGY,
+ ANDROID_FLASH_INFO_CHARGE_DURATION,
+ ANDROID_JPEG_MAX_SIZE,
+ ANDROID_JPEG_SIZE,
+ ANDROID_NOISE_REDUCTION_STRENGTH,
+ ANDROID_QUIRKS_METERING_CROP_REGION,
+ ANDROID_QUIRKS_TRIGGER_AF_WITH_AUTO,
+ ANDROID_QUIRKS_USE_ZSL_FORMAT,
+ ANDROID_REQUEST_INPUT_STREAMS,
+ ANDROID_REQUEST_METADATA_MODE,
+ ANDROID_REQUEST_OUTPUT_STREAMS,
+ ANDROID_REQUEST_TYPE,
+ ANDROID_REQUEST_MAX_NUM_REPROCESS_STREAMS,
+ ANDROID_SCALER_AVAILABLE_RAW_MIN_DURATIONS,
+ ANDROID_SCALER_AVAILABLE_RAW_SIZES,
+ ANDROID_SENSOR_BASE_GAIN_FACTOR,
+ ANDROID_SENSOR_PROFILE_HUE_SAT_MAP_DIMENSIONS,
+ ANDROID_SENSOR_TEMPERATURE,
+ ANDROID_SENSOR_PROFILE_HUE_SAT_MAP,
+ ANDROID_SENSOR_PROFILE_TONE_CURVE,
+ ANDROID_SENSOR_OPAQUE_RAW_SIZE,
+ ANDROID_SHADING_STRENGTH,
+ ANDROID_STATISTICS_HISTOGRAM_MODE,
+ ANDROID_STATISTICS_SHARPNESS_MAP_MODE,
+ ANDROID_STATISTICS_HISTOGRAM,
+ ANDROID_STATISTICS_SHARPNESS_MAP,
+ ANDROID_STATISTICS_INFO_HISTOGRAM_BUCKET_COUNT,
+ ANDROID_STATISTICS_INFO_MAX_HISTOGRAM_COUNT,
+ ANDROID_STATISTICS_INFO_MAX_SHARPNESS_MAP_VALUE,
+ ANDROID_STATISTICS_INFO_SHARPNESS_MAP_SIZE,
+ ANDROID_DEPTH_MAX_DEPTH_SAMPLES,
+});
+
/*~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 * End generated code
 *~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
diff --git a/camera/ndk/impl/ACameraMetadata.h b/camera/ndk/impl/ACameraMetadata.h
index 442e1dd..ab651a1 100644
--- a/camera/ndk/impl/ACameraMetadata.h
+++ b/camera/ndk/impl/ACameraMetadata.h
@@ -16,8 +16,12 @@
#ifndef _ACAMERA_METADATA_H
#define _ACAMERA_METADATA_H
+#include <unordered_set>
+
#include <sys/types.h>
+#include <utils/Mutex.h>
#include <utils/RefBase.h>
+#include <utils/Vector.h>
#include <camera/CameraMetadata.h>
#include "NdkCameraMetadata.h"
@@ -51,12 +55,17 @@
camera_status_t update(uint32_t tag, uint32_t count, const int64_t* data);
camera_status_t update(uint32_t tag, uint32_t count, const ACameraMetadata_rational* data);
+ camera_status_t getTags(/*out*/int32_t* numTags,
+ /*out*/const uint32_t** tags) const;
+
bool isNdkSupportedCapability(const int32_t capability);
- inline bool isVendorTag(const uint32_t tag);
- bool isCaptureRequestTag(const uint32_t tag);
+ static inline bool isVendorTag(const uint32_t tag);
+ static bool isCaptureRequestTag(const uint32_t tag);
void filterUnsupportedFeatures(); // Hide features not yet supported by NDK
void filterStreamConfigurations(); // Hide input streams, translate hal format to NDK formats
+ const CameraMetadata& getInternalData();
+
template<typename INTERNAL_T, typename NDK_T>
camera_status_t updateImpl(uint32_t tag, uint32_t count, const NDK_T* data) {
if (mType != ACM_REQUEST) {
@@ -68,18 +77,27 @@
return ACAMERA_ERROR_INVALID_PARAMETER;
}
+ Mutex::Autolock _l(mLock);
+
// Here we have to use reinterpret_cast because the NDK data type is
// exact copy of internal data type but they do not inherit from each other
status_t ret = mData.update(tag, reinterpret_cast<const INTERNAL_T*>(data), count);
if (ret == OK) {
+ mTags.clear();
return ACAMERA_OK;
} else {
return ACAMERA_ERROR_INVALID_PARAMETER;
}
}
- CameraMetadata mData;
+ private:
+ // guard access of public APIs: get/update/getTags
+ mutable Mutex mLock;
+ CameraMetadata mData;
+ mutable Vector<uint32_t> mTags; // updated in getTags, cleared by update
const ACAMERA_METADATA_TYPE mType;
+
+ static std::unordered_set<uint32_t> sSystemTags;
};
#endif // _ACAMERA_METADATA_H
diff --git a/camera/tests/Android.mk b/camera/tests/Android.mk
index 3777d94..8019999 100644
--- a/camera/tests/Android.mk
+++ b/camera/tests/Android.mk
@@ -32,14 +32,11 @@
libbinder
LOCAL_C_INCLUDES += \
- system/media/camera/include \
system/media/private/camera/include \
system/media/camera/tests \
frameworks/av/services/camera/libcameraservice \
- frameworks/av/include/camera \
- frameworks/native/include \
-LOCAL_CFLAGS += -Wall -Wextra
+LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_MODULE:= camera_client_test
LOCAL_MODULE_TAGS := tests
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index a36d2f9..0b687b4 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -32,12 +32,15 @@
#include <hardware/gralloc.h>
#include <camera/CameraMetadata.h>
-#include <camera/ICameraService.h>
-#include <camera/ICameraServiceListener.h>
+#include <android/hardware/ICameraService.h>
+#include <android/hardware/ICameraServiceListener.h>
+#include <android/hardware/BnCameraServiceListener.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <android/hardware/camera2/BnCameraDeviceCallbacks.h>
#include <camera/camera2/CaptureRequest.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
#include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SubmitInfo.h>
#include <gui/BufferItemConsumer.h>
#include <gui/IGraphicBufferProducer.h>
@@ -60,25 +63,27 @@
#define IDLE_TIMEOUT 2000000000 // ns
// Stub listener implementation
-class TestCameraServiceListener : public BnCameraServiceListener {
- std::map<String16, TorchStatus> mCameraTorchStatuses;
- std::map<int32_t, Status> mCameraStatuses;
+class TestCameraServiceListener : public hardware::BnCameraServiceListener {
+ std::map<String16, int32_t> mCameraTorchStatuses;
+ std::map<int32_t, int32_t> mCameraStatuses;
mutable Mutex mLock;
mutable Condition mCondition;
mutable Condition mTorchCondition;
public:
virtual ~TestCameraServiceListener() {};
- virtual void onStatusChanged(Status status, int32_t cameraId) {
+ virtual binder::Status onStatusChanged(int32_t status, int32_t cameraId) {
Mutex::Autolock l(mLock);
mCameraStatuses[cameraId] = status;
mCondition.broadcast();
+ return binder::Status::ok();
};
- virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) {
+ virtual binder::Status onTorchStatusChanged(int32_t status, const String16& cameraId) {
Mutex::Autolock l(mLock);
mCameraTorchStatuses[cameraId] = status;
mTorchCondition.broadcast();
+ return binder::Status::ok();
};
bool waitForNumCameras(size_t num) const {
@@ -96,7 +101,7 @@
return true;
};
- bool waitForTorchState(TorchStatus status, int32_t cameraId) const {
+ bool waitForTorchState(int32_t status, int32_t cameraId) const {
Mutex::Autolock l(mLock);
const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
@@ -116,27 +121,27 @@
return true;
};
- TorchStatus getTorchStatus(int32_t cameraId) const {
+ int32_t getTorchStatus(int32_t cameraId) const {
Mutex::Autolock l(mLock);
const auto& iter = mCameraTorchStatuses.find(String16(String8::format("%d", cameraId)));
if (iter == mCameraTorchStatuses.end()) {
- return ICameraServiceListener::TORCH_STATUS_UNKNOWN;
+ return hardware::ICameraServiceListener::TORCH_STATUS_UNKNOWN;
}
return iter->second;
};
- Status getStatus(int32_t cameraId) const {
+ int32_t getStatus(int32_t cameraId) const {
Mutex::Autolock l(mLock);
const auto& iter = mCameraStatuses.find(cameraId);
if (iter == mCameraStatuses.end()) {
- return ICameraServiceListener::STATUS_UNKNOWN;
+ return hardware::ICameraServiceListener::STATUS_UNKNOWN;
}
return iter->second;
};
};
// Callback implementation
-class TestCameraDeviceCallbacks : public BnCameraDeviceCallbacks {
+class TestCameraDeviceCallbacks : public hardware::camera2::BnCameraDeviceCallbacks {
public:
enum Status {
IDLE,
@@ -149,8 +154,8 @@
protected:
bool mError;
- Status mLastStatus;
- mutable std::vector<Status> mStatusesHit;
+ int32_t mLastStatus;
+ mutable std::vector<int32_t> mStatusesHit;
mutable Mutex mLock;
mutable Condition mStatusCondition;
public:
@@ -158,7 +163,7 @@
virtual ~TestCameraDeviceCallbacks() {}
- virtual void onDeviceError(CameraErrorCode errorCode,
+ virtual binder::Status onDeviceError(int errorCode,
const CaptureResultExtras& resultExtras) {
(void) resultExtras;
ALOGE("%s: onDeviceError occurred with: %d", __FUNCTION__, static_cast<int>(errorCode));
@@ -167,16 +172,18 @@
mLastStatus = ERROR;
mStatusesHit.push_back(mLastStatus);
mStatusCondition.broadcast();
+ return binder::Status::ok();
}
- virtual void onDeviceIdle() {
+ virtual binder::Status onDeviceIdle() {
Mutex::Autolock l(mLock);
mLastStatus = IDLE;
mStatusesHit.push_back(mLastStatus);
mStatusCondition.broadcast();
+ return binder::Status::ok();
}
- virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
+ virtual binder::Status onCaptureStarted(const CaptureResultExtras& resultExtras,
int64_t timestamp) {
(void) resultExtras;
(void) timestamp;
@@ -184,10 +191,11 @@
mLastStatus = RUNNING;
mStatusesHit.push_back(mLastStatus);
mStatusCondition.broadcast();
+ return binder::Status::ok();
}
- virtual void onResultReceived(const CameraMetadata& metadata,
+ virtual binder::Status onResultReceived(const CameraMetadata& metadata,
const CaptureResultExtras& resultExtras) {
(void) metadata;
(void) resultExtras;
@@ -195,14 +203,16 @@
mLastStatus = SENT_RESULT;
mStatusesHit.push_back(mLastStatus);
mStatusCondition.broadcast();
+ return binder::Status::ok();
}
- virtual void onPrepared(int streamId) {
+ virtual binder::Status onPrepared(int streamId) {
(void) streamId;
Mutex::Autolock l(mLock);
mLastStatus = PREPARED;
mStatusesHit.push_back(mLastStatus);
mStatusCondition.broadcast();
+ return binder::Status::ok();
}
// Test helper functions:
@@ -269,89 +279,106 @@
gDeathNotifier = new DeathNotifier();
}
binder->linkToDeath(gDeathNotifier);
- sp<ICameraService> service = interface_cast<ICameraService>(binder);
+ sp<hardware::ICameraService> service =
+ interface_cast<hardware::ICameraService>(binder);
+ binder::Status res;
- int32_t numCameras = service->getNumberOfCameras(ICameraService::CAMERA_TYPE_ALL);
+ int32_t numCameras = 0;
+ res = service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_ALL, &numCameras);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_LE(0, numCameras);
// Check listener binder calls
sp<TestCameraServiceListener> listener(new TestCameraServiceListener());
- EXPECT_EQ(OK, service->addListener(listener));
+ res = service->addListener(listener);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(listener->waitForNumCameras(numCameras));
for (int32_t i = 0; i < numCameras; i++) {
+ bool isSupported = false;
+ res = service->supportsCameraApi(i,
+ hardware::ICameraService::API_VERSION_2, &isSupported);
+ EXPECT_TRUE(res.isOk()) << res;
+
// We only care about binder calls for the Camera2 API. Camera1 is deprecated.
- status_t camera2Support = service->supportsCameraApi(i, ICameraService::API_VERSION_2);
- if (camera2Support != OK) {
- EXPECT_EQ(-EOPNOTSUPP, camera2Support);
+ if (!isSupported) {
continue;
}
// Check metadata binder call
CameraMetadata metadata;
- EXPECT_EQ(OK, service->getCameraCharacteristics(i, &metadata));
+ res = service->getCameraCharacteristics(i, &metadata);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_FALSE(metadata.isEmpty());
// Make sure we're available, or skip device tests otherwise
- ICameraServiceListener::Status s = listener->getStatus(i);
- EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
- if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+ int32_t s = listener->getStatus(i);
+ EXPECT_EQ(::android::hardware::ICameraServiceListener::STATUS_PRESENT, s);
+ if (s != ::android::hardware::ICameraServiceListener::STATUS_PRESENT) {
continue;
}
// Check connect binder calls
sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
- sp<ICameraDeviceUser> device;
- EXPECT_EQ(OK, service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
- ICameraService::USE_CALLING_UID, /*out*/device));
+ sp<hardware::camera2::ICameraDeviceUser> device;
+ res = service->connectDevice(callbacks, i, String16("meeeeeeeee!"),
+ hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
+ EXPECT_TRUE(res.isOk()) << res;
ASSERT_NE(nullptr, device.get());
device->disconnect();
EXPECT_FALSE(callbacks->hadError());
- ICameraServiceListener::TorchStatus torchStatus = listener->getTorchStatus(i);
- if (torchStatus == ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
+ int32_t torchStatus = listener->getTorchStatus(i);
+ if (torchStatus == hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF) {
// Check torch calls
- EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
- /*enabled*/true, callbacks));
+ res = service->setTorchMode(String16(String8::format("%d", i)),
+ /*enabled*/true, callbacks);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(listener->waitForTorchState(
- ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
- EXPECT_EQ(OK, service->setTorchMode(String16(String8::format("%d", i)),
- /*enabled*/false, callbacks));
+ hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_ON, i));
+ res = service->setTorchMode(String16(String8::format("%d", i)),
+ /*enabled*/false, callbacks);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(listener->waitForTorchState(
- ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF, i));
+ hardware::ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF, i));
}
}
- EXPECT_EQ(OK, service->removeListener(listener));
+ res = service->removeListener(listener);
+ EXPECT_TRUE(res.isOk()) << res;
}
// Test fixture for client focused binder tests
class CameraClientBinderTest : public testing::Test {
protected:
- sp<ICameraService> service;
+ sp<hardware::ICameraService> service;
int32_t numCameras;
- std::vector<std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>> openDeviceList;
+ std::vector<std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>>
+ openDeviceList;
sp<TestCameraServiceListener> serviceListener;
- std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>> openNewDevice(int deviceId) {
-
+ std::pair<sp<TestCameraDeviceCallbacks>, sp<hardware::camera2::ICameraDeviceUser>>
+ openNewDevice(int deviceId) {
sp<TestCameraDeviceCallbacks> callbacks(new TestCameraDeviceCallbacks());
- sp<ICameraDeviceUser> device;
+ sp<hardware::camera2::ICameraDeviceUser> device;
{
SCOPED_TRACE("openNewDevice");
- EXPECT_EQ(OK, service->connectDevice(callbacks, deviceId, String16("meeeeeeeee!"),
- ICameraService::USE_CALLING_UID, /*out*/device));
+ binder::Status res = service->connectDevice(callbacks, deviceId, String16("meeeeeeeee!"),
+ hardware::ICameraService::USE_CALLING_UID, /*out*/&device);
+ EXPECT_TRUE(res.isOk()) << res;
}
auto p = std::make_pair(callbacks, device);
openDeviceList.push_back(p);
return p;
}
- void closeDevice(std::pair<sp<TestCameraDeviceCallbacks>, sp<ICameraDeviceUser>>& p) {
+ void closeDevice(std::pair<sp<TestCameraDeviceCallbacks>,
+ sp<hardware::camera2::ICameraDeviceUser>>& p) {
if (p.second.get() != nullptr) {
- p.second->disconnect();
+ binder::Status res = p.second->disconnect();
+ EXPECT_TRUE(res.isOk()) << res;
{
SCOPED_TRACE("closeDevice");
EXPECT_FALSE(p.first->hadError());
@@ -367,10 +394,11 @@
ProcessState::self()->startThreadPool();
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder = sm->getService(String16("media.camera"));
- service = interface_cast<ICameraService>(binder);
+ service = interface_cast<hardware::ICameraService>(binder);
serviceListener = new TestCameraServiceListener();
service->addListener(serviceListener);
- numCameras = service->getNumberOfCameras();
+ service->getNumberOfCameras(hardware::ICameraService::CAMERA_TYPE_BACKWARD_COMPATIBLE,
+ &numCameras);
}
virtual void TearDown() {
@@ -385,19 +413,19 @@
TEST_F(CameraClientBinderTest, CheckBinderCameraDeviceUser) {
ASSERT_NOT_NULL(service);
-
EXPECT_TRUE(serviceListener->waitForNumCameras(numCameras));
for (int32_t i = 0; i < numCameras; i++) {
// Make sure we're available, or skip device tests otherwise
- ICameraServiceListener::Status s = serviceListener->getStatus(i);
- EXPECT_EQ(ICameraServiceListener::STATUS_AVAILABLE, s);
- if (s != ICameraServiceListener::STATUS_AVAILABLE) {
+ int32_t s = serviceListener->getStatus(i);
+ EXPECT_EQ(hardware::ICameraServiceListener::STATUS_PRESENT, s);
+ if (s != hardware::ICameraServiceListener::STATUS_PRESENT) {
continue;
}
+ binder::Status res;
auto p = openNewDevice(i);
sp<TestCameraDeviceCallbacks> callbacks = p.first;
- sp<ICameraDeviceUser> device = p.second;
+ sp<hardware::camera2::ICameraDeviceUser> device = p.second;
// Setup a buffer queue; I'm just using the vendor opaque format here as that is
// guaranteed to be present
@@ -418,50 +446,65 @@
OutputConfiguration output(gbProducer, /*rotation*/0);
// Can we configure?
- EXPECT_EQ(OK, device->beginConfigure());
- status_t streamId = device->createStream(output);
+ res = device->beginConfigure();
+ EXPECT_TRUE(res.isOk()) << res;
+ status_t streamId;
+ res = device->createStream(output, &streamId);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_LE(0, streamId);
- EXPECT_EQ(OK, device->endConfigure());
+ res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_FALSE(callbacks->hadError());
// Can we make requests?
CameraMetadata requestTemplate;
- EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
- /*out*/&requestTemplate));
- sp<CaptureRequest> request(new CaptureRequest());
- request->mMetadata = requestTemplate;
- request->mSurfaceList.add(surface);
- request->mIsReprocess = false;
+ res = device->createDefaultRequest(/*preview template*/1,
+ /*out*/&requestTemplate);
+ EXPECT_TRUE(res.isOk()) << res;
+
+ hardware::camera2::CaptureRequest request;
+ request.mMetadata = requestTemplate;
+ request.mSurfaceList.add(surface);
+ request.mIsReprocess = false;
int64_t lastFrameNumber = 0;
int64_t lastFrameNumberPrev = 0;
callbacks->clearStatus();
- int requestId = device->submitRequest(request, /*streaming*/true, /*out*/&lastFrameNumber);
+
+ hardware::camera2::utils::SubmitInfo info;
+ res = device->submitRequest(request, /*streaming*/true, /*out*/&info);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
- EXPECT_LE(0, requestId);
+ EXPECT_LE(0, info.mRequestId);
// Can we stop requests?
- EXPECT_EQ(OK, device->cancelRequest(requestId, /*out*/&lastFrameNumber));
+ res = device->cancelRequest(info.mRequestId, /*out*/&lastFrameNumber);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(callbacks->waitForIdle());
EXPECT_FALSE(callbacks->hadError());
// Can we do it again?
- lastFrameNumberPrev = lastFrameNumber;
+ lastFrameNumberPrev = info.mLastFrameNumber;
lastFrameNumber = 0;
requestTemplate.clear();
- EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
- /*out*/&requestTemplate));
- sp<CaptureRequest> request2(new CaptureRequest());
- request2->mMetadata = requestTemplate;
- request2->mSurfaceList.add(surface);
- request2->mIsReprocess = false;
+ res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+ /*out*/&requestTemplate);
+ EXPECT_TRUE(res.isOk()) << res;
+ hardware::camera2::CaptureRequest request2;
+ request2.mMetadata = requestTemplate;
+ request2.mSurfaceList.add(surface);
+ request2.mIsReprocess = false;
callbacks->clearStatus();
- int requestId2 = device->submitRequest(request2, /*streaming*/true,
- /*out*/&lastFrameNumber);
- EXPECT_EQ(-1, lastFrameNumber);
+ hardware::camera2::utils::SubmitInfo info2;
+ res = device->submitRequest(request2, /*streaming*/true,
+ /*out*/&info2);
+ EXPECT_TRUE(res.isOk()) << res;
+ EXPECT_EQ(hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES,
+ info2.mLastFrameNumber);
lastFrameNumber = 0;
EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
- EXPECT_LE(0, requestId2);
- EXPECT_EQ(OK, device->cancelRequest(requestId2, /*out*/&lastFrameNumber));
+ EXPECT_LE(0, info2.mRequestId);
+ res = device->cancelRequest(info2.mRequestId, /*out*/&lastFrameNumber);
+ EXPECT_TRUE(res.isOk()) << res;
EXPECT_TRUE(callbacks->waitForIdle());
EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
sleep(/*second*/1); // allow some time for errors to show up, if any
@@ -472,36 +515,44 @@
lastFrameNumber = 0;
requestTemplate.clear();
CameraMetadata requestTemplate2;
- EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
- /*out*/&requestTemplate));
- EXPECT_EQ(OK, device->createDefaultRequest(/*preview template*/1,
- /*out*/&requestTemplate2));
- sp<CaptureRequest> request3(new CaptureRequest());
- sp<CaptureRequest> request4(new CaptureRequest());
- request3->mMetadata = requestTemplate;
- request3->mSurfaceList.add(surface);
- request3->mIsReprocess = false;
- request4->mMetadata = requestTemplate2;
- request4->mSurfaceList.add(surface);
- request4->mIsReprocess = false;
- List<sp<CaptureRequest>> requestList;
+ res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+ /*out*/&requestTemplate);
+ EXPECT_TRUE(res.isOk()) << res;
+ res = device->createDefaultRequest(hardware::camera2::ICameraDeviceUser::TEMPLATE_PREVIEW,
+ /*out*/&requestTemplate2);
+ EXPECT_TRUE(res.isOk()) << res;
+ android::hardware::camera2::CaptureRequest request3;
+ android::hardware::camera2::CaptureRequest request4;
+ request3.mMetadata = requestTemplate;
+ request3.mSurfaceList.add(surface);
+ request3.mIsReprocess = false;
+ request4.mMetadata = requestTemplate2;
+ request4.mSurfaceList.add(surface);
+ request4.mIsReprocess = false;
+ std::vector<hardware::camera2::CaptureRequest> requestList;
requestList.push_back(request3);
requestList.push_back(request4);
callbacks->clearStatus();
- int requestId3 = device->submitRequestList(requestList, /*streaming*/false,
- /*out*/&lastFrameNumber);
- EXPECT_LE(0, requestId3);
+ hardware::camera2::utils::SubmitInfo info3;
+ res = device->submitRequestList(requestList, /*streaming*/false,
+ /*out*/&info3);
+ EXPECT_TRUE(res.isOk()) << res;
+ EXPECT_LE(0, info3.mRequestId);
EXPECT_TRUE(callbacks->waitForStatus(TestCameraDeviceCallbacks::SENT_RESULT));
EXPECT_TRUE(callbacks->waitForIdle());
- EXPECT_LE(lastFrameNumberPrev, lastFrameNumber);
+ EXPECT_LE(lastFrameNumberPrev, info3.mLastFrameNumber);
sleep(/*second*/1); // allow some time for errors to show up, if any
EXPECT_FALSE(callbacks->hadError());
// Can we unconfigure?
- EXPECT_EQ(OK, device->beginConfigure());
- EXPECT_EQ(OK, device->deleteStream(streamId));
- EXPECT_EQ(OK, device->endConfigure());
+ res = device->beginConfigure();
+ EXPECT_TRUE(res.isOk()) << res;
+ res = device->deleteStream(streamId);
+ EXPECT_TRUE(res.isOk()) << res;
+ res = device->endConfigure(/*isConstrainedHighSpeed*/ false);
+ EXPECT_TRUE(res.isOk()) << res;
+
sleep(/*second*/1); // allow some time for errors to show up, if any
EXPECT_FALSE(callbacks->hadError());
diff --git a/camera/tests/VendorTagDescriptorTests.cpp b/camera/tests/VendorTagDescriptorTests.cpp
index 9082dbf..75cfb73 100644
--- a/camera/tests/VendorTagDescriptorTests.cpp
+++ b/camera/tests/VendorTagDescriptorTests.cpp
@@ -53,27 +53,27 @@
extern "C" {
-static int zero_get_tag_count(const vendor_tag_ops_t* vOps) {
+static int zero_get_tag_count(const vendor_tag_ops_t*) {
return 0;
}
-static int default_get_tag_count(const vendor_tag_ops_t* vOps) {
+static int default_get_tag_count(const vendor_tag_ops_t*) {
return VENDOR_TAG_COUNT_ERR;
}
-static void default_get_all_tags(const vendor_tag_ops_t* vOps, uint32_t* tagArray) {
+static void default_get_all_tags(const vendor_tag_ops_t*, uint32_t*) {
//Noop
}
-static const char* default_get_section_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static const char* default_get_section_name(const vendor_tag_ops_t*, uint32_t) {
return VENDOR_SECTION_NAME_ERR;
}
-static const char* default_get_tag_name(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static const char* default_get_tag_name(const vendor_tag_ops_t*, uint32_t) {
return VENDOR_TAG_NAME_ERR;
}
-static int default_get_tag_type(const vendor_tag_ops_t* vOps, uint32_t tag) {
+static int default_get_tag_type(const vendor_tag_ops_t*, uint32_t) {
return VENDOR_TAG_TYPE_ERR;
}
@@ -141,7 +141,8 @@
// Check whether parcel read/write succeed
EXPECT_EQ(OK, vDescOriginal->writeToParcel(&p));
p.setDataPosition(0);
- ASSERT_EQ(OK, VendorTagDescriptor::createFromParcel(&p, vDescParceled));
+
+ ASSERT_EQ(OK, vDescParceled->readFromParcel(&p));
// Ensure consistent tag count
int tagCount = vDescOriginal->getTagCount();
@@ -197,7 +198,6 @@
EXPECT_EQ(VENDOR_TAG_TYPE_ERR, vDesc->getTagType(BAD_TAG));
// Make sure global can be set/cleared
- const vendor_tag_ops_t *fakeOps = &fakevendor_ops;
sp<VendorTagDescriptor> prevGlobal = VendorTagDescriptor::getGlobalVendorTagDescriptor();
VendorTagDescriptor::clearGlobalVendorTagDescriptor();
@@ -208,4 +208,3 @@
EXPECT_EQ(OK, VendorTagDescriptor::setAsGlobalVendorTagDescriptor(prevGlobal));
EXPECT_EQ(prevGlobal, VendorTagDescriptor::getGlobalVendorTagDescriptor());
}
-
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 587077a..cad8caf 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -53,6 +53,7 @@
meta->setInt32(kKeyChannelCount, mNumChannels);
meta->setInt32(kKeySampleRate, mSampleRate);
meta->setInt32(kKeyMaxInputSize, kBufferSize);
+ meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
return meta;
}
diff --git a/include/camera/Camera.h b/include/camera/Camera.h
index f19d296..b45bbfc 100644
--- a/include/camera/Camera.h
+++ b/include/camera/Camera.h
@@ -18,13 +18,15 @@
#define ANDROID_HARDWARE_CAMERA_H
#include <utils/Timers.h>
+
+#include <android/hardware/ICameraService.h>
+
#include <gui/IGraphicBufferProducer.h>
#include <system/camera.h>
-#include <camera/ICameraClient.h>
#include <camera/ICameraRecordingProxy.h>
#include <camera/ICameraRecordingProxyListener.h>
-#include <camera/ICameraService.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/android/hardware/ICameraClient.h>
#include <camera/CameraBase.h>
namespace android {
@@ -48,31 +50,32 @@
template <>
struct CameraTraits<Camera>
{
- typedef CameraListener TCamListener;
- typedef ICamera TCamUser;
- typedef ICameraClient TCamCallbacks;
- typedef status_t (ICameraService::*TCamConnectService)(const sp<ICameraClient>&,
- int, const String16&, int, int,
- /*out*/
- sp<ICamera>&);
+ typedef CameraListener TCamListener;
+ typedef ::android::hardware::ICamera TCamUser;
+ typedef ::android::hardware::ICameraClient TCamCallbacks;
+ typedef ::android::binder::Status(::android::hardware::ICameraService::*TCamConnectService)
+ (const sp<::android::hardware::ICameraClient>&,
+ int, const String16&, int, int,
+ /*out*/
+ sp<::android::hardware::ICamera>*);
static TCamConnectService fnConnectService;
};
class Camera :
public CameraBase<Camera>,
- public BnCameraClient
+ public ::android::hardware::BnCameraClient
{
public:
enum {
- USE_CALLING_UID = ICameraService::USE_CALLING_UID
+ USE_CALLING_UID = ::android::hardware::ICameraService::USE_CALLING_UID
};
enum {
- USE_CALLING_PID = ICameraService::USE_CALLING_PID
+ USE_CALLING_PID = ::android::hardware::ICameraService::USE_CALLING_PID
};
// construct a camera client from an existing remote
- static sp<Camera> create(const sp<ICamera>& camera);
+ static sp<Camera> create(const sp<::android::hardware::ICamera>& camera);
static sp<Camera> connect(int cameraId,
const String16& clientPackageName,
int clientUid, int clientPid);
diff --git a/include/camera/CameraBase.h b/include/camera/CameraBase.h
index d8561ed..0692a27 100644
--- a/include/camera/CameraBase.h
+++ b/include/camera/CameraBase.h
@@ -18,13 +18,18 @@
#define ANDROID_HARDWARE_CAMERA_BASE_H
#include <utils/Mutex.h>
-#include <camera/ICameraService.h>
struct camera_frame_metadata;
namespace android {
-struct CameraInfo {
+namespace hardware {
+
+
+class ICameraService;
+class ICameraServiceListener;
+
+struct CameraInfo : public android::Parcelable {
/**
* The direction that the camera faces to. It should be CAMERA_FACING_BACK
* or CAMERA_FACING_FRONT.
@@ -44,8 +49,17 @@
* right of the screen, the value should be 270.
*/
int orientation;
+
+ virtual status_t writeToParcel(Parcel* parcel) const;
+ virtual status_t readFromParcel(const Parcel* parcel);
+
};
+} // namespace hardware
+
+using hardware::CameraInfo;
+
+
template <typename TCam>
struct CameraTraits {
};
@@ -70,13 +84,13 @@
static status_t getCameraInfo(int cameraId,
/*out*/
- struct CameraInfo* cameraInfo);
+ struct hardware::CameraInfo* cameraInfo);
static status_t addServiceListener(
- const sp<ICameraServiceListener>& listener);
+ const sp<::android::hardware::ICameraServiceListener>& listener);
static status_t removeServiceListener(
- const sp<ICameraServiceListener>& listener);
+ const sp<::android::hardware::ICameraServiceListener>& listener);
sp<TCamUser> remote();
@@ -101,7 +115,7 @@
virtual void binderDied(const wp<IBinder>& who);
// helper function to obtain camera service handle
- static const sp<ICameraService>& getCameraService();
+ static const sp<::android::hardware::ICameraService>& getCameraService();
sp<TCamUser> mCamera;
status_t mStatus;
diff --git a/include/camera/CameraMetadata.h b/include/camera/CameraMetadata.h
index 953d711..28f47a1 100644
--- a/include/camera/CameraMetadata.h
+++ b/include/camera/CameraMetadata.h
@@ -20,14 +20,14 @@
#include "system/camera_metadata.h"
#include <utils/String8.h>
#include <utils/Vector.h>
+#include <binder/Parcelable.h>
namespace android {
-class Parcel;
/**
* A convenience wrapper around the C-based camera_metadata_t library.
*/
-class CameraMetadata {
+class CameraMetadata: public Parcelable {
public:
/** Creates an empty object; best used when expecting to acquire contents
* from elsewhere */
@@ -64,7 +64,7 @@
* from getAndLock must be provided to guarantee that the right object is
* being unlocked.
*/
- status_t unlock(const camera_metadata_t *buffer);
+ status_t unlock(const camera_metadata_t *buffer) const;
/**
* Release a raw metadata buffer to the caller. After this call,
@@ -186,8 +186,8 @@
*/
// Metadata object is unchanged when reading from parcel fails.
- status_t readFromParcel(Parcel *parcel);
- status_t writeToParcel(Parcel *parcel) const;
+ virtual status_t readFromParcel(const Parcel *parcel) override;
+ virtual status_t writeToParcel(Parcel *parcel) const override;
/**
* Caller becomes the owner of the new metadata
@@ -227,6 +227,15 @@
};
-}; // namespace android
+namespace hardware {
+namespace camera2 {
+namespace impl {
+using ::android::CameraMetadata;
+typedef CameraMetadata CameraMetadataNative;
+}
+}
+}
+
+} // namespace android
#endif
diff --git a/include/camera/CaptureResult.h b/include/camera/CaptureResult.h
index 0be7d6f..45e4518 100644
--- a/include/camera/CaptureResult.h
+++ b/include/camera/CaptureResult.h
@@ -18,15 +18,21 @@
#define ANDROID_HARDWARE_CAPTURERESULT_H
#include <utils/RefBase.h>
+#include <binder/Parcelable.h>
#include <camera/CameraMetadata.h>
+
namespace android {
+namespace hardware {
+namespace camera2 {
+namespace impl {
+
/**
* CaptureResultExtras is a structure to encapsulate various indices for a capture result.
* These indices are framework-internal and not sent to the HAL.
*/
-struct CaptureResultExtras {
+struct CaptureResultExtras : public android::Parcelable {
/**
* An integer to index the request sequence that this result belongs to.
*/
@@ -58,6 +64,12 @@
int32_t partialResultCount;
/**
+ * For buffer drop errors, the stream ID for the stream that lost a buffer.
+ * Otherwise -1.
+ */
+ int32_t errorStreamId;
+
+ /**
* Constructor initializes object as invalid by setting requestId to be -1.
*/
CaptureResultExtras()
@@ -66,7 +78,8 @@
afTriggerId(0),
precaptureTriggerId(0),
frameNumber(0),
- partialResultCount(0) {
+ partialResultCount(0),
+ errorStreamId(-1) {
}
/**
@@ -75,9 +88,14 @@
*/
bool isValid();
- status_t readFromParcel(Parcel* parcel);
- status_t writeToParcel(Parcel* parcel) const;
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+ virtual status_t writeToParcel(Parcel* parcel) const override;
};
+} // namespace impl
+} // namespace camera2
+} // namespace hardware
+
+using hardware::camera2::impl::CaptureResultExtras;
struct CaptureResult : public virtual LightRefBase<CaptureResult> {
CameraMetadata mMetadata;
diff --git a/include/camera/ICameraService.h b/include/camera/ICameraService.h
deleted file mode 100644
index d568b4d..0000000
--- a/include/camera/ICameraService.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/*
- * Copyright (C) 2008 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_ICAMERASERVICE_H
-#define ANDROID_HARDWARE_ICAMERASERVICE_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-
-namespace android {
-
-class ICamera;
-class ICameraClient;
-class ICameraServiceListener;
-class ICameraDeviceUser;
-class ICameraDeviceCallbacks;
-class CameraMetadata;
-class VendorTagDescriptor;
-class String16;
-
-class ICameraService : public IInterface
-{
-public:
- /**
- * Keep up-to-date with ICameraService.aidl in frameworks/base
- */
- enum {
- GET_NUMBER_OF_CAMERAS = IBinder::FIRST_CALL_TRANSACTION,
- GET_CAMERA_INFO,
- CONNECT,
- CONNECT_DEVICE,
- ADD_LISTENER,
- REMOVE_LISTENER,
- GET_CAMERA_CHARACTERISTICS,
- GET_CAMERA_VENDOR_TAG_DESCRIPTOR,
- GET_LEGACY_PARAMETERS,
- SUPPORTS_CAMERA_API,
- CONNECT_LEGACY,
- SET_TORCH_MODE,
- NOTIFY_SYSTEM_EVENT,
- };
-
- enum {
- USE_CALLING_PID = -1
- };
-
- enum {
- USE_CALLING_UID = -1
- };
-
- enum {
- API_VERSION_1 = 1,
- API_VERSION_2 = 2,
- };
-
- enum {
- CAMERA_TYPE_BACKWARD_COMPATIBLE = 0,
- CAMERA_TYPE_ALL = 1,
- };
-
- enum {
- CAMERA_HAL_API_VERSION_UNSPECIFIED = -1
- };
-
- /**
- * Keep up-to-date with declarations in
- * frameworks/base/services/core/java/com/android/server/camera/CameraService.java
- *
- * These event codes are intended to be used with the notifySystemEvent call.
- */
- enum {
- NO_EVENT = 0,
- USER_SWITCHED,
- };
-
-public:
- DECLARE_META_INTERFACE(CameraService);
-
- // Get the number of cameras that support basic color camera operation
- // (type CAMERA_TYPE_BACKWARD_COMPATIBLE)
- virtual int32_t getNumberOfCameras() = 0;
- // Get the number of cameras of the specified type, one of CAMERA_TYPE_*
- // enums
- virtual int32_t getNumberOfCameras(int cameraType) = 0;
- virtual status_t getCameraInfo(int cameraId,
- /*out*/
- struct CameraInfo* cameraInfo) = 0;
-
- virtual status_t getCameraCharacteristics(int cameraId,
- /*out*/
- CameraMetadata* cameraInfo) = 0;
-
- virtual status_t getCameraVendorTagDescriptor(
- /*out*/
- sp<VendorTagDescriptor>& desc) = 0;
-
- // Returns 'OK' if operation succeeded
- // - Errors: ALREADY_EXISTS if the listener was already added
- virtual status_t addListener(const sp<ICameraServiceListener>& listener)
- = 0;
- // Returns 'OK' if operation succeeded
- // - Errors: BAD_VALUE if specified listener was not in the listener list
- virtual status_t removeListener(const sp<ICameraServiceListener>& listener)
- = 0;
- /**
- * clientPackageName, clientUid, and clientPid are used for permissions checking. If
- * clientUid == USE_CALLING_UID, then the calling UID is used instead. If
- * clientPid == USE_CALLING_PID, then the calling PID is used instead. Only
- * trusted callers can set a clientUid and clientPid other than USE_CALLING_UID and
- * USE_CALLING_UID respectively.
- */
- virtual status_t connect(const sp<ICameraClient>& cameraClient,
- int cameraId,
- const String16& clientPackageName,
- int clientUid,
- int clientPid,
- /*out*/
- sp<ICamera>& device) = 0;
-
- virtual status_t connectDevice(
- const sp<ICameraDeviceCallbacks>& cameraCb,
- int cameraId,
- const String16& clientPackageName,
- int clientUid,
- /*out*/
- sp<ICameraDeviceUser>& device) = 0;
-
- virtual status_t getLegacyParameters(
- int cameraId,
- /*out*/
- String16* parameters) = 0;
-
- /**
- * Returns OK if device supports camera2 api,
- * returns -EOPNOTSUPP if it doesn't.
- */
- virtual status_t supportsCameraApi(
- int cameraId, int apiVersion) = 0;
-
- /**
- * Connect the device as a legacy device for a given HAL version.
- * For halVersion, use CAMERA_API_DEVICE_VERSION_* for a particular
- * version, or CAMERA_HAL_API_VERSION_UNSPECIFIED for a service-selected version.
- */
- virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient,
- int cameraId, int halVersion,
- const String16& clientPackageName,
- int clientUid,
- /*out*/
- sp<ICamera>& device) = 0;
-
- /**
- * Turn on or off a camera's torch mode. Torch mode will be turned off by
- * camera service if the lastest client binder that turns it on dies.
- *
- * return values:
- * 0: on a successful operation.
- * -ENOSYS: the camera device doesn't support this operation. It it returned
- * if and only if android.flash.into.available is false.
- * -EBUSY: the camera device is opened.
- * -EINVAL: camera_id is invalid or clientBinder is NULL when enabling a
- * torch mode.
- */
- virtual status_t setTorchMode(const String16& cameraId, bool enabled,
- const sp<IBinder>& clientBinder) = 0;
-
- /**
- * Notify the camera service of a system event. Should only be called from system_server.
- */
- virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraService: public BnInterface<ICameraService>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/ICameraServiceListener.h b/include/camera/ICameraServiceListener.h
deleted file mode 100644
index 709ff31..0000000
--- a/include/camera/ICameraServiceListener.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
-#define ANDROID_HARDWARE_ICAMERASERVICE_LISTENER_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <hardware/camera_common.h>
-
-namespace android {
-
-class ICameraServiceListener : public IInterface
-{
- /**
- * Keep up-to-date with ICameraServiceListener.aidl in frameworks/base
- */
-public:
-
- /**
- * Initial status will be transmitted with onStatusChange immediately
- * after this listener is added to the service listener list.
- *
- * Allowed transitions:
- *
- * (Any) -> NOT_PRESENT
- * NOT_PRESENT -> PRESENT
- * NOT_PRESENT -> ENUMERATING
- * ENUMERATING -> PRESENT
- * PRESENT -> NOT_AVAILABLE
- * NOT_AVAILABLE -> PRESENT
- *
- * A state will never immediately transition back to itself.
- */
- enum Status {
- // Device physically unplugged
- STATUS_NOT_PRESENT = CAMERA_DEVICE_STATUS_NOT_PRESENT,
- // Device physically has been plugged in
- // and the camera can be used exlusively
- STATUS_PRESENT = CAMERA_DEVICE_STATUS_PRESENT,
- // Device physically has been plugged in
- // but it will not be connect-able until enumeration is complete
- STATUS_ENUMERATING = CAMERA_DEVICE_STATUS_ENUMERATING,
-
- // Camera can be used exclusively
- STATUS_AVAILABLE = STATUS_PRESENT, // deprecated, will be removed
-
- // Camera is in use by another app and cannot be used exclusively
- STATUS_NOT_AVAILABLE = 0x80000000,
-
- // Use to initialize variables only
- STATUS_UNKNOWN = 0xFFFFFFFF,
- };
-
- /**
- * The torch mode status of a camera.
- *
- * Initial status will be transmitted with onTorchStatusChanged immediately
- * after this listener is added to the service listener list.
- *
- * The enums should be set to values matching
- * include/hardware/camera_common.h
- */
- enum TorchStatus {
- // The camera's torch mode has become not available to use via
- // setTorchMode().
- TORCH_STATUS_NOT_AVAILABLE = TORCH_MODE_STATUS_NOT_AVAILABLE,
- // The camera's torch mode is off and available to be turned on via
- // setTorchMode().
- TORCH_STATUS_AVAILABLE_OFF = TORCH_MODE_STATUS_AVAILABLE_OFF,
- // The camera's torch mode is on and available to be turned off via
- // setTorchMode().
- TORCH_STATUS_AVAILABLE_ON = TORCH_MODE_STATUS_AVAILABLE_ON,
-
- // Use to initialize variables only
- TORCH_STATUS_UNKNOWN = 0xFFFFFFFF,
- };
-
- DECLARE_META_INTERFACE(CameraServiceListener);
-
- virtual void onStatusChanged(Status status, int32_t cameraId) = 0;
-
- virtual void onTorchStatusChanged(TorchStatus status, const String16& cameraId) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraServiceListener : public BnInterface<ICameraServiceListener>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/VendorTagDescriptor.h b/include/camera/VendorTagDescriptor.h
index 1758acf..4c1cab6 100644
--- a/include/camera/VendorTagDescriptor.h
+++ b/include/camera/VendorTagDescriptor.h
@@ -16,6 +16,7 @@
#ifndef VENDOR_TAG_DESCRIPTOR_H
+#include <binder/Parcelable.h>
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <utils/String8.h>
@@ -26,18 +27,27 @@
namespace android {
-class Parcel;
+class VendorTagDescriptor;
+
+namespace hardware {
+namespace camera2 {
+namespace params {
/**
* VendorTagDescriptor objects are parcelable containers for the vendor tag
* definitions provided, and are typically used to pass the vendor tag
* information enumerated by the HAL to clients of the camera service.
*/
-class VendorTagDescriptor
- : public LightRefBase<VendorTagDescriptor> {
+class VendorTagDescriptor : public Parcelable {
public:
virtual ~VendorTagDescriptor();
+ VendorTagDescriptor();
+ VendorTagDescriptor(const VendorTagDescriptor& src);
+ VendorTagDescriptor& operator=(const VendorTagDescriptor& rhs);
+
+ void copyFrom(const VendorTagDescriptor& src);
+
/**
* The following 'get*' methods implement the corresponding
* functions defined in
@@ -64,9 +74,9 @@
*
* Returns OK on success, or a negative error code.
*/
- status_t writeToParcel(
+ virtual status_t writeToParcel(
/*out*/
- Parcel* parcel) const;
+ Parcel* parcel) const override;
/**
* Convenience method to get a vector containing all vendor tag
@@ -86,48 +96,14 @@
*/
void dump(int fd, int verbosity, int indentation) const;
- // Static methods:
-
/**
- * Create a VendorTagDescriptor object from the given parcel.
+ * Read values into this VendorTagDescriptor object from the given parcel.
*
* Returns OK on success, or a negative error code.
*/
- static status_t createFromParcel(const Parcel* parcel,
- /*out*/
- sp<VendorTagDescriptor>& descriptor);
+ virtual status_t readFromParcel(const Parcel* parcel) override;
- /**
- * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
- * struct.
- *
- * Returns OK on success, or a negative error code.
- */
- static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
- /*out*/
- sp<VendorTagDescriptor>& descriptor);
-
- /**
- * Sets the global vendor tag descriptor to use for this process.
- * Camera metadata operations that access vendor tags will use the
- * vendor tag definitions set this way.
- *
- * Returns OK on success, or a negative error code.
- */
- static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
-
- /**
- * Clears the global vendor tag descriptor used by this process.
- */
- static void clearGlobalVendorTagDescriptor();
-
- /**
- * Returns the global vendor tag descriptor used by this process.
- * This will contain NULL if no vendor tags are defined.
- */
- static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
protected:
- VendorTagDescriptor();
KeyedVector<String8, KeyedVector<String8, uint32_t>*> mReverseMapping;
KeyedVector<uint32_t, String8> mTagToNameMap;
KeyedVector<uint32_t, uint32_t> mTagToSectionMap; // Value is offset in mSections
@@ -135,11 +111,61 @@
SortedVector<String8> mSections;
// must be int32_t to be compatible with Parcel::writeInt32
int32_t mTagCount;
- private:
+
vendor_tag_ops mVendorOps;
};
+} /* namespace params */
+} /* namespace camera2 */
+} /* namespace hardware */
+
+/**
+ * This version of VendorTagDescriptor must be stored in Android sp<>, and adds support for using it
+ * as a global tag descriptor.
+ *
+ * It's a child class of the basic hardware::camera2::params::VendorTagDescriptor since basic
+ * Parcelable objects cannot require being kept in an sp<> and still work with auto-generated AIDL
+ * interface implementations.
+ */
+class VendorTagDescriptor :
+ public ::android::hardware::camera2::params::VendorTagDescriptor,
+ public LightRefBase<VendorTagDescriptor> {
+
+ public:
+
+ /**
+ * Create a VendorTagDescriptor object from the given vendor_tag_ops_t
+ * struct.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t createDescriptorFromOps(const vendor_tag_ops_t* vOps,
+ /*out*/
+ sp<VendorTagDescriptor>& descriptor);
+
+ /**
+ * Sets the global vendor tag descriptor to use for this process.
+ * Camera metadata operations that access vendor tags will use the
+ * vendor tag definitions set this way.
+ *
+ * Returns OK on success, or a negative error code.
+ */
+ static status_t setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc);
+
+ /**
+ * Returns the global vendor tag descriptor used by this process.
+ * This will contain NULL if no vendor tags are defined.
+ */
+ static sp<VendorTagDescriptor> getGlobalVendorTagDescriptor();
+
+ /**
+ * Clears the global vendor tag descriptor used by this process.
+ */
+ static void clearGlobalVendorTagDescriptor();
+
+};
} /* namespace android */
+
#define VENDOR_TAG_DESCRIPTOR_H
#endif /* VENDOR_TAG_DESCRIPTOR_H */
diff --git a/include/camera/ICamera.h b/include/camera/android/hardware/ICamera.h
similarity index 96%
rename from include/camera/ICamera.h
rename to include/camera/android/hardware/ICamera.h
index e35c3a4..322b741 100644
--- a/include/camera/ICamera.h
+++ b/include/camera/android/hardware/ICamera.h
@@ -21,15 +21,18 @@
#include <binder/IInterface.h>
#include <binder/Parcel.h>
#include <binder/IMemory.h>
+#include <binder/Status.h>
#include <utils/String8.h>
-#include <camera/Camera.h>
namespace android {
-class ICameraClient;
class IGraphicBufferProducer;
class Surface;
+namespace hardware {
+
+class ICameraClient;
+
class ICamera: public IInterface
{
/**
@@ -47,7 +50,7 @@
DECLARE_META_INTERFACE(Camera);
- virtual void disconnect() = 0;
+ virtual binder::Status disconnect() = 0;
// connect new client with existing camera remote
virtual status_t connect(const sp<ICameraClient>& client) = 0;
@@ -141,6 +144,7 @@
uint32_t flags = 0);
};
-}; // namespace android
+} // namespace hardware
+} // namespace android
#endif
diff --git a/include/camera/ICameraClient.h b/include/camera/android/hardware/ICameraClient.h
similarity index 93%
rename from include/camera/ICameraClient.h
rename to include/camera/android/hardware/ICameraClient.h
index 1584dba..d7f9a75 100644
--- a/include/camera/ICameraClient.h
+++ b/include/camera/android/hardware/ICameraClient.h
@@ -25,12 +25,10 @@
#include <system/camera.h>
namespace android {
+namespace hardware {
class ICameraClient: public IInterface
{
- /**
- * Keep up-to-date with ICameraClient.aidl in frameworks/base
- */
public:
DECLARE_META_INTERFACE(CameraClient);
@@ -51,6 +49,7 @@
uint32_t flags = 0);
};
-}; // namespace android
+} // namespace hardware
+} // namespace android
#endif
diff --git a/include/camera/camera2/CaptureRequest.h b/include/camera/camera2/CaptureRequest.h
index 1dd15c4..c989f26 100644
--- a/include/camera/camera2/CaptureRequest.h
+++ b/include/camera/camera2/CaptureRequest.h
@@ -19,15 +19,17 @@
#include <utils/RefBase.h>
#include <utils/Vector.h>
+#include <binder/Parcelable.h>
#include <camera/CameraMetadata.h>
namespace android {
class Surface;
-struct CaptureRequest : public RefBase {
-public:
+namespace hardware {
+namespace camera2 {
+struct CaptureRequest : public Parcelable {
CameraMetadata mMetadata;
Vector<sp<Surface> > mSurfaceList;
bool mIsReprocess;
@@ -35,9 +37,20 @@
/**
* Keep impl up-to-date with CaptureRequest.java in frameworks/base
*/
- status_t readFromParcel(Parcel* parcel);
- status_t writeToParcel(Parcel* parcel) const;
+ status_t readFromParcel(const Parcel* parcel) override;
+ status_t writeToParcel(Parcel* parcel) const override;
};
-}; // namespace android
+
+} // namespace camera2
+} // namespace hardware
+
+struct CaptureRequest :
+ public RefBase, public hardware::camera2::CaptureRequest {
+ public:
+ // Same as android::hardware::camera2::CaptureRequest, except that you can
+ // put this in an sp<>
+};
+
+} // namespace android
#endif
diff --git a/include/camera/camera2/ICameraDeviceCallbacks.h b/include/camera/camera2/ICameraDeviceCallbacks.h
deleted file mode 100644
index c57b39f..0000000
--- a/include/camera/camera2/ICameraDeviceCallbacks.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
-#define ANDROID_HARDWARE_PHOTOGRAPHY_CALLBACKS_H
-
-#include <utils/RefBase.h>
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <binder/IMemory.h>
-#include <utils/Timers.h>
-#include <system/camera.h>
-
-#include <camera/CaptureResult.h>
-
-namespace android {
-class CameraMetadata;
-
-
-class ICameraDeviceCallbacks : public IInterface
-{
- /**
- * Keep up-to-date with ICameraDeviceCallbacks.aidl in frameworks/base
- */
-public:
- DECLARE_META_INTERFACE(CameraDeviceCallbacks);
-
- /**
- * Error codes for CAMERA_MSG_ERROR
- */
- enum CameraErrorCode {
- ERROR_CAMERA_INVALID_ERROR = -1, // To indicate all invalid error codes
- ERROR_CAMERA_DISCONNECTED = 0,
- ERROR_CAMERA_DEVICE = 1,
- ERROR_CAMERA_SERVICE = 2,
- ERROR_CAMERA_REQUEST = 3,
- ERROR_CAMERA_RESULT = 4,
- ERROR_CAMERA_BUFFER = 5,
- };
-
- // One way
- virtual void onDeviceError(CameraErrorCode errorCode,
- const CaptureResultExtras& resultExtras) = 0;
-
- // One way
- virtual void onDeviceIdle() = 0;
-
- // One way
- virtual void onCaptureStarted(const CaptureResultExtras& resultExtras,
- int64_t timestamp) = 0;
-
- // One way
- virtual void onResultReceived(const CameraMetadata& metadata,
- const CaptureResultExtras& resultExtras) = 0;
-
- // One way
- virtual void onPrepared(int streamId) = 0;
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraDeviceCallbacks : public BnInterface<ICameraDeviceCallbacks>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/camera2/ICameraDeviceUser.h b/include/camera/camera2/ICameraDeviceUser.h
deleted file mode 100644
index 4d8eb53..0000000
--- a/include/camera/camera2/ICameraDeviceUser.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright (C) 2013 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
-#define ANDROID_HARDWARE_PHOTOGRAPHY_ICAMERADEVICEUSER_H
-
-#include <binder/IInterface.h>
-#include <binder/Parcel.h>
-#include <utils/List.h>
-
-struct camera_metadata;
-
-namespace android {
-
-class ICameraDeviceUserClient;
-class IGraphicBufferProducer;
-class CaptureRequest;
-class CameraMetadata;
-class OutputConfiguration;
-
-enum {
- NO_IN_FLIGHT_REPEATING_FRAMES = -1,
-};
-
-class ICameraDeviceUser : public IInterface
-{
- /**
- * Keep up-to-date with ICameraDeviceUser.aidl in frameworks/base
- */
-public:
- DECLARE_META_INTERFACE(CameraDeviceUser);
-
- virtual void disconnect() = 0;
-
- /**
- * Request Handling
- **/
-
- /**
- * For streaming requests, output lastFrameNumber is the last frame number
- * of the previous repeating request.
- * For non-streaming requests, output lastFrameNumber is the expected last
- * frame number of the current request.
- */
- virtual int submitRequest(sp<CaptureRequest> request,
- bool streaming = false,
- /*out*/
- int64_t* lastFrameNumber = NULL) = 0;
-
- /**
- * For streaming requests, output lastFrameNumber is the last frame number
- * of the previous repeating request.
- * For non-streaming requests, output lastFrameNumber is the expected last
- * frame number of the current request.
- */
- virtual int submitRequestList(List<sp<CaptureRequest> > requestList,
- bool streaming = false,
- /*out*/
- int64_t* lastFrameNumber = NULL) = 0;
-
- /**
- * Output lastFrameNumber is the last frame number of the previous repeating request.
- */
- virtual status_t cancelRequest(int requestId,
- /*out*/
- int64_t* lastFrameNumber = NULL) = 0;
-
- /**
- * Begin the device configuration.
- *
- * <p>
- * beginConfigure must be called before any call to deleteStream, createStream,
- * or endConfigure. It is not valid to call this when the device is not idle.
- * <p>
- */
- virtual status_t beginConfigure() = 0;
-
- /**
- * End the device configuration.
- *
- * <p>
- * endConfigure must be called after stream configuration is complete (i.e. after
- * a call to beginConfigure and subsequent createStream/deleteStream calls). This
- * must be called before any requests can be submitted.
- * <p>
- */
- virtual status_t endConfigure(bool isConstrainedHighSpeed = false) = 0;
-
- virtual status_t deleteStream(int streamId) = 0;
-
- virtual status_t createStream(const OutputConfiguration& outputConfiguration) = 0;
-
- /**
- * Create an input stream of width, height, and format (one of
- * HAL_PIXEL_FORMAT_*)
- *
- * Return stream ID if it's a non-negative value. status_t if it's a
- * negative value.
- */
- virtual status_t createInputStream(int width, int height, int format) = 0;
-
- // get the buffer producer of the input stream
- virtual status_t getInputBufferProducer(
- sp<IGraphicBufferProducer> *producer) = 0;
-
- // Create a request object from a template.
- virtual status_t createDefaultRequest(int templateId,
- /*out*/
- CameraMetadata* request) = 0;
- // Get static camera metadata
- virtual status_t getCameraInfo(/*out*/
- CameraMetadata* info) = 0;
-
- // Wait until all the submitted requests have finished processing
- virtual status_t waitUntilIdle() = 0;
-
- /**
- * Flush all pending and in-progress work as quickly as possible.
- * Output lastFrameNumber is the last frame number of the previous repeating request.
- */
- virtual status_t flush(/*out*/
- int64_t* lastFrameNumber = NULL) = 0;
-
- /**
- * Preallocate buffers for a given output stream asynchronously.
- */
- virtual status_t prepare(int streamId) = 0;
-
- /**
- * Preallocate up to maxCount buffers for a given output stream asynchronously.
- */
- virtual status_t prepare2(int maxCount, int streamId) = 0;
-
- /**
- * Free all unused buffers for a given output stream.
- */
- virtual status_t tearDown(int streamId) = 0;
-
-};
-
-// ----------------------------------------------------------------------------
-
-class BnCameraDeviceUser: public BnInterface<ICameraDeviceUser>
-{
-public:
- virtual status_t onTransact( uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags = 0);
-};
-
-}; // namespace android
-
-#endif
diff --git a/include/camera/camera2/OutputConfiguration.h b/include/camera/camera2/OutputConfiguration.h
index 137d98c..72a3753 100644
--- a/include/camera/camera2/OutputConfiguration.h
+++ b/include/camera/camera2/OutputConfiguration.h
@@ -18,12 +18,17 @@
#define ANDROID_HARDWARE_CAMERA2_OUTPUTCONFIGURATION_H
#include <gui/IGraphicBufferProducer.h>
+#include <binder/Parcelable.h>
namespace android {
class Surface;
-class OutputConfiguration {
+namespace hardware {
+namespace camera2 {
+namespace params {
+
+class OutputConfiguration : public android::Parcelable {
public:
static const int INVALID_ROTATION;
@@ -35,9 +40,18 @@
/**
* Keep impl up-to-date with OutputConfiguration.java in frameworks/base
*/
- status_t writeToParcel(Parcel& parcel) const;
+ virtual status_t writeToParcel(Parcel* parcel) const override;
+
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+ // getGraphicBufferProducer will be NULL
+ // getRotation will be INVALID_ROTATION
+ // getSurfaceSetID will be INVALID_SET_ID
+ OutputConfiguration();
+
// getGraphicBufferProducer will be NULL if error occurred
// getRotation will be INVALID_ROTATION if error occurred
+ // getSurfaceSetID will be INVALID_SET_ID if error occurred
OutputConfiguration(const Parcel& parcel);
OutputConfiguration(sp<IGraphicBufferProducer>& gbp, int rotation,
@@ -45,7 +59,8 @@
bool operator == (const OutputConfiguration& other) const {
return (mGbp == other.mGbp &&
- mRotation == other.mRotation);
+ mRotation == other.mRotation &&
+ mSurfaceSetID == other.mSurfaceSetID);
}
bool operator != (const OutputConfiguration& other) const {
return !(*this == other);
@@ -53,6 +68,9 @@
bool operator < (const OutputConfiguration& other) const {
if (*this == other) return false;
if (mGbp != other.mGbp) return mGbp < other.mGbp;
+ if (mSurfaceSetID != other.mSurfaceSetID) {
+ return mSurfaceSetID < other.mSurfaceSetID;
+ }
return mRotation < other.mRotation;
}
bool operator > (const OutputConfiguration& other) const {
@@ -64,8 +82,15 @@
int mRotation;
int mSurfaceSetID;
// helper function
- static String16 readMaybeEmptyString16(const Parcel& parcel);
+ static String16 readMaybeEmptyString16(const Parcel* parcel);
};
+} // namespace params
+} // namespace camera2
+} // namespace hardware
+
+
+using hardware::camera2::params::OutputConfiguration;
+
}; // namespace android
#endif
diff --git a/include/camera/camera2/SubmitInfo.h b/include/camera/camera2/SubmitInfo.h
new file mode 100644
index 0000000..3b47b32
--- /dev/null
+++ b/include/camera/camera2/SubmitInfo.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_HARDWARE_CAMERA2_UTIL_SUBMITINFO_H
+#define ANDROID_HARDWARE_CAMERA2_UTIL_SUBMITINFO_H
+
+#include <binder/Parcel.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+namespace hardware {
+namespace camera2 {
+namespace utils {
+
+struct SubmitInfo : public android::Parcelable {
+public:
+
+ int32_t mRequestId;
+ int64_t mLastFrameNumber;
+
+ virtual status_t writeToParcel(Parcel *parcel) const override;
+ virtual status_t readFromParcel(const Parcel* parcel) override;
+
+};
+
+} // namespace utils
+} // namespace camera2
+} // namespace hardware
+} // namespace android
+
+#endif
diff --git a/include/camera/ndk/NdkCameraMetadata.h b/include/camera/ndk/NdkCameraMetadata.h
index 56412ad..9b56a9d 100644
--- a/include/camera/ndk/NdkCameraMetadata.h
+++ b/include/camera/ndk/NdkCameraMetadata.h
@@ -93,7 +93,13 @@
camera_status_t ACameraMetadata_getConstEntry(
const ACameraMetadata*, uint32_t tag, ACameraMetadata_const_entry* entry);
-// TODO: need an API to list all tags in the metadata. Same for ACaptureRequest
+/*
+ * List all the entry tags in this metadata.
+ * The memory of tags is managed by ACameraMetadata itself and must NOT be free/delete
+ * by application. Do NOT access tags after calling ACameraMetadata_free
+ */
+camera_status_t ACameraMetadata_getAllTags(
+ const ACameraMetadata*, /*out*/int32_t* numTags, /*out*/const uint32_t** tags);
/**
* Copy a metadata. Duplicates a metadata structure.
diff --git a/include/camera/ndk/NdkCameraMetadataTags.h b/include/camera/ndk/NdkCameraMetadataTags.h
index 43f5193..a1d3bf7 100644
--- a/include/camera/ndk/NdkCameraMetadataTags.h
+++ b/include/camera/ndk/NdkCameraMetadataTags.h
@@ -98,273 +98,382 @@
typedef enum acamera_metadata_tag {
ACAMERA_COLOR_CORRECTION_MODE = // byte (enum)
ACAMERA_COLOR_CORRECTION_START,
- ACAMERA_COLOR_CORRECTION_TRANSFORM, // rational[3*3]
- ACAMERA_COLOR_CORRECTION_GAINS, // float[4]
- ACAMERA_COLOR_CORRECTION_ABERRATION_MODE, // byte (enum)
- ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES, // byte[n]
+ ACAMERA_COLOR_CORRECTION_TRANSFORM = // rational[3*3]
+ ACAMERA_COLOR_CORRECTION_START + 1,
+ ACAMERA_COLOR_CORRECTION_GAINS = // float[4]
+ ACAMERA_COLOR_CORRECTION_START + 2,
+ ACAMERA_COLOR_CORRECTION_ABERRATION_MODE = // byte (enum)
+ ACAMERA_COLOR_CORRECTION_START + 3,
+ ACAMERA_COLOR_CORRECTION_AVAILABLE_ABERRATION_MODES = // byte[n]
+ ACAMERA_COLOR_CORRECTION_START + 4,
ACAMERA_COLOR_CORRECTION_END,
ACAMERA_CONTROL_AE_ANTIBANDING_MODE = // byte (enum)
ACAMERA_CONTROL_START,
- ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION, // int32
- ACAMERA_CONTROL_AE_LOCK, // byte (enum)
- ACAMERA_CONTROL_AE_MODE, // byte (enum)
- ACAMERA_CONTROL_AE_REGIONS, // int32[5*area_count]
- ACAMERA_CONTROL_AE_TARGET_FPS_RANGE, // int32[2]
- ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER, // byte (enum)
- ACAMERA_CONTROL_AF_MODE, // byte (enum)
- ACAMERA_CONTROL_AF_REGIONS, // int32[5*area_count]
- ACAMERA_CONTROL_AF_TRIGGER, // byte (enum)
- ACAMERA_CONTROL_AWB_LOCK, // byte (enum)
- ACAMERA_CONTROL_AWB_MODE, // byte (enum)
- ACAMERA_CONTROL_AWB_REGIONS, // int32[5*area_count]
- ACAMERA_CONTROL_CAPTURE_INTENT, // byte (enum)
- ACAMERA_CONTROL_EFFECT_MODE, // byte (enum)
- ACAMERA_CONTROL_MODE, // byte (enum)
- ACAMERA_CONTROL_SCENE_MODE, // byte (enum)
- ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE, // byte (enum)
- ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES, // byte[n]
- ACAMERA_CONTROL_AE_AVAILABLE_MODES, // byte[n]
- ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES, // int32[2*n]
- ACAMERA_CONTROL_AE_COMPENSATION_RANGE, // int32[2]
- ACAMERA_CONTROL_AE_COMPENSATION_STEP, // rational
- ACAMERA_CONTROL_AF_AVAILABLE_MODES, // byte[n]
- ACAMERA_CONTROL_AVAILABLE_EFFECTS, // byte[n]
- ACAMERA_CONTROL_AVAILABLE_SCENE_MODES, // byte[n]
- ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES, // byte[n]
- ACAMERA_CONTROL_AWB_AVAILABLE_MODES, // byte[n]
- ACAMERA_CONTROL_MAX_REGIONS, // int32[3]
- ACAMERA_CONTROL_RESERVED_29,
- ACAMERA_CONTROL_RESERVED_30,
- ACAMERA_CONTROL_AE_STATE, // byte (enum)
- ACAMERA_CONTROL_AF_STATE, // byte (enum)
- ACAMERA_CONTROL_RESERVED_33,
- ACAMERA_CONTROL_AWB_STATE, // byte (enum)
- ACAMERA_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS, // int32[5*n]
- ACAMERA_CONTROL_AE_LOCK_AVAILABLE, // byte (enum)
- ACAMERA_CONTROL_AWB_LOCK_AVAILABLE, // byte (enum)
- ACAMERA_CONTROL_AVAILABLE_MODES, // byte[n]
- ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE, // int32[2]
- ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST, // int32
+ ACAMERA_CONTROL_AE_EXPOSURE_COMPENSATION = // int32
+ ACAMERA_CONTROL_START + 1,
+ ACAMERA_CONTROL_AE_LOCK = // byte (enum)
+ ACAMERA_CONTROL_START + 2,
+ ACAMERA_CONTROL_AE_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 3,
+ ACAMERA_CONTROL_AE_REGIONS = // int32[5*area_count]
+ ACAMERA_CONTROL_START + 4,
+ ACAMERA_CONTROL_AE_TARGET_FPS_RANGE = // int32[2]
+ ACAMERA_CONTROL_START + 5,
+ ACAMERA_CONTROL_AE_PRECAPTURE_TRIGGER = // byte (enum)
+ ACAMERA_CONTROL_START + 6,
+ ACAMERA_CONTROL_AF_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 7,
+ ACAMERA_CONTROL_AF_REGIONS = // int32[5*area_count]
+ ACAMERA_CONTROL_START + 8,
+ ACAMERA_CONTROL_AF_TRIGGER = // byte (enum)
+ ACAMERA_CONTROL_START + 9,
+ ACAMERA_CONTROL_AWB_LOCK = // byte (enum)
+ ACAMERA_CONTROL_START + 10,
+ ACAMERA_CONTROL_AWB_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 11,
+ ACAMERA_CONTROL_AWB_REGIONS = // int32[5*area_count]
+ ACAMERA_CONTROL_START + 12,
+ ACAMERA_CONTROL_CAPTURE_INTENT = // byte (enum)
+ ACAMERA_CONTROL_START + 13,
+ ACAMERA_CONTROL_EFFECT_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 14,
+ ACAMERA_CONTROL_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 15,
+ ACAMERA_CONTROL_SCENE_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 16,
+ ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE = // byte (enum)
+ ACAMERA_CONTROL_START + 17,
+ ACAMERA_CONTROL_AE_AVAILABLE_ANTIBANDING_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 18,
+ ACAMERA_CONTROL_AE_AVAILABLE_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 19,
+ ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES = // int32[2*n]
+ ACAMERA_CONTROL_START + 20,
+ ACAMERA_CONTROL_AE_COMPENSATION_RANGE = // int32[2]
+ ACAMERA_CONTROL_START + 21,
+ ACAMERA_CONTROL_AE_COMPENSATION_STEP = // rational
+ ACAMERA_CONTROL_START + 22,
+ ACAMERA_CONTROL_AF_AVAILABLE_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 23,
+ ACAMERA_CONTROL_AVAILABLE_EFFECTS = // byte[n]
+ ACAMERA_CONTROL_START + 24,
+ ACAMERA_CONTROL_AVAILABLE_SCENE_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 25,
+ ACAMERA_CONTROL_AVAILABLE_VIDEO_STABILIZATION_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 26,
+ ACAMERA_CONTROL_AWB_AVAILABLE_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 27,
+ ACAMERA_CONTROL_MAX_REGIONS = // int32[3]
+ ACAMERA_CONTROL_START + 28,
+ ACAMERA_CONTROL_AE_STATE = // byte (enum)
+ ACAMERA_CONTROL_START + 31,
+ ACAMERA_CONTROL_AF_STATE = // byte (enum)
+ ACAMERA_CONTROL_START + 32,
+ ACAMERA_CONTROL_AWB_STATE = // byte (enum)
+ ACAMERA_CONTROL_START + 34,
+ ACAMERA_CONTROL_AVAILABLE_HIGH_SPEED_VIDEO_CONFIGURATIONS = // int32[5*n]
+ ACAMERA_CONTROL_START + 35,
+ ACAMERA_CONTROL_AE_LOCK_AVAILABLE = // byte (enum)
+ ACAMERA_CONTROL_START + 36,
+ ACAMERA_CONTROL_AWB_LOCK_AVAILABLE = // byte (enum)
+ ACAMERA_CONTROL_START + 37,
+ ACAMERA_CONTROL_AVAILABLE_MODES = // byte[n]
+ ACAMERA_CONTROL_START + 38,
+ ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST_RANGE = // int32[2]
+ ACAMERA_CONTROL_START + 39,
+ ACAMERA_CONTROL_POST_RAW_SENSITIVITY_BOOST = // int32
+ ACAMERA_CONTROL_START + 40,
ACAMERA_CONTROL_END,
- ACAMERA_DEMOSAIC_RESERVED_0 =
- ACAMERA_DEMOSAIC_START,
- ACAMERA_DEMOSAIC_END,
-
ACAMERA_EDGE_MODE = // byte (enum)
ACAMERA_EDGE_START,
- ACAMERA_EDGE_RESERVED_1,
- ACAMERA_EDGE_AVAILABLE_EDGE_MODES, // byte[n]
+ ACAMERA_EDGE_AVAILABLE_EDGE_MODES = // byte[n]
+ ACAMERA_EDGE_START + 2,
ACAMERA_EDGE_END,
- ACAMERA_FLASH_RESERVED_0 =
- ACAMERA_FLASH_START,
- ACAMERA_FLASH_RESERVED_1,
- ACAMERA_FLASH_MODE, // byte (enum)
- ACAMERA_FLASH_RESERVED_3,
- ACAMERA_FLASH_RESERVED_4,
- ACAMERA_FLASH_STATE, // byte (enum)
+ ACAMERA_FLASH_MODE = // byte (enum)
+ ACAMERA_FLASH_START + 2,
+ ACAMERA_FLASH_STATE = // byte (enum)
+ ACAMERA_FLASH_START + 5,
ACAMERA_FLASH_END,
ACAMERA_FLASH_INFO_AVAILABLE = // byte (enum)
ACAMERA_FLASH_INFO_START,
- ACAMERA_FLASH_INFO_RESERVED_1,
ACAMERA_FLASH_INFO_END,
ACAMERA_HOT_PIXEL_MODE = // byte (enum)
ACAMERA_HOT_PIXEL_START,
- ACAMERA_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES, // byte[n]
+ ACAMERA_HOT_PIXEL_AVAILABLE_HOT_PIXEL_MODES = // byte[n]
+ ACAMERA_HOT_PIXEL_START + 1,
ACAMERA_HOT_PIXEL_END,
ACAMERA_JPEG_GPS_COORDINATES = // double[3]
ACAMERA_JPEG_START,
- ACAMERA_JPEG_GPS_PROCESSING_METHOD, // byte
- ACAMERA_JPEG_GPS_TIMESTAMP, // int64
- ACAMERA_JPEG_ORIENTATION, // int32
- ACAMERA_JPEG_QUALITY, // byte
- ACAMERA_JPEG_THUMBNAIL_QUALITY, // byte
- ACAMERA_JPEG_THUMBNAIL_SIZE, // int32[2]
- ACAMERA_JPEG_AVAILABLE_THUMBNAIL_SIZES, // int32[2*n]
- ACAMERA_JPEG_RESERVED_8,
- ACAMERA_JPEG_RESERVED_9,
+ ACAMERA_JPEG_GPS_PROCESSING_METHOD = // byte
+ ACAMERA_JPEG_START + 1,
+ ACAMERA_JPEG_GPS_TIMESTAMP = // int64
+ ACAMERA_JPEG_START + 2,
+ ACAMERA_JPEG_ORIENTATION = // int32
+ ACAMERA_JPEG_START + 3,
+ ACAMERA_JPEG_QUALITY = // byte
+ ACAMERA_JPEG_START + 4,
+ ACAMERA_JPEG_THUMBNAIL_QUALITY = // byte
+ ACAMERA_JPEG_START + 5,
+ ACAMERA_JPEG_THUMBNAIL_SIZE = // int32[2]
+ ACAMERA_JPEG_START + 6,
+ ACAMERA_JPEG_AVAILABLE_THUMBNAIL_SIZES = // int32[2*n]
+ ACAMERA_JPEG_START + 7,
ACAMERA_JPEG_END,
ACAMERA_LENS_APERTURE = // float
ACAMERA_LENS_START,
- ACAMERA_LENS_FILTER_DENSITY, // float
- ACAMERA_LENS_FOCAL_LENGTH, // float
- ACAMERA_LENS_FOCUS_DISTANCE, // float
- ACAMERA_LENS_OPTICAL_STABILIZATION_MODE, // byte (enum)
- ACAMERA_LENS_FACING, // byte (enum)
- ACAMERA_LENS_POSE_ROTATION, // float[4]
- ACAMERA_LENS_POSE_TRANSLATION, // float[3]
- ACAMERA_LENS_FOCUS_RANGE, // float[2]
- ACAMERA_LENS_STATE, // byte (enum)
- ACAMERA_LENS_INTRINSIC_CALIBRATION, // float[5]
- ACAMERA_LENS_RADIAL_DISTORTION, // float[6]
+ ACAMERA_LENS_FILTER_DENSITY = // float
+ ACAMERA_LENS_START + 1,
+ ACAMERA_LENS_FOCAL_LENGTH = // float
+ ACAMERA_LENS_START + 2,
+ ACAMERA_LENS_FOCUS_DISTANCE = // float
+ ACAMERA_LENS_START + 3,
+ ACAMERA_LENS_OPTICAL_STABILIZATION_MODE = // byte (enum)
+ ACAMERA_LENS_START + 4,
+ ACAMERA_LENS_FACING = // byte (enum)
+ ACAMERA_LENS_START + 5,
+ ACAMERA_LENS_POSE_ROTATION = // float[4]
+ ACAMERA_LENS_START + 6,
+ ACAMERA_LENS_POSE_TRANSLATION = // float[3]
+ ACAMERA_LENS_START + 7,
+ ACAMERA_LENS_FOCUS_RANGE = // float[2]
+ ACAMERA_LENS_START + 8,
+ ACAMERA_LENS_STATE = // byte (enum)
+ ACAMERA_LENS_START + 9,
+ ACAMERA_LENS_INTRINSIC_CALIBRATION = // float[5]
+ ACAMERA_LENS_START + 10,
+ ACAMERA_LENS_RADIAL_DISTORTION = // float[6]
+ ACAMERA_LENS_START + 11,
ACAMERA_LENS_END,
ACAMERA_LENS_INFO_AVAILABLE_APERTURES = // float[n]
ACAMERA_LENS_INFO_START,
- ACAMERA_LENS_INFO_AVAILABLE_FILTER_DENSITIES, // float[n]
- ACAMERA_LENS_INFO_AVAILABLE_FOCAL_LENGTHS, // float[n]
- ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION, // byte[n]
- ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE, // float
- ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE, // float
- ACAMERA_LENS_INFO_SHADING_MAP_SIZE, // int32[2]
- ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION, // byte (enum)
+ ACAMERA_LENS_INFO_AVAILABLE_FILTER_DENSITIES = // float[n]
+ ACAMERA_LENS_INFO_START + 1,
+ ACAMERA_LENS_INFO_AVAILABLE_FOCAL_LENGTHS = // float[n]
+ ACAMERA_LENS_INFO_START + 2,
+ ACAMERA_LENS_INFO_AVAILABLE_OPTICAL_STABILIZATION = // byte[n]
+ ACAMERA_LENS_INFO_START + 3,
+ ACAMERA_LENS_INFO_HYPERFOCAL_DISTANCE = // float
+ ACAMERA_LENS_INFO_START + 4,
+ ACAMERA_LENS_INFO_MINIMUM_FOCUS_DISTANCE = // float
+ ACAMERA_LENS_INFO_START + 5,
+ ACAMERA_LENS_INFO_SHADING_MAP_SIZE = // int32[2]
+ ACAMERA_LENS_INFO_START + 6,
+ ACAMERA_LENS_INFO_FOCUS_DISTANCE_CALIBRATION = // byte (enum)
+ ACAMERA_LENS_INFO_START + 7,
ACAMERA_LENS_INFO_END,
ACAMERA_NOISE_REDUCTION_MODE = // byte (enum)
ACAMERA_NOISE_REDUCTION_START,
- ACAMERA_NOISE_REDUCTION_RESERVED_1,
- ACAMERA_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES, // byte[n]
+ ACAMERA_NOISE_REDUCTION_AVAILABLE_NOISE_REDUCTION_MODES = // byte[n]
+ ACAMERA_NOISE_REDUCTION_START + 2,
ACAMERA_NOISE_REDUCTION_END,
- ACAMERA_QUIRKS_RESERVED_0 =
- ACAMERA_QUIRKS_START,
- ACAMERA_QUIRKS_RESERVED_1,
- ACAMERA_QUIRKS_RESERVED_2,
- ACAMERA_QUIRKS_USE_PARTIAL_RESULT, // Deprecated! DO NOT USE
- ACAMERA_QUIRKS_PARTIAL_RESULT, // Deprecated! DO NOT USE
+ ACAMERA_QUIRKS_USE_PARTIAL_RESULT = // Deprecated! DO NOT USE
+ ACAMERA_QUIRKS_START + 3,
+ ACAMERA_QUIRKS_PARTIAL_RESULT = // Deprecated! DO NOT USE
+ ACAMERA_QUIRKS_START + 4,
ACAMERA_QUIRKS_END,
ACAMERA_REQUEST_FRAME_COUNT = // Deprecated! DO NOT USE
ACAMERA_REQUEST_START,
- ACAMERA_REQUEST_ID, // int32
- ACAMERA_REQUEST_RESERVED_2,
- ACAMERA_REQUEST_RESERVED_3,
- ACAMERA_REQUEST_RESERVED_4,
- ACAMERA_REQUEST_RESERVED_5,
- ACAMERA_REQUEST_MAX_NUM_OUTPUT_STREAMS, // int32[3]
- ACAMERA_REQUEST_RESERVED_7,
- ACAMERA_REQUEST_MAX_NUM_INPUT_STREAMS, // int32
- ACAMERA_REQUEST_PIPELINE_DEPTH, // byte
- ACAMERA_REQUEST_PIPELINE_MAX_DEPTH, // byte
- ACAMERA_REQUEST_PARTIAL_RESULT_COUNT, // int32
- ACAMERA_REQUEST_AVAILABLE_CAPABILITIES, // byte[n] (enum)
- ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS, // int32[n]
- ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS, // int32[n]
- ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS, // int32[n]
+ ACAMERA_REQUEST_ID = // int32
+ ACAMERA_REQUEST_START + 1,
+ ACAMERA_REQUEST_MAX_NUM_OUTPUT_STREAMS = // int32[3]
+ ACAMERA_REQUEST_START + 6,
+ ACAMERA_REQUEST_MAX_NUM_INPUT_STREAMS = // int32
+ ACAMERA_REQUEST_START + 8,
+ ACAMERA_REQUEST_PIPELINE_DEPTH = // byte
+ ACAMERA_REQUEST_START + 9,
+ ACAMERA_REQUEST_PIPELINE_MAX_DEPTH = // byte
+ ACAMERA_REQUEST_START + 10,
+ ACAMERA_REQUEST_PARTIAL_RESULT_COUNT = // int32
+ ACAMERA_REQUEST_START + 11,
+ ACAMERA_REQUEST_AVAILABLE_CAPABILITIES = // byte[n] (enum)
+ ACAMERA_REQUEST_START + 12,
+ ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS = // int32[n]
+ ACAMERA_REQUEST_START + 13,
+ ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS = // int32[n]
+ ACAMERA_REQUEST_START + 14,
+ ACAMERA_REQUEST_AVAILABLE_CHARACTERISTICS_KEYS = // int32[n]
+ ACAMERA_REQUEST_START + 15,
ACAMERA_REQUEST_END,
ACAMERA_SCALER_CROP_REGION = // int32[4]
ACAMERA_SCALER_START,
- ACAMERA_SCALER_AVAILABLE_FORMATS, // Deprecated! DO NOT USE
- ACAMERA_SCALER_AVAILABLE_JPEG_MIN_DURATIONS, // Deprecated! DO NOT USE
- ACAMERA_SCALER_AVAILABLE_JPEG_SIZES, // Deprecated! DO NOT USE
- ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM, // float
- ACAMERA_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS, // Deprecated! DO NOT USE
- ACAMERA_SCALER_AVAILABLE_PROCESSED_SIZES, // Deprecated! DO NOT USE
- ACAMERA_SCALER_RESERVED_7,
- ACAMERA_SCALER_RESERVED_8,
- ACAMERA_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP, // int32
- ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, // int32[n*4] (enum)
- ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS, // int64[4*n]
- ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS, // int64[4*n]
- ACAMERA_SCALER_CROPPING_TYPE, // byte (enum)
+ ACAMERA_SCALER_AVAILABLE_FORMATS = // Deprecated! DO NOT USE
+ ACAMERA_SCALER_START + 1,
+ ACAMERA_SCALER_AVAILABLE_JPEG_MIN_DURATIONS = // Deprecated! DO NOT USE
+ ACAMERA_SCALER_START + 2,
+ ACAMERA_SCALER_AVAILABLE_JPEG_SIZES = // Deprecated! DO NOT USE
+ ACAMERA_SCALER_START + 3,
+ ACAMERA_SCALER_AVAILABLE_MAX_DIGITAL_ZOOM = // float
+ ACAMERA_SCALER_START + 4,
+ ACAMERA_SCALER_AVAILABLE_PROCESSED_MIN_DURATIONS = // Deprecated! DO NOT USE
+ ACAMERA_SCALER_START + 5,
+ ACAMERA_SCALER_AVAILABLE_PROCESSED_SIZES = // Deprecated! DO NOT USE
+ ACAMERA_SCALER_START + 6,
+ ACAMERA_SCALER_AVAILABLE_INPUT_OUTPUT_FORMATS_MAP = // int32
+ ACAMERA_SCALER_START + 9,
+ ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS = // int32[n*4] (enum)
+ ACAMERA_SCALER_START + 10,
+ ACAMERA_SCALER_AVAILABLE_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_SCALER_START + 11,
+ ACAMERA_SCALER_AVAILABLE_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_SCALER_START + 12,
+ ACAMERA_SCALER_CROPPING_TYPE = // byte (enum)
+ ACAMERA_SCALER_START + 13,
ACAMERA_SCALER_END,
ACAMERA_SENSOR_EXPOSURE_TIME = // int64
ACAMERA_SENSOR_START,
- ACAMERA_SENSOR_FRAME_DURATION, // int64
- ACAMERA_SENSOR_SENSITIVITY, // int32
- ACAMERA_SENSOR_REFERENCE_ILLUMINANT1, // byte (enum)
- ACAMERA_SENSOR_REFERENCE_ILLUMINANT2, // byte
- ACAMERA_SENSOR_CALIBRATION_TRANSFORM1, // rational[3*3]
- ACAMERA_SENSOR_CALIBRATION_TRANSFORM2, // rational[3*3]
- ACAMERA_SENSOR_COLOR_TRANSFORM1, // rational[3*3]
- ACAMERA_SENSOR_COLOR_TRANSFORM2, // rational[3*3]
- ACAMERA_SENSOR_FORWARD_MATRIX1, // rational[3*3]
- ACAMERA_SENSOR_FORWARD_MATRIX2, // rational[3*3]
- ACAMERA_SENSOR_RESERVED_11,
- ACAMERA_SENSOR_BLACK_LEVEL_PATTERN, // int32[4]
- ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY, // int32
- ACAMERA_SENSOR_ORIENTATION, // int32
- ACAMERA_SENSOR_RESERVED_15,
- ACAMERA_SENSOR_TIMESTAMP, // int64
- ACAMERA_SENSOR_RESERVED_17,
- ACAMERA_SENSOR_NEUTRAL_COLOR_POINT, // rational[3]
- ACAMERA_SENSOR_NOISE_PROFILE, // double[2*CFA Channels]
- ACAMERA_SENSOR_RESERVED_20,
- ACAMERA_SENSOR_RESERVED_21,
- ACAMERA_SENSOR_GREEN_SPLIT, // float
- ACAMERA_SENSOR_TEST_PATTERN_DATA, // int32[4]
- ACAMERA_SENSOR_TEST_PATTERN_MODE, // int32 (enum)
- ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES, // int32[n]
- ACAMERA_SENSOR_ROLLING_SHUTTER_SKEW, // int64
- ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS, // int32[4*num_regions]
- ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL, // float[4]
- ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL, // int32
- ACAMERA_SENSOR_RESERVED_30,
+ ACAMERA_SENSOR_FRAME_DURATION = // int64
+ ACAMERA_SENSOR_START + 1,
+ ACAMERA_SENSOR_SENSITIVITY = // int32
+ ACAMERA_SENSOR_START + 2,
+ ACAMERA_SENSOR_REFERENCE_ILLUMINANT1 = // byte (enum)
+ ACAMERA_SENSOR_START + 3,
+ ACAMERA_SENSOR_REFERENCE_ILLUMINANT2 = // byte
+ ACAMERA_SENSOR_START + 4,
+ ACAMERA_SENSOR_CALIBRATION_TRANSFORM1 = // rational[3*3]
+ ACAMERA_SENSOR_START + 5,
+ ACAMERA_SENSOR_CALIBRATION_TRANSFORM2 = // rational[3*3]
+ ACAMERA_SENSOR_START + 6,
+ ACAMERA_SENSOR_COLOR_TRANSFORM1 = // rational[3*3]
+ ACAMERA_SENSOR_START + 7,
+ ACAMERA_SENSOR_COLOR_TRANSFORM2 = // rational[3*3]
+ ACAMERA_SENSOR_START + 8,
+ ACAMERA_SENSOR_FORWARD_MATRIX1 = // rational[3*3]
+ ACAMERA_SENSOR_START + 9,
+ ACAMERA_SENSOR_FORWARD_MATRIX2 = // rational[3*3]
+ ACAMERA_SENSOR_START + 10,
+ ACAMERA_SENSOR_BLACK_LEVEL_PATTERN = // int32[4]
+ ACAMERA_SENSOR_START + 12,
+ ACAMERA_SENSOR_MAX_ANALOG_SENSITIVITY = // int32
+ ACAMERA_SENSOR_START + 13,
+ ACAMERA_SENSOR_ORIENTATION = // int32
+ ACAMERA_SENSOR_START + 14,
+ ACAMERA_SENSOR_TIMESTAMP = // int64
+ ACAMERA_SENSOR_START + 16,
+ ACAMERA_SENSOR_NEUTRAL_COLOR_POINT = // rational[3]
+ ACAMERA_SENSOR_START + 18,
+ ACAMERA_SENSOR_NOISE_PROFILE = // double[2*CFA Channels]
+ ACAMERA_SENSOR_START + 19,
+ ACAMERA_SENSOR_GREEN_SPLIT = // float
+ ACAMERA_SENSOR_START + 22,
+ ACAMERA_SENSOR_TEST_PATTERN_DATA = // int32[4]
+ ACAMERA_SENSOR_START + 23,
+ ACAMERA_SENSOR_TEST_PATTERN_MODE = // int32 (enum)
+ ACAMERA_SENSOR_START + 24,
+ ACAMERA_SENSOR_AVAILABLE_TEST_PATTERN_MODES = // int32[n]
+ ACAMERA_SENSOR_START + 25,
+ ACAMERA_SENSOR_ROLLING_SHUTTER_SKEW = // int64
+ ACAMERA_SENSOR_START + 26,
+ ACAMERA_SENSOR_OPTICAL_BLACK_REGIONS = // int32[4*num_regions]
+ ACAMERA_SENSOR_START + 27,
+ ACAMERA_SENSOR_DYNAMIC_BLACK_LEVEL = // float[4]
+ ACAMERA_SENSOR_START + 28,
+ ACAMERA_SENSOR_DYNAMIC_WHITE_LEVEL = // int32
+ ACAMERA_SENSOR_START + 29,
ACAMERA_SENSOR_END,
ACAMERA_SENSOR_INFO_ACTIVE_ARRAY_SIZE = // int32[4]
ACAMERA_SENSOR_INFO_START,
- ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE, // int32[2]
- ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT, // byte (enum)
- ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE, // int64[2]
- ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION, // int64
- ACAMERA_SENSOR_INFO_PHYSICAL_SIZE, // float[2]
- ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE, // int32[2]
- ACAMERA_SENSOR_INFO_WHITE_LEVEL, // int32
- ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE, // byte (enum)
- ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED, // byte (enum)
- ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE, // int32[4]
+ ACAMERA_SENSOR_INFO_SENSITIVITY_RANGE = // int32[2]
+ ACAMERA_SENSOR_INFO_START + 1,
+ ACAMERA_SENSOR_INFO_COLOR_FILTER_ARRANGEMENT = // byte (enum)
+ ACAMERA_SENSOR_INFO_START + 2,
+ ACAMERA_SENSOR_INFO_EXPOSURE_TIME_RANGE = // int64[2]
+ ACAMERA_SENSOR_INFO_START + 3,
+ ACAMERA_SENSOR_INFO_MAX_FRAME_DURATION = // int64
+ ACAMERA_SENSOR_INFO_START + 4,
+ ACAMERA_SENSOR_INFO_PHYSICAL_SIZE = // float[2]
+ ACAMERA_SENSOR_INFO_START + 5,
+ ACAMERA_SENSOR_INFO_PIXEL_ARRAY_SIZE = // int32[2]
+ ACAMERA_SENSOR_INFO_START + 6,
+ ACAMERA_SENSOR_INFO_WHITE_LEVEL = // int32
+ ACAMERA_SENSOR_INFO_START + 7,
+ ACAMERA_SENSOR_INFO_TIMESTAMP_SOURCE = // byte (enum)
+ ACAMERA_SENSOR_INFO_START + 8,
+ ACAMERA_SENSOR_INFO_LENS_SHADING_APPLIED = // byte (enum)
+ ACAMERA_SENSOR_INFO_START + 9,
+ ACAMERA_SENSOR_INFO_PRE_CORRECTION_ACTIVE_ARRAY_SIZE = // int32[4]
+ ACAMERA_SENSOR_INFO_START + 10,
ACAMERA_SENSOR_INFO_END,
ACAMERA_SHADING_MODE = // byte (enum)
ACAMERA_SHADING_START,
- ACAMERA_SHADING_RESERVED_1,
- ACAMERA_SHADING_AVAILABLE_MODES, // byte[n]
+ ACAMERA_SHADING_AVAILABLE_MODES = // byte[n]
+ ACAMERA_SHADING_START + 2,
ACAMERA_SHADING_END,
ACAMERA_STATISTICS_FACE_DETECT_MODE = // byte (enum)
ACAMERA_STATISTICS_START,
- ACAMERA_STATISTICS_RESERVED_1,
- ACAMERA_STATISTICS_RESERVED_2,
- ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE, // byte (enum)
- ACAMERA_STATISTICS_FACE_IDS, // int32[n]
- ACAMERA_STATISTICS_FACE_LANDMARKS, // int32[n*6]
- ACAMERA_STATISTICS_FACE_RECTANGLES, // int32[n*4]
- ACAMERA_STATISTICS_FACE_SCORES, // byte[n]
- ACAMERA_STATISTICS_RESERVED_8,
- ACAMERA_STATISTICS_RESERVED_9,
- ACAMERA_STATISTICS_LENS_SHADING_CORRECTION_MAP, // byte
- ACAMERA_STATISTICS_LENS_SHADING_MAP, // float[4*n*m]
- ACAMERA_STATISTICS_PREDICTED_COLOR_GAINS, // Deprecated! DO NOT USE
- ACAMERA_STATISTICS_PREDICTED_COLOR_TRANSFORM, // Deprecated! DO NOT USE
- ACAMERA_STATISTICS_SCENE_FLICKER, // byte (enum)
- ACAMERA_STATISTICS_HOT_PIXEL_MAP, // int32[2*n]
- ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE, // byte (enum)
+ ACAMERA_STATISTICS_HOT_PIXEL_MAP_MODE = // byte (enum)
+ ACAMERA_STATISTICS_START + 3,
+ ACAMERA_STATISTICS_FACE_IDS = // int32[n]
+ ACAMERA_STATISTICS_START + 4,
+ ACAMERA_STATISTICS_FACE_LANDMARKS = // int32[n*6]
+ ACAMERA_STATISTICS_START + 5,
+ ACAMERA_STATISTICS_FACE_RECTANGLES = // int32[n*4]
+ ACAMERA_STATISTICS_START + 6,
+ ACAMERA_STATISTICS_FACE_SCORES = // byte[n]
+ ACAMERA_STATISTICS_START + 7,
+ ACAMERA_STATISTICS_LENS_SHADING_CORRECTION_MAP = // byte
+ ACAMERA_STATISTICS_START + 10,
+ ACAMERA_STATISTICS_LENS_SHADING_MAP = // float[4*n*m]
+ ACAMERA_STATISTICS_START + 11,
+ ACAMERA_STATISTICS_PREDICTED_COLOR_GAINS = // Deprecated! DO NOT USE
+ ACAMERA_STATISTICS_START + 12,
+ ACAMERA_STATISTICS_PREDICTED_COLOR_TRANSFORM = // Deprecated! DO NOT USE
+ ACAMERA_STATISTICS_START + 13,
+ ACAMERA_STATISTICS_SCENE_FLICKER = // byte (enum)
+ ACAMERA_STATISTICS_START + 14,
+ ACAMERA_STATISTICS_HOT_PIXEL_MAP = // int32[2*n]
+ ACAMERA_STATISTICS_START + 15,
+ ACAMERA_STATISTICS_LENS_SHADING_MAP_MODE = // byte (enum)
+ ACAMERA_STATISTICS_START + 16,
ACAMERA_STATISTICS_END,
ACAMERA_STATISTICS_INFO_AVAILABLE_FACE_DETECT_MODES = // byte[n]
ACAMERA_STATISTICS_INFO_START,
- ACAMERA_STATISTICS_INFO_RESERVED_1,
- ACAMERA_STATISTICS_INFO_MAX_FACE_COUNT, // int32
- ACAMERA_STATISTICS_INFO_RESERVED_3,
- ACAMERA_STATISTICS_INFO_RESERVED_4,
- ACAMERA_STATISTICS_INFO_RESERVED_5,
- ACAMERA_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES, // byte[n]
- ACAMERA_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES, // byte[n]
+ ACAMERA_STATISTICS_INFO_MAX_FACE_COUNT = // int32
+ ACAMERA_STATISTICS_INFO_START + 2,
+ ACAMERA_STATISTICS_INFO_AVAILABLE_HOT_PIXEL_MAP_MODES = // byte[n]
+ ACAMERA_STATISTICS_INFO_START + 6,
+ ACAMERA_STATISTICS_INFO_AVAILABLE_LENS_SHADING_MAP_MODES = // byte[n]
+ ACAMERA_STATISTICS_INFO_START + 7,
ACAMERA_STATISTICS_INFO_END,
ACAMERA_TONEMAP_CURVE_BLUE = // float[n*2]
ACAMERA_TONEMAP_START,
- ACAMERA_TONEMAP_CURVE_GREEN, // float[n*2]
- ACAMERA_TONEMAP_CURVE_RED, // float[n*2]
- ACAMERA_TONEMAP_MODE, // byte (enum)
- ACAMERA_TONEMAP_MAX_CURVE_POINTS, // int32
- ACAMERA_TONEMAP_AVAILABLE_TONE_MAP_MODES, // byte[n]
- ACAMERA_TONEMAP_GAMMA, // float
- ACAMERA_TONEMAP_PRESET_CURVE, // byte (enum)
+ ACAMERA_TONEMAP_CURVE_GREEN = // float[n*2]
+ ACAMERA_TONEMAP_START + 1,
+ ACAMERA_TONEMAP_CURVE_RED = // float[n*2]
+ ACAMERA_TONEMAP_START + 2,
+ ACAMERA_TONEMAP_MODE = // byte (enum)
+ ACAMERA_TONEMAP_START + 3,
+ ACAMERA_TONEMAP_MAX_CURVE_POINTS = // int32
+ ACAMERA_TONEMAP_START + 4,
+ ACAMERA_TONEMAP_AVAILABLE_TONE_MAP_MODES = // byte[n]
+ ACAMERA_TONEMAP_START + 5,
+ ACAMERA_TONEMAP_GAMMA = // float
+ ACAMERA_TONEMAP_START + 6,
+ ACAMERA_TONEMAP_PRESET_CURVE = // byte (enum)
+ ACAMERA_TONEMAP_START + 7,
ACAMERA_TONEMAP_END,
ACAMERA_LED_TRANSMIT = // byte (enum)
ACAMERA_LED_START,
- ACAMERA_LED_AVAILABLE_LEDS, // byte[n] (enum)
+ ACAMERA_LED_AVAILABLE_LEDS = // byte[n] (enum)
+ ACAMERA_LED_START + 1,
ACAMERA_LED_END,
ACAMERA_INFO_SUPPORTED_HARDWARE_LEVEL = // byte (enum)
@@ -377,20 +486,24 @@
ACAMERA_SYNC_FRAME_NUMBER = // int64 (enum)
ACAMERA_SYNC_START,
- ACAMERA_SYNC_MAX_LATENCY, // int32 (enum)
+ ACAMERA_SYNC_MAX_LATENCY = // int32 (enum)
+ ACAMERA_SYNC_START + 1,
ACAMERA_SYNC_END,
ACAMERA_REPROCESS_EFFECTIVE_EXPOSURE_FACTOR = // float
ACAMERA_REPROCESS_START,
- ACAMERA_REPROCESS_MAX_CAPTURE_STALL, // int32
+ ACAMERA_REPROCESS_MAX_CAPTURE_STALL = // int32
+ ACAMERA_REPROCESS_START + 1,
ACAMERA_REPROCESS_END,
- ACAMERA_DEPTH_RESERVED_0 =
- ACAMERA_DEPTH_START,
- ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, // int32[n*4] (enum)
- ACAMERA_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS, // int64[4*n]
- ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS, // int64[4*n]
- ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE, // byte (enum)
+ ACAMERA_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS = // int32[n*4] (enum)
+ ACAMERA_DEPTH_START + 1,
+ ACAMERA_DEPTH_AVAILABLE_DEPTH_MIN_FRAME_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 2,
+ ACAMERA_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS = // int64[4*n]
+ ACAMERA_DEPTH_START + 3,
+ ACAMERA_DEPTH_DEPTH_IS_EXCLUSIVE = // byte (enum)
+ ACAMERA_DEPTH_START + 4,
ACAMERA_DEPTH_END,
} acamera_metadata_tag_t;
@@ -534,6 +647,8 @@
ACAMERA_CONTROL_SCENE_MODE_HIGH_SPEED_VIDEO,
ACAMERA_CONTROL_SCENE_MODE_HDR,
ACAMERA_CONTROL_SCENE_MODE_FACE_PRIORITY_LOW_LIGHT,
+ ACAMERA_CONTROL_SCENE_MODE_DEVICE_CUSTOM_START = 100,
+ ACAMERA_CONTROL_SCENE_MODE_DEVICE_CUSTOM_END = 127,
} acamera_metadata_enum_android_control_scene_mode_t;
// ACAMERA_CONTROL_VIDEO_STABILIZATION_MODE
diff --git a/include/camera/ndk/NdkCaptureRequest.h b/include/camera/ndk/NdkCaptureRequest.h
index 566d78f..d9fb164 100644
--- a/include/camera/ndk/NdkCaptureRequest.h
+++ b/include/camera/ndk/NdkCaptureRequest.h
@@ -54,6 +54,19 @@
*/
camera_status_t ACaptureRequest_getConstEntry(
const ACaptureRequest*, uint32_t tag, ACameraMetadata_const_entry* entry);
+
+/*
+ * List all the entry tags in this capture request.
+ * The memory of tags is managed by ACaptureRequest itself and must NOT be free/delete
+ * by application. Calling ACaptureRequest_setEntry_* API will invalidate previous
+ * output of ACaptureRequest_getAllTags. Do not access tags after calling
+ * ACaptureRequest_setEntry_*. To get new list of tags after updating capture request,
+ * application must call ACaptureRequest_getAllTags again.
+ * Do NOT access tags after calling ACaptureRequest_free.
+ */
+camera_status_t ACaptureRequest_getAllTags(
+ const ACaptureRequest*, /*out*/int32_t* numTags, /*out*/const uint32_t** tags);
+
/*
* Set an entry of corresponding type.
* The entry tag's type must match corresponding set API or an
diff --git a/include/media/AudioEffect.h b/include/media/AudioEffect.h
index 5af6c10..6af1962 100644
--- a/include/media/AudioEffect.h
+++ b/include/media/AudioEffect.h
@@ -139,7 +139,7 @@
* of descriptors to return.
* *count is limited to kMaxPreProcessing on return.
*/
- static status_t queryDefaultPreProcessing(int audioSession,
+ static status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count);
@@ -237,7 +237,7 @@
int32_t priority = 0,
effect_callback_t cbf = NULL,
void* user = NULL,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
);
@@ -250,7 +250,7 @@
int32_t priority = 0,
effect_callback_t cbf = NULL,
void* user = NULL,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
);
@@ -272,7 +272,7 @@
int32_t priority = 0,
effect_callback_t cbf = NULL,
void* user = NULL,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE
);
@@ -405,7 +405,7 @@
protected:
bool mEnabled; // enable state
- int32_t mSessionId; // audio session ID
+ audio_session_t mSessionId; // audio session ID
int32_t mPriority; // priority for effect control
status_t mStatus; // effect status
effect_callback_t mCbf; // callback function for status, control and
diff --git a/include/media/AudioIoDescriptor.h b/include/media/AudioIoDescriptor.h
index c94b738..a4907cc 100644
--- a/include/media/AudioIoDescriptor.h
+++ b/include/media/AudioIoDescriptor.h
@@ -56,13 +56,13 @@
return AUDIO_PORT_HANDLE_NONE;
}
- audio_io_handle_t mIoHandle;
- struct audio_patch mPatch;
- uint32_t mSamplingRate;
- audio_format_t mFormat;
- audio_channel_mask_t mChannelMask;
- size_t mFrameCount;
- uint32_t mLatency;
+ audio_io_handle_t mIoHandle;
+ struct audio_patch mPatch;
+ uint32_t mSamplingRate;
+ audio_format_t mFormat;
+ audio_channel_mask_t mChannelMask;
+ size_t mFrameCount;
+ uint32_t mLatency; // only valid for output
};
diff --git a/include/media/AudioPolicy.h b/include/media/AudioPolicy.h
index a171493..8528c7a 100644
--- a/include/media/AudioPolicy.h
+++ b/include/media/AudioPolicy.h
@@ -51,6 +51,7 @@
#define MIX_ROUTE_FLAG_RENDER 0x1
#define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
+#define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
#define MAX_MIXES_PER_POLICY 10
#define MAX_CRITERIA_PER_MIX 20
@@ -81,7 +82,7 @@
AudioMix(Vector<AudioMixMatchCriterion> criteria, uint32_t mixType, audio_config_t format,
uint32_t routeFlags, String8 registrationId, uint32_t flags) :
mCriteria(criteria), mMixType(mixType), mFormat(format),
- mRouteFlags(routeFlags), mRegistrationId(registrationId), mCbFlags(flags){}
+ mRouteFlags(routeFlags), mDeviceAddress(registrationId), mCbFlags(flags){}
status_t readFromParcel(Parcel *parcel);
status_t writeToParcel(Parcel *parcel) const;
@@ -90,13 +91,15 @@
uint32_t mMixType;
audio_config_t mFormat;
uint32_t mRouteFlags;
- String8 mRegistrationId;
+ audio_devices_t mDeviceType;
+ String8 mDeviceAddress;
uint32_t mCbFlags; // flags indicating which callbacks to use, see kCbFlag*
};
// definitions for audio recording configuration updates
// which update type is reported
+#define RECORD_CONFIG_EVENT_NONE -1
#define RECORD_CONFIG_EVENT_START 1
#define RECORD_CONFIG_EVENT_STOP 0
diff --git a/include/media/AudioRecord.h b/include/media/AudioRecord.h
index 521557d..2fa1a4e 100644
--- a/include/media/AudioRecord.h
+++ b/include/media/AudioRecord.h
@@ -145,7 +145,7 @@
* Parameters:
*
* inputSource: Select the audio input to record from (e.g. AUDIO_SOURCE_DEFAULT).
- * sampleRate: Data sink sampling rate in Hz.
+ * sampleRate: Data sink sampling rate in Hz. Zero means to use the source sample rate.
* format: Audio format (e.g AUDIO_FORMAT_PCM_16_BIT for signed
* 16 bits per sample).
* channelMask: Channel mask, such that audio_is_input_channel(channelMask) is true.
@@ -177,7 +177,7 @@
callback_t cbf = NULL,
void* user = NULL,
uint32_t notificationFrames = 0,
- int sessionId = AUDIO_SESSION_ALLOCATE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
int uid = -1,
@@ -215,7 +215,7 @@
void* user = NULL,
uint32_t notificationFrames = 0,
bool threadCanCallJava = false,
- int sessionId = AUDIO_SESSION_ALLOCATE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
int uid = -1,
@@ -249,7 +249,7 @@
* the specified event occurs on the specified trigger session.
*/
status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
- int triggerSession = 0);
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
/* Stop a track. The callback will cease being called. Note that obtainBuffer() still
* works and will drain buffers until the pool is exhausted, and then will return WOULD_BLOCK.
@@ -258,6 +258,7 @@
bool stopped() const;
/* Return the sink sample rate for this record track in Hz.
+ * If specified as zero in constructor or set(), this will be the source sample rate.
* Unlike AudioTrack, the sample rate is const after initialization, so doesn't need a lock.
*/
uint32_t getSampleRate() const { return mSampleRate; }
@@ -351,7 +352,7 @@
*
* No lock needed because session ID doesn't change after first set().
*/
- int getSessionId() const { return mSessionId; }
+ audio_session_t getSessionId() const { return mSessionId; }
/* Public API for TRANSFER_OBTAIN mode.
* Obtains a buffer of up to "audioBuffer->frameCount" full frames.
@@ -595,8 +596,14 @@
size_t mFrameSize; // app-level frame size == AudioFlinger frame size
uint32_t mLatency; // in ms
audio_channel_mask_t mChannelMask;
- audio_input_flags_t mFlags;
- int mSessionId;
+
+ audio_input_flags_t mFlags; // same as mOrigFlags, except for bits that may
+ // be denied by client or server, such as
+ // AUDIO_INPUT_FLAG_FAST. mLock must be
+ // held to read or write those bits reliably.
+ audio_input_flags_t mOrigFlags; // as specified in constructor or set(), const
+
+ audio_session_t mSessionId;
transfer_type mTransfer;
// Next 5 fields may be changed if IAudioRecord is re-created, but always != 0
diff --git a/include/media/AudioSystem.h b/include/media/AudioSystem.h
index 7011858..c9eac2e 100644
--- a/include/media/AudioSystem.h
+++ b/include/media/AudioSystem.h
@@ -31,9 +31,9 @@
typedef void (*audio_error_callback)(status_t err);
typedef void (*dynamic_policy_callback)(int event, String8 regId, int val);
-typedef void (*record_config_callback)(int event, int session, int source,
- const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+typedef void (*record_config_callback)(int event, audio_session_t session, int source,
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle);
class IAudioFlinger;
class IAudioPolicyService;
@@ -114,11 +114,12 @@
// FIXME This API assumes a route, and so should be deprecated.
static status_t getOutputLatency(uint32_t* latency,
audio_stream_type_t stream);
- static status_t getSamplingRate(audio_io_handle_t output,
+ // returns the audio HAL sample rate
+ static status_t getSamplingRate(audio_io_handle_t ioHandle,
uint32_t* samplingRate);
- // returns the number of frames per audio HAL write buffer. Corresponds to
- // audio_stream->get_buffer_size()/audio_stream_out_frame_size()
- static status_t getFrameCount(audio_io_handle_t output,
+ // returns the number of frames per audio HAL buffer. Corresponds to
+ // audio_stream->get_buffer_size()/audio_stream_out/in_frame_size()
+ static status_t getFrameCount(audio_io_handle_t ioHandle,
size_t* frameCount);
// returns the audio output latency in ms. Corresponds to
// audio_stream_out->get_latency()
@@ -150,12 +151,12 @@
// Allocate a new unique ID for use as an audio session ID or I/O handle.
// If unable to contact AudioFlinger, returns AUDIO_UNIQUE_ID_ALLOCATE instead.
// FIXME If AudioFlinger were to ever exhaust the unique ID namespace,
- // this method could fail by returning either AUDIO_UNIQUE_ID_ALLOCATE
+ // this method could fail by returning either a reserved ID like AUDIO_UNIQUE_ID_ALLOCATE
// or an unspecified existing unique ID.
- static audio_unique_id_t newAudioUniqueId();
+ static audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
- static void acquireAudioSessionId(int audioSession, pid_t pid);
- static void releaseAudioSessionId(int audioSession, pid_t pid);
+ static void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
+ static void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
// Get the HW synchronization source used for an audio session.
// Return a valid source or AUDIO_HW_SYNC_INVALID if an error occurs
@@ -259,7 +260,7 @@
static status_t registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id);
static status_t unregisterEffect(int id);
static status_t setEffectEnabled(int id, bool enabled);
@@ -427,7 +428,7 @@
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
private:
Mutex mLock;
diff --git a/include/media/AudioTrack.h b/include/media/AudioTrack.h
index a4b8571..bdd6372 100644
--- a/include/media/AudioTrack.h
+++ b/include/media/AudioTrack.h
@@ -167,7 +167,10 @@
*
* streamType: Select the type of audio stream this track is attached to
* (e.g. AUDIO_STREAM_MUSIC).
- * sampleRate: Data source sampling rate in Hz.
+ * sampleRate: Data source sampling rate in Hz. Zero means to use the sink sample rate.
+ * A non-zero value must be specified if AUDIO_OUTPUT_FLAG_DIRECT is set.
+ * 0 will not work with current policy implementation for direct output
+ * selection where an exact match is needed for sampling rate.
* format: Audio format. For mixed tracks, any PCM format supported by server is OK.
* For direct and offloaded tracks, the possible format(s) depends on the
* output sink.
@@ -210,7 +213,7 @@
callback_t cbf = NULL,
void* user = NULL,
uint32_t notificationFrames = 0,
- int sessionId = AUDIO_SESSION_ALLOCATE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
@@ -239,7 +242,7 @@
callback_t cbf = NULL,
void* user = NULL,
uint32_t notificationFrames = 0,
- int sessionId = AUDIO_SESSION_ALLOCATE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
@@ -284,7 +287,7 @@
uint32_t notificationFrames = 0,
const sp<IMemory>& sharedBuffer = 0,
bool threadCanCallJava = false,
- int sessionId = AUDIO_SESSION_ALLOCATE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
transfer_type transferType = TRANSFER_DEFAULT,
const audio_offload_info_t *offloadInfo = NULL,
int uid = -1,
@@ -395,11 +398,14 @@
status_t setAuxEffectSendLevel(float level);
void getAuxEffectSendLevel(float* level) const;
- /* Set source sample rate for this track in Hz, mostly used for games' sound effects
+ /* Set source sample rate for this track in Hz, mostly used for games' sound effects.
+ * Zero is not permitted.
*/
status_t setSampleRate(uint32_t sampleRate);
- /* Return current source sample rate in Hz */
+ /* Return current source sample rate in Hz.
+ * If specified as zero in constructor or set(), this will be the sink sample rate.
+ */
uint32_t getSampleRate() const;
/* Return the original source sample rate in Hz. This corresponds to the sample rate
@@ -577,7 +583,7 @@
* Returned value:
* AudioTrack session ID.
*/
- int getSessionId() const { return mSessionId; }
+ audio_session_t getSessionId() const { return mSessionId; }
/* Attach track auxiliary output to specified effect. Use effectId = 0
* to detach track from effect.
@@ -824,12 +830,19 @@
bool isDirect_l() const
{ return (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0; }
+ // pure pcm data is mixable (which excludes HW_AV_SYNC, with embedded timing)
+ bool isPurePcmData_l() const
+ { return audio_is_linear_pcm(mFormat)
+ && (mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) == 0; }
+
// increment mPosition by the delta of mServer, and return new value of mPosition
Modulo<uint32_t> updateAndGetPosition_l();
// check sample rate and speed is compatible with AudioTrack
bool isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const;
+ void restartIfDisabled();
+
// Next 4 fields may be changed if IAudioTrack is re-created, but always != 0
sp<IAudioTrack> mAudioTrack;
sp<IMemory> mCblkMemory;
@@ -943,13 +956,15 @@
uint32_t mUnderrunCountOffset; // updated when restoring tracks
- audio_output_flags_t mFlags;
- // const after set(), except for bits AUDIO_OUTPUT_FLAG_FAST and AUDIO_OUTPUT_FLAG_OFFLOAD.
- // mLock must be held to read or write those bits reliably.
+ audio_output_flags_t mFlags; // same as mOrigFlags, except for bits that may
+ // be denied by client or server, such as
+ // AUDIO_OUTPUT_FLAG_FAST. mLock must be
+ // held to read or write those bits reliably.
+ audio_output_flags_t mOrigFlags; // as specified in constructor or set(), const
bool mDoNotReconnect;
- int mSessionId;
+ audio_session_t mSessionId;
int mAuxEffectId;
mutable Mutex mLock;
diff --git a/include/media/IAudioFlinger.h b/include/media/IAudioFlinger.h
index 3b69ecf..e48aa1c 100644
--- a/include/media/IAudioFlinger.h
+++ b/include/media/IAudioFlinger.h
@@ -74,7 +74,7 @@
// However on failure, the client is responsible for release.
audio_io_handle_t output,
pid_t tid, // -1 means unused, otherwise must be valid non-0
- int *sessionId,
+ audio_session_t *sessionId,
int clientUid,
status_t *status) = 0;
@@ -91,20 +91,20 @@
track_flags_t *flags,
pid_t tid, // -1 means unused, otherwise must be valid non-0
int clientUid,
- int *sessionId,
+ audio_session_t *sessionId,
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers, // return value 0 means it follows cblk
status_t *status) = 0;
- // FIXME Surprisingly, sampleRate/format/frameCount/latency don't work for input handles
+ // FIXME Surprisingly, format/latency don't work for input handles
/* query the audio hardware state. This state never changes,
* and therefore can be cached.
*/
- virtual uint32_t sampleRate(audio_io_handle_t output) const = 0;
+ virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const = 0;
virtual audio_format_t format(audio_io_handle_t output) const = 0;
- virtual size_t frameCount(audio_io_handle_t output) const = 0;
+ virtual size_t frameCount(audio_io_handle_t ioHandle) const = 0;
// return estimated latency in milliseconds
virtual uint32_t latency(audio_io_handle_t output) const = 0;
@@ -182,10 +182,10 @@
virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const = 0;
- virtual audio_unique_id_t newAudioUniqueId() = 0;
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
- virtual void acquireAudioSessionId(int audioSession, pid_t pid) = 0;
- virtual void releaseAudioSessionId(int audioSession, pid_t pid) = 0;
+ virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
+ virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid) = 0;
virtual status_t queryNumberEffects(uint32_t *numEffects) const = 0;
@@ -200,13 +200,13 @@
int32_t priority,
// AudioFlinger doesn't take over handle reference from client
audio_io_handle_t output,
- int sessionId,
+ audio_session_t sessionId,
const String16& callingPackage,
status_t *status,
int *id,
int *enabled) = 0;
- virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+ virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput) = 0;
virtual audio_module_handle_t loadHwModule(const char *name) = 0;
diff --git a/include/media/IAudioPolicyService.h b/include/media/IAudioPolicyService.h
index ceca71a..80437dc 100644
--- a/include/media/IAudioPolicyService.h
+++ b/include/media/IAudioPolicyService.h
@@ -59,16 +59,16 @@
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
const audio_offload_info_t *offloadInfo = NULL) = 0;
virtual status_t getOutputForAttr(const audio_attributes_t *attr,
- audio_io_handle_t *output,
- audio_session_t session,
- audio_stream_type_t *stream,
- uid_t uid,
- uint32_t samplingRate = 0,
- audio_format_t format = AUDIO_FORMAT_DEFAULT,
- audio_channel_mask_t channelMask = 0,
- audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- const audio_offload_info_t *offloadInfo = NULL) = 0;
+ audio_io_handle_t *output,
+ audio_session_t session,
+ audio_stream_type_t *stream,
+ uid_t uid,
+ uint32_t samplingRate = 0,
+ audio_format_t format = AUDIO_FORMAT_DEFAULT,
+ audio_channel_mask_t channelMask = 0,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ const audio_offload_info_t *offloadInfo = NULL) = 0;
virtual status_t startOutput(audio_io_handle_t output,
audio_stream_type_t stream,
audio_session_t session) = 0;
@@ -108,7 +108,7 @@
virtual status_t registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id) = 0;
virtual status_t unregisterEffect(int id) = 0;
virtual status_t setEffectEnabled(int id, bool enabled) = 0;
@@ -116,7 +116,7 @@
virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0)
const = 0;
virtual bool isSourceActive(audio_source_t source) const = 0;
- virtual status_t queryDefaultPreProcessing(int audioSession,
+ virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count) = 0;
// Check if offload is possible for given format, stream type, sample rate,
diff --git a/include/media/IAudioPolicyServiceClient.h b/include/media/IAudioPolicyServiceClient.h
index 8c3459d..d94ad00 100644
--- a/include/media/IAudioPolicyServiceClient.h
+++ b/include/media/IAudioPolicyServiceClient.h
@@ -41,7 +41,8 @@
virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source,
const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig) = 0;
+ const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle) = 0;
};
diff --git a/include/media/IAudioRecord.h b/include/media/IAudioRecord.h
index 2003985..7768176 100644
--- a/include/media/IAudioRecord.h
+++ b/include/media/IAudioRecord.h
@@ -24,6 +24,7 @@
#include <utils/Errors.h>
#include <binder/IInterface.h>
#include <binder/IMemory.h>
+#include <system/audio.h>
namespace android {
@@ -37,7 +38,8 @@
/* After it's created the track is not active. Call start() to
* make it active.
*/
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession) = 0;
+ virtual status_t start(int /*AudioSystem::sync_event_t*/ event,
+ audio_session_t triggerSession) = 0;
/* Stop a track. If set, the callback will cease being called and
* obtainBuffer will return an error. Buffers that are already released
diff --git a/include/media/IMediaPlayerService.h b/include/media/IMediaPlayerService.h
index 99ca6f0..e5d3cda 100644
--- a/include/media/IMediaPlayerService.h
+++ b/include/media/IMediaPlayerService.h
@@ -49,8 +49,8 @@
virtual sp<IMediaRecorder> createMediaRecorder(const String16 &opPackageName) = 0;
virtual sp<IMediaMetadataRetriever> createMetadataRetriever() = 0;
- virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId = 0)
- = 0;
+ virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
+ audio_session_t audioSessionId = AUDIO_SESSION_ALLOCATE) = 0;
virtual sp<IOMX> getOMX() = 0;
virtual sp<ICrypto> makeCrypto() = 0;
virtual sp<IDrm> makeDrm() = 0;
diff --git a/include/media/IMediaRecorder.h b/include/media/IMediaRecorder.h
index caa6592..68a65f0 100644
--- a/include/media/IMediaRecorder.h
+++ b/include/media/IMediaRecorder.h
@@ -23,7 +23,9 @@
namespace android {
class Surface;
+namespace hardware {
class ICamera;
+}
class ICameraRecordingProxy;
class IMediaRecorderClient;
class IGraphicBufferConsumer;
@@ -34,7 +36,7 @@
public:
DECLARE_META_INTERFACE(MediaRecorder);
- virtual status_t setCamera(const sp<ICamera>& camera,
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy) = 0;
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
virtual status_t setVideoSource(int vs) = 0;
diff --git a/include/media/IOMX.h b/include/media/IOMX.h
index 3f211bf..19c7955 100644
--- a/include/media/IOMX.h
+++ b/include/media/IOMX.h
@@ -121,7 +121,7 @@
// This will set *type to resulting metadata buffer type on OMX error (not on binder error) as
// well as on success.
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type = NULL) = 0;
@@ -186,6 +186,7 @@
INTERNAL_OPTION_MAX_FPS, // data is float
INTERNAL_OPTION_START_TIME, // data is an int64_t
INTERNAL_OPTION_TIME_LAPSE, // data is an int64_t[2]
+ INTERNAL_OPTION_COLOR_ASPECTS, // data is ColorAspects
};
virtual status_t setInternalOption(
node_id node,
diff --git a/include/media/MediaPlayerInterface.h b/include/media/MediaPlayerInterface.h
index 9e5056f..54862d1 100644
--- a/include/media/MediaPlayerInterface.h
+++ b/include/media/MediaPlayerInterface.h
@@ -101,7 +101,7 @@
virtual status_t getTimestamp(AudioTimestamp &ts) const = 0;
virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const = 0;
virtual status_t getFramesWritten(uint32_t *frameswritten) const = 0;
- virtual int getSessionId() const = 0;
+ virtual audio_session_t getSessionId() const = 0;
virtual audio_stream_type_t getAudioStreamType() const = 0;
virtual uint32_t getSampleRate() const = 0;
diff --git a/include/media/MediaRecorderBase.h b/include/media/MediaRecorderBase.h
index c05d782..5195993 100644
--- a/include/media/MediaRecorderBase.h
+++ b/include/media/MediaRecorderBase.h
@@ -42,7 +42,7 @@
virtual status_t setVideoEncoder(video_encoder ve) = 0;
virtual status_t setVideoSize(int width, int height) = 0;
virtual status_t setVideoFrameRate(int frames_per_second) = 0;
- virtual status_t setCamera(const sp<ICamera>& camera,
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy) = 0;
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface) = 0;
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length) = 0;
diff --git a/include/media/MediaResource.h b/include/media/MediaResource.h
index 20f2cad..1957a45 100644
--- a/include/media/MediaResource.h
+++ b/include/media/MediaResource.h
@@ -23,17 +23,24 @@
namespace android {
-extern const char kResourceSecureCodec[];
-extern const char kResourceNonSecureCodec[];
-extern const char kResourceAudioCodec[];
-extern const char kResourceVideoCodec[];
-extern const char kResourceGraphicMemory[];
-
class MediaResource {
public:
+ enum Type {
+ kUnspecified = 0,
+ kSecureCodec,
+ kNonSecureCodec,
+ kGraphicMemory
+ };
+
+ enum SubType {
+ kUnspecifiedSubType = 0,
+ kAudioCodec,
+ kVideoCodec
+ };
+
MediaResource();
- MediaResource(String8 type, uint64_t value);
- MediaResource(String8 type, String8 subType, uint64_t value);
+ MediaResource(Type type, uint64_t value);
+ MediaResource(Type type, SubType subType, uint64_t value);
void readFromParcel(const Parcel &parcel);
void writeToParcel(Parcel *parcel) const;
@@ -43,11 +50,30 @@
bool operator==(const MediaResource &other) const;
bool operator!=(const MediaResource &other) const;
- String8 mType;
- String8 mSubType;
+ Type mType;
+ SubType mSubType;
uint64_t mValue;
};
+inline static const char *asString(MediaResource::Type i, const char *def = "??") {
+ switch (i) {
+ case MediaResource::kUnspecified: return "unspecified";
+ case MediaResource::kSecureCodec: return "secure-codec";
+ case MediaResource::kNonSecureCodec: return "non-secure-codec";
+ case MediaResource::kGraphicMemory: return "graphic-memory";
+ default: return def;
+ }
+}
+
+inline static const char *asString(MediaResource::SubType i, const char *def = "??") {
+ switch (i) {
+ case MediaResource::kUnspecifiedSubType: return "unspecified";
+ case MediaResource::kAudioCodec: return "audio-codec";
+ case MediaResource::kVideoCodec: return "video-codec";
+ default: return def;
+ }
+}
+
}; // namespace android
#endif // ANDROID_MEDIA_RESOURCE_H
diff --git a/include/media/Visualizer.h b/include/media/Visualizer.h
index 186e018..ec0dad5 100644
--- a/include/media/Visualizer.h
+++ b/include/media/Visualizer.h
@@ -69,7 +69,7 @@
int32_t priority = 0,
effect_callback_t cbf = NULL,
void* user = NULL,
- int sessionId = 0);
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
~Visualizer();
diff --git a/include/media/mediaplayer.h b/include/media/mediaplayer.h
index 00df523..cec9d99 100644
--- a/include/media/mediaplayer.h
+++ b/include/media/mediaplayer.h
@@ -246,8 +246,8 @@
status_t invoke(const Parcel& request, Parcel *reply);
status_t setMetadataFilter(const Parcel& filter);
status_t getMetadata(bool update_only, bool apply_filter, Parcel *metadata);
- status_t setAudioSessionId(int sessionId);
- int getAudioSessionId();
+ status_t setAudioSessionId(audio_session_t sessionId);
+ audio_session_t getAudioSessionId();
status_t setAuxEffectSendLevel(float level);
status_t attachAuxEffect(int effectId);
status_t setParameter(int key, const Parcel& request);
@@ -284,7 +284,7 @@
float mRightVolume;
int mVideoWidth;
int mVideoHeight;
- int mAudioSessionId;
+ audio_session_t mAudioSessionId;
float mSendLevel;
struct sockaddr_in mRetransmitEndpoint;
bool mRetransmitEndpointValid;
diff --git a/include/media/mediarecorder.h b/include/media/mediarecorder.h
index 64e3660..c3f39a2 100644
--- a/include/media/mediarecorder.h
+++ b/include/media/mediarecorder.h
@@ -29,12 +29,15 @@
class Surface;
class IMediaRecorder;
-class ICamera;
class ICameraRecordingProxy;
class IGraphicBufferProducer;
struct PersistentSurface;
class Surface;
+namespace hardware {
+class ICamera;
+}
+
typedef void (*media_completion_f)(status_t status, void *cookie);
enum video_source {
@@ -216,7 +219,8 @@
void died();
status_t initCheck();
- status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
+ status_t setCamera(const sp<hardware::ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy);
status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
status_t setVideoSource(int vs);
status_t setAudioSource(int as);
diff --git a/include/media/stagefright/ACodec.h b/include/media/stagefright/ACodec.h
index 4489d37..fab92bd 100644
--- a/include/media/stagefright/ACodec.h
+++ b/include/media/stagefright/ACodec.h
@@ -25,6 +25,7 @@
#include <media/stagefright/foundation/AHierarchicalStateMachine.h>
#include <media/stagefright/CodecBase.h>
#include <media/stagefright/FrameRenderTracker.h>
+#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/SkipCutBuffer.h>
#include <utils/NativeHandle.h>
#include <OMX_Audio.h>
@@ -35,7 +36,8 @@
struct ABuffer;
struct MemoryDealer;
-struct DescribeColorFormatParams;
+struct DescribeColorFormat2Params;
+struct DataConverter;
struct ACodec : public AHierarchicalStateMachine, public CodecBase {
ACodec();
@@ -188,8 +190,11 @@
Status mStatus;
unsigned mDequeuedAt;
- sp<ABuffer> mData;
- sp<RefBase> mMemRef;
+ sp<ABuffer> mData; // the client's buffer; if not using data conversion, this is the
+ // codec buffer; otherwise, it is allocated separately
+ sp<RefBase> mMemRef; // and a reference to the IMemory, so it does not go away
+ sp<ABuffer> mCodecData; // the codec's buffer
+ sp<RefBase> mCodecRef; // and a reference to the IMemory
sp<GraphicBuffer> mGraphicBuffer;
sp<NativeHandle> mNativeHandle;
int mFenceFd;
@@ -242,10 +247,15 @@
IOMX::node_id mNode;
sp<MemoryDealer> mDealer[2];
+ bool mUsingNativeWindow;
sp<ANativeWindow> mNativeWindow;
int mNativeWindowUsageBits;
+ sp<AMessage> mConfigFormat;
sp<AMessage> mInputFormat;
sp<AMessage> mOutputFormat;
+
+ // Initial output format + configuration params that is reused as the base for all subsequent
+ // format updates. This will equal to mOutputFormat until the first actual frame is received.
sp<AMessage> mBaseOutputFormat;
FrameRenderTracker mRenderTracker; // render information for buffers rendered by ACodec
@@ -255,7 +265,7 @@
List<sp<AMessage> > mDeferredQueue;
- bool mSentFormat;
+ sp<AMessage> mLastOutputFormat;
bool mIsVideo;
bool mIsEncoder;
bool mFatalError;
@@ -278,6 +288,7 @@
bool mLegacyAdaptiveExperiment;
int32_t mMetadataBuffersToSubmit;
size_t mNumUndequeuedBuffers;
+ sp<DataConverter> mConverter[2];
int64_t mRepeatFrameDelayUs;
int64_t mMaxPtsGapUs;
@@ -290,6 +301,8 @@
bool mTunneled;
+ OMX_INDEXTYPE mDescribeColorAspectsIndex;
+
status_t setCyclicIntraMacroblockRefresh(const sp<AMessage> &msg, int32_t mode);
status_t allocateBuffersOnPort(OMX_U32 portIndex);
status_t freeBuffersOnPort(OMX_U32 portIndex);
@@ -341,16 +354,73 @@
status_t setSupportedOutputFormat(bool getLegacyFlexibleFormat);
status_t setupVideoDecoder(
- const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers);
+ const char *mime, const sp<AMessage> &msg, bool usingNativeBuffers, bool haveSwRenderer,
+ sp<AMessage> &outputformat);
status_t setupVideoEncoder(
- const char *mime, const sp<AMessage> &msg);
+ const char *mime, const sp<AMessage> &msg,
+ sp<AMessage> &outputformat, sp<AMessage> &inputformat);
status_t setVideoFormatOnPort(
OMX_U32 portIndex,
int32_t width, int32_t height,
OMX_VIDEO_CODINGTYPE compressionFormat, float frameRate = -1.0);
+ // gets index or sets it to 0 on error. Returns error from codec.
+ status_t initDescribeColorAspectsIndex();
+
+ // sets |params|. If |readBack| is true, it re-gets them afterwards if set succeeded.
+ // returns the codec error.
+ status_t setCodecColorAspects(DescribeColorAspectsParams ¶ms, bool readBack = false);
+
+ // gets |params|; returns the codec error. |param| should not change on error.
+ status_t getCodecColorAspects(DescribeColorAspectsParams ¶ms);
+
+ // gets dataspace guidance from codec and platform. |params| should be set up with the color
+ // aspects to use. If |tryCodec| is true, the codec is queried first. If it succeeds, we
+ // return OK. Otherwise, we fall back to the platform guidance and return the codec error;
+ // though, we return OK if the codec failed with UNSUPPORTED, as codec guidance is optional.
+ status_t getDataSpace(
+ DescribeColorAspectsParams ¶ms, android_dataspace *dataSpace /* nonnull */,
+ bool tryCodec);
+
+ // sets color aspects for the encoder for certain |width/height| based on |configFormat|, and
+ // set resulting color config into |outputFormat|. If |usingNativeWindow| is true, we use
+ // video defaults if config is unspecified. Returns error from the codec.
+ status_t setColorAspectsForVideoDecoder(
+ int32_t width, int32_t height, bool usingNativeWindow,
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat);
+
+ // gets color aspects for the encoder for certain |width/height| based on |configFormat|, and
+ // set resulting color config into |outputFormat|. If |dataSpace| is non-null, it requests
+ // dataspace guidance from the codec and platform and sets it into |dataSpace|. Returns the
+ // error from the codec.
+ status_t getColorAspectsAndDataSpaceForVideoDecoder(
+ int32_t width, int32_t height, const sp<AMessage> &configFormat,
+ sp<AMessage> &outputFormat, android_dataspace *dataSpace);
+
+ // sets color aspects for the video encoder assuming bytebuffer mode for certain |configFormat|
+ // and sets resulting color config into |outputFormat|. For mediarecorder, also set dataspace
+ // into |inputFormat|. Returns the error from the codec.
+ status_t setColorAspectsForVideoEncoder(
+ const sp<AMessage> &configFormat,
+ sp<AMessage> &outputFormat, sp<AMessage> &inputFormat);
+
+ // sets color aspects for the video encoder in surface mode. This basically sets the default
+ // video values for unspecified aspects and sets the dataspace to use in the input format.
+ // Also sets the dataspace into |dataSpace|.
+ // Returns any codec errors during this configuration, except for optional steps.
+ status_t setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+ android_dataspace *dataSpace /* nonnull */);
+
+ // gets color aspects for the video encoder input port and sets them into the |format|.
+ // Returns any codec errors.
+ status_t getInputColorAspectsForVideoEncoder(sp<AMessage> &format);
+
+ // updates the encoder output format with |aspects| defaulting to |dataSpace| for
+ // unspecified values.
+ void onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects);
+
typedef struct drcParams {
int32_t drcCut;
int32_t drcBoost;
@@ -380,7 +450,8 @@
bool encoder, int32_t numChannels, int32_t sampleRate, int32_t compressionLevel);
status_t setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels);
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels,
+ AudioEncoding encoding = kAudioEncodingPcm16bit);
status_t setPriority(int32_t priority);
status_t setOperatingRate(float rateFloat, bool isVideo);
@@ -435,17 +506,23 @@
void notifyOfRenderedFrames(
bool dropIncomplete = false, FrameRenderTracker::Info *until = NULL);
- void sendFormatChange(const sp<AMessage> &reply);
+ // Pass |expectedFormat| to print a warning if the format differs from it.
+ // Using sp<> instead of const sp<>& because expectedFormat is likely the current mOutputFormat
+ // which will get updated inside.
+ void onOutputFormatChanged(sp<const AMessage> expectedFormat = NULL);
+ void addKeyFormatChangesToRenderBufferNotification(sp<AMessage> ¬ify);
+ void sendFormatChange();
+
status_t getPortFormat(OMX_U32 portIndex, sp<AMessage> ¬ify);
void signalError(
OMX_ERRORTYPE error = OMX_ErrorUndefined,
status_t internalError = UNKNOWN_ERROR);
- static bool describeDefaultColorFormat(DescribeColorFormatParams &describeParams);
+ static bool describeDefaultColorFormat(DescribeColorFormat2Params &describeParams);
static bool describeColorFormat(
const sp<IOMX> &omx, IOMX::node_id node,
- DescribeColorFormatParams &describeParams);
+ DescribeColorFormat2Params &describeParams);
status_t requestIDRFrame();
status_t setParameters(const sp<AMessage> ¶ms);
diff --git a/include/media/stagefright/CameraSource.h b/include/media/stagefright/CameraSource.h
index 3d00d30..c732b41 100644
--- a/include/media/stagefright/CameraSource.h
+++ b/include/media/stagefright/CameraSource.h
@@ -20,7 +20,8 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaSource.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/ICameraRecordingProxy.h>
#include <camera/ICameraRecordingProxyListener.h>
#include <camera/CameraParameters.h>
#include <gui/BufferItemConsumer.h>
@@ -78,7 +79,7 @@
*
* @return NULL on error.
*/
- static CameraSource *CreateFromCamera(const sp<ICamera> &camera,
+ static CameraSource *CreateFromCamera(const sp<hardware::ICamera> &camera,
const sp<ICameraRecordingProxy> &proxy,
int32_t cameraId,
const String16& clientName,
@@ -200,7 +201,7 @@
// Time between capture of two frames.
int64_t mTimeBetweenFrameCaptureUs;
- CameraSource(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+ CameraSource(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
Size videoSize, int32_t frameRate,
const sp<IGraphicBufferProducer>& surface,
@@ -219,6 +220,9 @@
virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
const sp<IMemory> &data);
+ // Process a buffer item received in BufferQueueListener.
+ virtual void processBufferQueueFrame(BufferItem& buffer);
+
void releaseCamera();
private:
@@ -244,12 +248,15 @@
* The following variables are used in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
*/
static const size_t kConsumerBufferCount = 8;
+ static const nsecs_t kMemoryBaseAvailableTimeoutNs = 200000000; // 200ms
// Consumer and producer of the buffer queue between this class and camera.
sp<BufferItemConsumer> mVideoBufferConsumer;
sp<IGraphicBufferProducer> mVideoBufferProducer;
// Memory used to send the buffers to encoder, where sp<IMemory> stores VideoNativeMetadata.
sp<IMemoryHeap> mMemoryHeapBase;
List<sp<IMemory>> mMemoryBases;
+ // The condition that will be signaled when there is an entry available in mMemoryBases.
+ Condition mMemoryBaseAvailableCond;
// A mapping from ANativeWindowBuffer sent to encoder to BufferItem received from camera.
// This is protected by mLock.
KeyedVector<ANativeWindowBuffer*, BufferItem> mReceivedBufferItemMap;
@@ -257,15 +264,13 @@
void releaseQueuedFrames();
void releaseOneRecordingFrame(const sp<IMemory>& frame);
- // Process a buffer item received in BufferQueueListener.
- void processBufferQueueFrame(const BufferItem& buffer);
- status_t init(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+ status_t init(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
status_t initWithCameraAccess(
- const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+ const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid,
Size videoSize, int32_t frameRate, bool storeMetaDataInVideoBuffers);
@@ -273,7 +278,7 @@
status_t initBufferQueue(uint32_t width, uint32_t height, uint32_t format,
android_dataspace dataSpace, uint32_t bufferCount);
- status_t isCameraAvailable(const sp<ICamera>& camera,
+ status_t isCameraAvailable(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
diff --git a/include/media/stagefright/CameraSourceTimeLapse.h b/include/media/stagefright/CameraSourceTimeLapse.h
index 1023027..f17ec51 100644
--- a/include/media/stagefright/CameraSourceTimeLapse.h
+++ b/include/media/stagefright/CameraSourceTimeLapse.h
@@ -26,14 +26,17 @@
namespace android {
+namespace hardware {
class ICamera;
+}
+
class IMemory;
class Camera;
class CameraSourceTimeLapse : public CameraSource {
public:
static CameraSourceTimeLapse *CreateFromCamera(
- const sp<ICamera> &camera,
+ const sp<hardware::ICamera> &camera,
const sp<ICameraRecordingProxy> &proxy,
int32_t cameraId,
const String16& clientName,
@@ -110,7 +113,7 @@
status_t mLastReadStatus;
CameraSourceTimeLapse(
- const sp<ICamera> &camera,
+ const sp<hardware::ICamera> &camera,
const sp<ICameraRecordingProxy> &proxy,
int32_t cameraId,
const String16& clientName,
@@ -137,9 +140,15 @@
// In the video camera case calls skipFrameAndModifyTimeStamp() to modify
// timestamp and set mSkipCurrentFrame.
// Then it calls the base CameraSource::dataCallbackTimestamp()
+ // This will be called in VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV and
+ // VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA mode.
virtual void dataCallbackTimestamp(int64_t timestampUs, int32_t msgType,
const sp<IMemory> &data);
+ // Process a buffer item received in CameraSource::BufferQueueListener.
+ // This will be called in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
+ virtual void processBufferQueueFrame(BufferItem& buffer);
+
// Convenience function to fill mLastReadBufferCopy from the just read
// buffer.
void fillLastReadBufferCopy(MediaBuffer& sourceBuffer);
diff --git a/include/media/stagefright/CodecBase.h b/include/media/stagefright/CodecBase.h
index cbf9839..be2835d 100644
--- a/include/media/stagefright/CodecBase.h
+++ b/include/media/stagefright/CodecBase.h
@@ -19,18 +19,25 @@
#define CODEC_BASE_H_
#include <stdint.h>
-#include <media/IOMX.h>
+#define STRINGIFY_ENUMS
+
+#include <media/IOMX.h>
#include <media/MediaCodecInfo.h>
#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <media/hardware/HardwareAPI.h>
+
#include <utils/NativeHandle.h>
+#include <system/graphics.h>
+
namespace android {
struct ABuffer;
struct PersistentSurface;
-struct CodecBase : public AHandler {
+struct CodecBase : public AHandler, /* static */ ColorUtils {
enum {
kWhatFillThisBuffer = 'fill',
kWhatDrainThisBuffer = 'drai',
@@ -48,6 +55,10 @@
kWhatOutputFramesRendered = 'outR',
};
+ enum {
+ kMaxCodecBufferSize = 8192 * 4096 * 4, // 8K RGBA
+ };
+
virtual void setNotificationMessage(const sp<AMessage> &msg) = 0;
virtual void initiateAllocateComponent(const sp<AMessage> &msg) = 0;
@@ -78,7 +89,7 @@
virtual size_t countBuffers() = 0;
virtual IOMX::buffer_id bufferIDAt(size_t index) const = 0;
virtual sp<ABuffer> bufferAt(size_t index) const = 0;
- virtual sp<NativeHandle> handleAt(size_t index) { return NULL; };
+ virtual sp<NativeHandle> handleAt(size_t index) const { return NULL; };
virtual sp<RefBase> memRefAt(size_t index) const { return NULL; }
protected:
@@ -89,6 +100,10 @@
DISALLOW_EVIL_CONSTRUCTORS(PortDescription);
};
+ /*
+ * Codec-related defines
+ */
+
protected:
CodecBase();
virtual ~CodecBase();
diff --git a/include/media/stagefright/MediaCodec.h b/include/media/stagefright/MediaCodec.h
index 2bb1291..fe579b7 100644
--- a/include/media/stagefright/MediaCodec.h
+++ b/include/media/stagefright/MediaCodec.h
@@ -394,7 +394,7 @@
bool isExecuting() const;
uint64_t getGraphicBufferSize();
- void addResource(const String8 &type, const String8 &subtype, uint64_t value);
+ void addResource(MediaResource::Type type, MediaResource::SubType subtype, uint64_t value);
bool hasPendingBuffer(int portIndex);
bool hasPendingBuffer();
diff --git a/include/media/stagefright/MediaCodecSource.h b/include/media/stagefright/MediaCodecSource.h
index e3f3f5e..035e8ae 100644
--- a/include/media/stagefright/MediaCodecSource.h
+++ b/include/media/stagefright/MediaCodecSource.h
@@ -56,7 +56,7 @@
virtual status_t start(MetaData *params = NULL);
virtual status_t stop();
virtual status_t pause();
- virtual sp<MetaData> getFormat() { return mMeta; }
+ virtual sp<MetaData> getFormat();
virtual status_t read(
MediaBuffer **buffer,
const ReadOptions *options = NULL);
@@ -105,7 +105,7 @@
sp<ALooper> mCodecLooper;
sp<AHandlerReflector<MediaCodecSource> > mReflector;
sp<AMessage> mOutputFormat;
- sp<MetaData> mMeta;
+ Mutexed<sp<MetaData>> mMeta;
sp<Puller> mPuller;
sp<MediaCodec> mEncoder;
uint32_t mFlags;
@@ -115,8 +115,8 @@
bool mStopping;
bool mDoMoreWorkPending;
bool mSetEncoderFormat;
- int mEncoderFormat;
- int mEncoderDataSpace;
+ int32_t mEncoderFormat;
+ int32_t mEncoderDataSpace;
sp<AMessage> mEncoderActivityNotify;
sp<IGraphicBufferProducer> mGraphicBufferProducer;
sp<IGraphicBufferConsumer> mGraphicBufferConsumer;
diff --git a/include/media/stagefright/MediaDefs.h b/include/media/stagefright/MediaDefs.h
index e5bcec6..5f2a32d 100644
--- a/include/media/stagefright/MediaDefs.h
+++ b/include/media/stagefright/MediaDefs.h
@@ -68,6 +68,15 @@
extern const char *MEDIA_MIMETYPE_TEXT_CEA_708;
extern const char *MEDIA_MIMETYPE_DATA_TIMED_ID3;
+// These are values exported to JAVA API that need to be in sync with
+// frameworks/base/media/java/android/media/AudioFormat.java. Unfortunately,
+// they are not defined in frameworks/av, so defining them here.
+enum AudioEncoding {
+ kAudioEncodingPcm16bit = 2,
+ kAudioEncodingPcm8bit = 3,
+ kAudioEncodingPcmFloat = 4,
+};
+
} // namespace android
#endif // MEDIA_DEFS_H_
diff --git a/include/media/stagefright/MetaData.h b/include/media/stagefright/MetaData.h
index 91341b8..a9ae49b 100644
--- a/include/media/stagefright/MetaData.h
+++ b/include/media/stagefright/MetaData.h
@@ -49,6 +49,7 @@
kKeyChannelCount = '#chn', // int32_t
kKeyChannelMask = 'chnm', // int32_t
kKeySampleRate = 'srte', // int32_t (audio sampling rate Hz)
+ kKeyPcmEncoding = 'PCMe', // int32_t (audio encoding enum)
kKeyFrameRate = 'frmR', // int32_t (video frame rate fps)
kKeyBitRate = 'brte', // int32_t (bps)
kKeyESDS = 'esds', // raw data
@@ -188,6 +189,19 @@
// Size of NALU length in mkv/mp4
kKeyNalLengthSize = 'nals', // int32_t
+
+ // HDR related
+ kKeyMinLuminance = 'minL', // int32_t, min luminance of the content in cd/m2.
+ kKeyMaxLuminance = 'maxL', // int32_t, max luminance of the content in cd/m2.
+
+ // color aspects
+ kKeyColorRange = 'cRng', // int32_t, color range, value defined by ColorAspects.Range
+ kKeyColorPrimaries = 'cPrm', // int32_t,
+ // color Primaries, value defined by ColorAspects.Primaries
+ kKeyTransferFunction = 'tFun', // int32_t,
+ // transfer Function, value defined by ColorAspects.Transfer.
+ kKeyColorMatrix = 'cMtx', // int32_t,
+ // color Matrix, value defined by ColorAspects.MatrixCoeffs.
};
enum {
diff --git a/include/media/stagefright/NuMediaExtractor.h b/include/media/stagefright/NuMediaExtractor.h
index b8bb824..6606c58 100644
--- a/include/media/stagefright/NuMediaExtractor.h
+++ b/include/media/stagefright/NuMediaExtractor.h
@@ -84,6 +84,10 @@
kIsVorbis = 1,
};
+ enum {
+ kMaxTrackCount = 16384,
+ };
+
struct TrackInfo {
sp<IMediaSource> mSource;
size_t mTrackIndex;
@@ -113,7 +117,7 @@
void releaseTrackSamples();
bool getTotalBitrate(int64_t *bitRate) const;
- void updateDurationAndBitrate();
+ status_t updateDurationAndBitrate();
status_t appendVorbisNumPageSamples(TrackInfo *info, const sp<ABuffer> &buffer);
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
diff --git a/include/media/stagefright/foundation/ADebug.h b/include/media/stagefright/foundation/ADebug.h
index 65f415a..564b3f7 100644
--- a/include/media/stagefright/foundation/ADebug.h
+++ b/include/media/stagefright/foundation/ADebug.h
@@ -24,8 +24,9 @@
#include <media/stagefright/foundation/AString.h>
#include <utils/Log.h>
-inline static const char *asString(android::status_t i, const char *def = "??") {
- using namespace android;
+namespace android {
+
+inline static const char *asString(status_t i, const char *def = "??") {
switch (i) {
case NO_ERROR: return "NO_ERROR";
case UNKNOWN_ERROR: return "UNKNOWN_ERROR";
@@ -49,8 +50,6 @@
}
}
-namespace android {
-
#define LITERAL_TO_STRING_INTERNAL(x) #x
#define LITERAL_TO_STRING(x) LITERAL_TO_STRING_INTERNAL(x)
diff --git a/include/media/stagefright/foundation/ALookup.h b/include/media/stagefright/foundation/ALookup.h
new file mode 100644
index 0000000..5a68806
--- /dev/null
+++ b/include/media/stagefright/foundation/ALookup.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef A_LOOKUP_H_
+
+#define A_LOOKUP_H_
+
+#include <utility>
+#include <vector>
+
+namespace android {
+
+template<typename T, typename U>
+struct ALookup {
+ ALookup(std::initializer_list<std::pair<T, U>> list);
+
+ bool lookup(const T& from, U *to) const;
+ bool rlookup(const U& from, T *to) const;
+
+ template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+ inline bool map(const T& from, V *to) const { return lookup(from, to); }
+
+ template<typename V, typename = typename std::enable_if<!std::is_same<T, V>::value>::type>
+ inline bool map(const V& from, T *to) const { return rlookup(from, to); }
+
+private:
+ std::vector<std::pair<T, U>> mTable;
+};
+
+template<typename T, typename U>
+ALookup<T, U>::ALookup(std::initializer_list<std::pair<T, U>> list)
+ : mTable(list) {
+}
+
+template<typename T, typename U>
+bool ALookup<T, U>::lookup(const T& from, U *to) const {
+ for (auto elem : mTable) {
+ if (elem.first == from) {
+ *to = elem.second;
+ return true;
+ }
+ }
+ return false;
+}
+
+template<typename T, typename U>
+bool ALookup<T, U>::rlookup(const U& from, T *to) const {
+ for (auto elem : mTable) {
+ if (elem.second == from) {
+ *to = elem.first;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace android
+
+#endif // A_LOOKUP_H_
diff --git a/include/media/stagefright/foundation/AMessage.h b/include/media/stagefright/foundation/AMessage.h
index 83b9444..09d2ad8 100644
--- a/include/media/stagefright/foundation/AMessage.h
+++ b/include/media/stagefright/foundation/AMessage.h
@@ -127,6 +127,15 @@
// their refcount incremented.
sp<AMessage> dup() const;
+ // Performs a shallow or deep comparison of |this| and |other| and returns
+ // an AMessage with the differences.
+ // Warning: RefBase items, i.e. "objects" are _not_ copied but only have
+ // their refcount incremented.
+ // This is true for AMessages that have no corresponding AMessage equivalent in |other|.
+ // (E.g. there is no such key or the type is different.) On the other hand, changes in
+ // the AMessage (or AMessages if deep is |false|) are returned in new objects.
+ sp<AMessage> changesFrom(const sp<const AMessage> &other, bool deep = false) const;
+
AString debugString(int32_t indent = 0) const;
enum Type {
diff --git a/include/media/stagefright/foundation/ColorUtils.h b/include/media/stagefright/foundation/ColorUtils.h
new file mode 100644
index 0000000..f01a210
--- /dev/null
+++ b/include/media/stagefright/foundation/ColorUtils.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef COLOR_UTILS_H_
+
+#define COLOR_UTILS_H_
+
+#include <stdint.h>
+
+#define STRINGIFY_ENUMS
+
+#include <media/stagefright/foundation/AMessage.h>
+
+#include <media/hardware/VideoAPI.h>
+#include <system/graphics.h>
+
+namespace android {
+
+struct ColorUtils {
+ /*
+ * Media-platform color constants. MediaCodec uses (an extended version of) platform-defined
+ * constants that are derived from HAL_DATASPACE, since these are directly exposed to the user.
+ * We extend the values to maintain the richer set of information defined inside media
+ * containers and bitstreams that are not supported by the platform. We also expect vendors
+ * to extend some of these values with vendor-specific values. These are separated into a
+ * vendor-extension section so they won't collide with future platform values.
+ */
+
+#define GET_HAL_ENUM(class, name) HAL_DATASPACE_##class##name
+#define GET_HAL_BITFIELD(class, name) (GET_HAL_ENUM(class, _##name) >> GET_HAL_ENUM(class, _SHIFT))
+
+ enum ColorStandard : uint32_t {
+ kColorStandardUnspecified = GET_HAL_BITFIELD(STANDARD, UNSPECIFIED),
+ kColorStandardBT709 = GET_HAL_BITFIELD(STANDARD, BT709),
+ kColorStandardBT601_625 = GET_HAL_BITFIELD(STANDARD, BT601_625),
+ kColorStandardBT601_625_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_625_UNADJUSTED),
+ kColorStandardBT601_525 = GET_HAL_BITFIELD(STANDARD, BT601_525),
+ kColorStandardBT601_525_Unadjusted = GET_HAL_BITFIELD(STANDARD, BT601_525_UNADJUSTED),
+ kColorStandardBT2020 = GET_HAL_BITFIELD(STANDARD, BT2020),
+ kColorStandardBT2020Constant = GET_HAL_BITFIELD(STANDARD, BT2020_CONSTANT_LUMINANCE),
+ kColorStandardBT470M = GET_HAL_BITFIELD(STANDARD, BT470M),
+ kColorStandardFilm = GET_HAL_BITFIELD(STANDARD, FILM),
+ kColorStandardMax = GET_HAL_BITFIELD(STANDARD, MASK),
+
+ /* This marks a section of color-standard values that are not supported by graphics HAL,
+ but track defined color primaries-matrix coefficient combinations in media.
+ These are stable for a given release. */
+ kColorStandardExtendedStart = kColorStandardMax + 1,
+
+ /* This marks a section of color-standard values that are not supported by graphics HAL
+ nor using media defined color primaries or matrix coefficients. These may differ per
+ device. */
+ kColorStandardVendorStart = 0x10000,
+ };
+
+ enum ColorTransfer : uint32_t {
+ kColorTransferUnspecified = GET_HAL_BITFIELD(TRANSFER, UNSPECIFIED),
+ kColorTransferLinear = GET_HAL_BITFIELD(TRANSFER, LINEAR),
+ kColorTransferSRGB = GET_HAL_BITFIELD(TRANSFER, SRGB),
+ kColorTransferSMPTE_170M = GET_HAL_BITFIELD(TRANSFER, SMPTE_170M),
+ kColorTransferGamma22 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_2),
+ kColorTransferGamma28 = GET_HAL_BITFIELD(TRANSFER, GAMMA2_8),
+ kColorTransferST2084 = GET_HAL_BITFIELD(TRANSFER, ST2084),
+ kColorTransferHLG = GET_HAL_BITFIELD(TRANSFER, HLG),
+ kColorTransferMax = GET_HAL_BITFIELD(TRANSFER, MASK),
+
+ /* This marks a section of color-transfer values that are not supported by graphics HAL,
+ but track media-defined color-transfer. These are stable for a given release. */
+ kColorTransferExtendedStart = kColorTransferMax + 1,
+
+ /* This marks a section of color-transfer values that are not supported by graphics HAL
+ nor defined by media. These may differ per device. */
+ kColorTransferVendorStart = 0x10000,
+ };
+
+ enum ColorRange : uint32_t {
+ kColorRangeUnspecified = GET_HAL_BITFIELD(RANGE, UNSPECIFIED),
+ kColorRangeFull = GET_HAL_BITFIELD(RANGE, FULL),
+ kColorRangeLimited = GET_HAL_BITFIELD(RANGE, LIMITED),
+ kColorRangeMax = GET_HAL_BITFIELD(RANGE, MASK),
+
+ /* This marks a section of color-range values that are not supported by graphics HAL,
+ but track media-defined color-range. These are stable for a given release. */
+ kColorRangeExtendedStart = kColorRangeMax + 1,
+
+ /* This marks a section of color-range values that are not supported by graphics HAL
+ nor defined by media. These may differ per device. */
+ kColorRangeVendorStart = 0x10000,
+ };
+
+#undef GET_HAL_BITFIELD
+#undef GET_HAL_ENUM
+
+ /*
+ * Static utilities for codec support
+ */
+
+ // using int32_t for media range/standard/transfers to denote extended ranges
+ // wrap methods change invalid aspects to the Unspecified value
+ static int32_t wrapColorAspectsIntoColorStandard(
+ ColorAspects::Primaries primaries, ColorAspects::MatrixCoeffs coeffs);
+ static int32_t wrapColorAspectsIntoColorRange(ColorAspects::Range range);
+ static int32_t wrapColorAspectsIntoColorTransfer(ColorAspects::Transfer transfer);
+
+ // unwrap methods change invalid aspects to the Other value
+ static status_t unwrapColorAspectsFromColorRange(
+ int32_t range, ColorAspects::Range *aspect);
+ static status_t unwrapColorAspectsFromColorTransfer(
+ int32_t transfer, ColorAspects::Transfer *aspect);
+ static status_t unwrapColorAspectsFromColorStandard(
+ int32_t standard,
+ ColorAspects::Primaries *primaries, ColorAspects::MatrixCoeffs *coeffs);
+
+ static status_t convertPlatformColorAspectsToCodecAspects(
+ int32_t range, int32_t standard, int32_t transfer, ColorAspects &aspects);
+ static status_t convertCodecColorAspectsToPlatformAspects(
+ const ColorAspects &aspects, int32_t *range, int32_t *standard, int32_t *transfer);
+
+ // converts Other values to Unspecified
+ static void convertCodecColorAspectsToIsoAspects(
+ const ColorAspects &aspects,
+ int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange);
+ // converts unsupported values to Other
+ static void convertIsoColorAspectsToCodecAspects(
+ int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+ ColorAspects &aspects);
+
+ // updates Unspecified color aspects to their defaults based on the video size
+ static void setDefaultCodecColorAspectsIfNeeded(
+ ColorAspects &aspects, int32_t width, int32_t height);
+
+ // it returns the closest dataSpace for given color |aspects|. if |mayExpand| is true, it allows
+ // returning a larger dataSpace that contains the color space given by |aspects|, and is better
+ // suited to blending. This requires implicit color space conversion on part of the device.
+ static android_dataspace getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand);
+
+ // converts |dataSpace| to a V0 enum, and returns true if dataSpace is an aspect-only value
+ static bool convertDataSpaceToV0(android_dataspace &dataSpace);
+
+ // compares |aspects| to |orig|. Returns |true| if any aspects have changed, except if they
+ // changed to Unspecified value. It also sets the changed values to Unspecified in |aspects|.
+ static bool checkIfAspectsChangedAndUnspecifyThem(
+ ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects = false);
+
+ // finds color config in format, defaulting them to 0.
+ static void getColorConfigFromFormat(
+ const sp<AMessage> &format, int *range, int *standard, int *transfer);
+
+ // copies existing color config from |source| to |target|.
+ static void copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target);
+
+ // finds color config in format as ColorAspects, defaulting them to 0.
+ static void getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects);
+
+ // writes |aspects| into format. iff |force| is false, Unspecified values are not
+ // written.
+ static void setColorAspectsIntoFormat(
+ const ColorAspects &aspects, sp<AMessage> &format, bool force = false);
+};
+
+inline static const char *asString(android::ColorUtils::ColorStandard i, const char *def = "??") {
+ using namespace android;
+ switch (i) {
+ case ColorUtils::kColorStandardUnspecified: return "Unspecified";
+ case ColorUtils::kColorStandardBT709: return "BT709";
+ case ColorUtils::kColorStandardBT601_625: return "BT601_625";
+ case ColorUtils::kColorStandardBT601_625_Unadjusted: return "BT601_625_Unadjusted";
+ case ColorUtils::kColorStandardBT601_525: return "BT601_525";
+ case ColorUtils::kColorStandardBT601_525_Unadjusted: return "BT601_525_Unadjusted";
+ case ColorUtils::kColorStandardBT2020: return "BT2020";
+ case ColorUtils::kColorStandardBT2020Constant: return "BT2020Constant";
+ case ColorUtils::kColorStandardBT470M: return "BT470M";
+ case ColorUtils::kColorStandardFilm: return "Film";
+ default: return def;
+ }
+}
+
+inline static const char *asString(android::ColorUtils::ColorTransfer i, const char *def = "??") {
+ using namespace android;
+ switch (i) {
+ case ColorUtils::kColorTransferUnspecified: return "Unspecified";
+ case ColorUtils::kColorTransferLinear: return "Linear";
+ case ColorUtils::kColorTransferSRGB: return "SRGB";
+ case ColorUtils::kColorTransferSMPTE_170M: return "SMPTE_170M";
+ case ColorUtils::kColorTransferGamma22: return "Gamma22";
+ case ColorUtils::kColorTransferGamma28: return "Gamma28";
+ case ColorUtils::kColorTransferST2084: return "ST2084";
+ case ColorUtils::kColorTransferHLG: return "HLG";
+ default: return def;
+ }
+}
+
+inline static const char *asString(android::ColorUtils::ColorRange i, const char *def = "??") {
+ using namespace android;
+ switch (i) {
+ case ColorUtils::kColorRangeUnspecified: return "Unspecified";
+ case ColorUtils::kColorRangeFull: return "Full";
+ case ColorUtils::kColorRangeLimited: return "Limited";
+ default: return def;
+ }
+}
+
+} // namespace android
+
+#endif // COLOR_UTILS_H_
+
diff --git a/include/media/stagefright/foundation/Mutexed.h b/include/media/stagefright/foundation/Mutexed.h
index d4fd905..e905d86 100644
--- a/include/media/stagefright/foundation/Mutexed.h
+++ b/include/media/stagefright/foundation/Mutexed.h
@@ -110,6 +110,11 @@
inline T* operator->() const { return mLocked ? &mTreasure : nullptr; }
inline T& operator*() const { return mLocked ? mTreasure : *(T*)nullptr; }
+ // same as *
+ inline T& get() const { return mLocked ? mTreasure : *(T*)nullptr; }
+ // sets structure. this will abort if mLocked is false.
+ inline void set(T& o) const { get() = o; }
+
// Wait on the condition variable using lock. Must be locked.
inline status_t waitForCondition(Condition &cond) { return cond.wait(mLock); }
diff --git a/include/ndk/NdkMediaCodec.h b/include/ndk/NdkMediaCodec.h
index c6035bd..fcb3a99 100644
--- a/include/ndk/NdkMediaCodec.h
+++ b/include/ndk/NdkMediaCodec.h
@@ -178,7 +178,9 @@
typedef enum {
AMEDIACODECRYPTOINFO_MODE_CLEAR = 0,
- AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1
+ AMEDIACODECRYPTOINFO_MODE_AES_CTR = 1,
+ AMEDIACODECRYPTOINFO_MODE_AES_WV = 2,
+ AMEDIACODECRYPTOINFO_MODE_AES_CBC = 3
} cryptoinfo_mode_t;
typedef struct {
diff --git a/include/private/media/AudioTrackShared.h b/include/private/media/AudioTrackShared.h
index ea8a78e..92cf6bb 100644
--- a/include/private/media/AudioTrackShared.h
+++ b/include/private/media/AudioTrackShared.h
@@ -177,6 +177,10 @@
// server write-only, client read
ExtendedTimestampQueue::Shared mExtendedTimestampQueue;
+ // This is set by AudioTrack.setBufferSizeInFrames().
+ // A write will not fill the buffer above this limit.
+ volatile uint32_t mBufferSizeInFrames; // effective size of the buffer
+
public:
volatile int32_t mFlags; // combinations of CBLK_*
@@ -268,6 +272,8 @@
// DEAD_OBJECT Server has died or invalidated, caller should destroy this proxy and re-create.
// -EINTR Call has been interrupted. Look around to see why, and then perhaps try again.
// NO_INIT Shared memory is corrupt.
+ // NOT_ENOUGH_DATA Server has disabled the track because of underrun: restart the track
+ // if still in active state.
// Assertion failure on entry, if buffer == NULL or buffer->mFrameCount == 0.
status_t obtainBuffer(Buffer* buffer, const struct timespec *requested = NULL,
struct timespec *elapsed = NULL);
@@ -312,9 +318,9 @@
return mEpoch;
}
- size_t getBufferSizeInFrames() const { return mBufferSizeInFrames; }
- // See documentation for AudioTrack.setBufferSizeInFrames()
- size_t setBufferSizeInFrames(size_t requestedSize);
+ uint32_t getBufferSizeInFrames() const { return mBufferSizeInFrames; }
+ // See documentation for AudioTrack::setBufferSizeInFrames()
+ uint32_t setBufferSizeInFrames(uint32_t requestedSize);
status_t getTimestamp(ExtendedTimestamp *timestamp) {
if (timestamp == nullptr) {
@@ -329,12 +335,10 @@
mTimestamp.clear();
}
-protected:
- // This is set by AudioTrack.setBufferSizeInFrames().
- // A write will not fill the buffer above this limit.
- size_t mBufferSizeInFrames; // effective size of the buffer
-
private:
+ // This is a copy of mCblk->mBufferSizeInFrames
+ uint32_t mBufferSizeInFrames; // effective size of the buffer
+
Modulo<uint32_t> mEpoch;
// The shared buffer contents referred to by the timestamp observer
@@ -518,6 +522,11 @@
mTimestampMutator.push(timestamp);
}
+ // Get dynamic buffer size from the shared control block.
+ uint32_t getBufferSizeInFrames() const {
+ return android_atomic_acquire_load((int32_t *)&mCblk->mBufferSizeInFrames);
+ }
+
protected:
size_t mAvailToClient; // estimated frames available to client prior to releaseBuffer()
int32_t mFlush; // our copy of cblk->u.mStreaming.mFlush, for streaming output only
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index aa79bc1..5ce1798 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -31,9 +31,29 @@
$(call include-path-for, audio-utils) \
external/sonic \
+# If AUDIOSERVER_MULTILIB in device.mk is non-empty then it is used to control
+# the LOCAL_MULTILIB for all audioserver exclusive libraries.
+# This is relevant for 64 bit architectures where either or both
+# 32 and 64 bit libraries may be built.
+#
+# AUDIOSERVER_MULTILIB may be set as follows:
+# 32 to build 32 bit audioserver libraries and 32 bit audioserver.
+# 64 to build 64 bit audioserver libraries and 64 bit audioserver.
+# both to build both 32 bit and 64 bit libraries,
+# and use primary target architecture (32 or 64) for audioserver.
+# first to build libraries and audioserver for the primary target architecture only.
+# <empty> to build both 32 and 64 bit libraries and 32 bit audioserver.
+
+ifeq ($(strip $(AUDIOSERVER_MULTILIB)),)
+LOCAL_MULTILIB := 32
+else
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+endif
+
LOCAL_MODULE := audioserver
-LOCAL_32_BIT_ONLY := true
LOCAL_INIT_RC := audioserver.rc
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index 698da1f..4a7a988 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -56,6 +56,7 @@
sp<ProcessState> proc(ProcessState::self());
MediaLogService::instantiate();
ProcessState::self()->startThreadPool();
+ IPCThreadState::self()->joinThreadPool();
for (;;) {
siginfo_t info;
int ret = waitid(P_PID, childPid, &info, WEXITED | WSTOPPED | WCONTINUED);
diff --git a/media/img_utils/include/img_utils/TagDefinitions.h b/media/img_utils/include/img_utils/TagDefinitions.h
index e9a7480..1cc9866 100644
--- a/media/img_utils/include/img_utils/TagDefinitions.h
+++ b/media/img_utils/include/img_utils/TagDefinitions.h
@@ -193,6 +193,18 @@
};
/**
+ * Convenience values for tags with enumerated values
+ */
+
+enum {
+ TAG_ORIENTATION_NORMAL = 1,
+ TAG_ORIENTATION_ROTATE_180 = 3,
+ TAG_ORIENTATION_ROTATE_90 = 6,
+ TAG_ORIENTATION_ROTATE_270 = 8,
+ TAG_ORIENTATION_UNKNOWN = 9
+};
+
+/**
* TIFF_EP_TAG_DEFINITIONS contains tags defined in the TIFF EP spec
*/
const TagDefinition_t TIFF_EP_TAG_DEFINITIONS[] = {
@@ -731,7 +743,7 @@
{ // BlackLevel
"BlackLevel",
0xC61Au,
- LONG,
+ RATIONAL,
RAW_IFD,
0,
UNDEFINED_ENDIAN
diff --git a/media/img_utils/src/DngUtils.cpp b/media/img_utils/src/DngUtils.cpp
index 9473dce..9dc5f05 100644
--- a/media/img_utils/src/DngUtils.cpp
+++ b/media/img_utils/src/DngUtils.cpp
@@ -18,6 +18,7 @@
#include <inttypes.h>
+#include <vector>
#include <math.h>
namespace android {
@@ -63,10 +64,17 @@
double spacingV = 1.0 / lsmHeight;
double spacingH = 1.0 / lsmWidth;
- float redMap[lsmWidth * lsmHeight];
- float greenEvenMap[lsmWidth * lsmHeight];
- float greenOddMap[lsmWidth * lsmHeight];
- float blueMap[lsmWidth * lsmHeight];
+ std::vector<float> redMapVector(lsmWidth * lsmHeight);
+ float *redMap = redMapVector.data();
+
+ std::vector<float> greenEvenMapVector(lsmWidth * lsmHeight);
+ float *greenEvenMap = greenEvenMapVector.data();
+
+ std::vector<float> greenOddMapVector(lsmWidth * lsmHeight);
+ float *greenOddMap = greenOddMapVector.data();
+
+ std::vector<float> blueMapVector(lsmWidth * lsmHeight);
+ float *blueMap = blueMapVector.data();
size_t lsmMapSize = lsmWidth * lsmHeight * 4;
diff --git a/media/img_utils/src/NOTICE b/media/img_utils/src/NOTICE
new file mode 100644
index 0000000..90cea57
--- /dev/null
+++ b/media/img_utils/src/NOTICE
@@ -0,0 +1,2 @@
+This product includes DNG technology under license by Adobe Systems
+Incorporated.
diff --git a/media/libcpustats/Android.mk b/media/libcpustats/Android.mk
index ee283a6..57fe527 100644
--- a/media/libcpustats/Android.mk
+++ b/media/libcpustats/Android.mk
@@ -8,6 +8,6 @@
LOCAL_MODULE := libcpustats
-LOCAL_CFLAGS := -std=gnu++11 -Werror
+LOCAL_CFLAGS := -std=gnu++11 -Werror -Wall
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libeffects/downmix/EffectDownmix.c b/media/libeffects/downmix/EffectDownmix.c
index 4a41037..9823c55 100644
--- a/media/libeffects/downmix/EffectDownmix.c
+++ b/media/libeffects/downmix/EffectDownmix.c
@@ -141,6 +141,37 @@
}
#endif
+static bool Downmix_validChannelMask(uint32_t mask)
+{
+ if (!mask) {
+ return false;
+ }
+ // check against unsupported channels
+ if (mask & kUnsupported) {
+ ALOGE("Unsupported channels (top or front left/right of center)");
+ return false;
+ }
+ // verify has FL/FR
+ if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
+ ALOGE("Front channels must be present");
+ return false;
+ }
+ // verify uses SIDE as a pair (ok if not using SIDE at all)
+ if ((mask & kSides) != 0) {
+ if ((mask & kSides) != kSides) {
+ ALOGE("Side channels must be used as a pair");
+ return false;
+ }
+ }
+ // verify uses BACK as a pair (ok if not using BACK at all)
+ if ((mask & kBacks) != 0) {
+ if ((mask & kBacks) != kBacks) {
+ ALOGE("Back channels must be used as a pair");
+ return false;
+ }
+ }
+ return true;
+}
/*----------------------------------------------------------------------------
* Effect API implementation
@@ -624,9 +655,10 @@
pDownmixer->apply_volume_correction = false;
pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
} else {
- // when configuring the effect, do not allow a blank channel mask
- if (pConfig->inputCfg.channels == 0) {
- ALOGE("Downmix_Configure error: input channel mask can't be 0");
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+ ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+ pConfig->inputCfg.channels);
return -EINVAL;
}
pDownmixer->input_channel_count =
@@ -969,34 +1001,13 @@
*/
bool Downmix_foldGeneric(
uint32_t mask, int16_t *pSrc, int16_t*pDst, size_t numFrames, bool accumulate) {
- // check against unsupported channels
- if (mask & kUnsupported) {
- ALOGE("Unsupported channels (top or front left/right of center)");
+
+ if (!Downmix_validChannelMask(mask)) {
return false;
}
- // verify has FL/FR
- if ((mask & AUDIO_CHANNEL_OUT_STEREO) != AUDIO_CHANNEL_OUT_STEREO) {
- ALOGE("Front channels must be present");
- return false;
- }
- // verify uses SIDE as a pair (ok if not using SIDE at all)
- bool hasSides = false;
- if ((mask & kSides) != 0) {
- if ((mask & kSides) != kSides) {
- ALOGE("Side channels must be used as a pair");
- return false;
- }
- hasSides = true;
- }
- // verify uses BACK as a pair (ok if not using BACK at all)
- bool hasBacks = false;
- if ((mask & kBacks) != 0) {
- if ((mask & kBacks) != kBacks) {
- ALOGE("Back channels must be used as a pair");
- return false;
- }
- hasBacks = true;
- }
+
+ const bool hasSides = (mask & kSides) != 0;
+ const bool hasBacks = (mask & kBacks) != 0;
const int numChan = audio_channel_count_from_out_mask(mask);
const bool hasFC = ((mask & AUDIO_CHANNEL_OUT_FRONT_CENTER) == AUDIO_CHANNEL_OUT_FRONT_CENTER);
diff --git a/media/libmedia/Android.mk b/media/libmedia/Android.mk
index 479ccbb..63f9ed7 100644
--- a/media/libmedia/Android.mk
+++ b/media/libmedia/Android.mk
@@ -7,7 +7,7 @@
LOCAL_MODULE:= libmedia_helper
LOCAL_MODULE_TAGS := optional
-LOCAL_C_FLAGS += -Werror -Wno-error=deprecated-declarations -Wall
+LOCAL_CFLAGS += -Werror -Wno-error=deprecated-declarations -Wall
LOCAL_CLANG := true
include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libmedia/AudioEffect.cpp b/media/libmedia/AudioEffect.cpp
index ff82544..590952f 100644
--- a/media/libmedia/AudioEffect.cpp
+++ b/media/libmedia/AudioEffect.cpp
@@ -47,7 +47,7 @@
int32_t priority,
effect_callback_t cbf,
void* user,
- int sessionId,
+ audio_session_t sessionId,
audio_io_handle_t io
)
: mStatus(NO_INIT), mOpPackageName(opPackageName)
@@ -61,7 +61,7 @@
int32_t priority,
effect_callback_t cbf,
void* user,
- int sessionId,
+ audio_session_t sessionId,
audio_io_handle_t io
)
: mStatus(NO_INIT), mOpPackageName(opPackageName)
@@ -93,7 +93,7 @@
int32_t priority,
effect_callback_t cbf,
void* user,
- int sessionId,
+ audio_session_t sessionId,
audio_io_handle_t io)
{
sp<IEffect> iEffect;
@@ -433,7 +433,7 @@
}
-status_t AudioEffect::queryDefaultPreProcessing(int audioSession,
+status_t AudioEffect::queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
diff --git a/media/libmedia/AudioPolicy.cpp b/media/libmedia/AudioPolicy.cpp
index ea22b6c..d1f7525 100644
--- a/media/libmedia/AudioPolicy.cpp
+++ b/media/libmedia/AudioPolicy.cpp
@@ -67,7 +67,8 @@
mFormat.channel_mask = (audio_channel_mask_t)parcel->readInt32();
mFormat.format = (audio_format_t)parcel->readInt32();
mRouteFlags = parcel->readInt32();
- mRegistrationId = parcel->readString8();
+ mDeviceType = (audio_devices_t) parcel->readInt32();
+ mDeviceAddress = parcel->readString8();
mCbFlags = (uint32_t)parcel->readInt32();
size_t size = (size_t)parcel->readInt32();
if (size > MAX_CRITERIA_PER_MIX) {
@@ -89,7 +90,8 @@
parcel->writeInt32(mFormat.channel_mask);
parcel->writeInt32(mFormat.format);
parcel->writeInt32(mRouteFlags);
- parcel->writeString8(mRegistrationId);
+ parcel->writeInt32(mDeviceType);
+ parcel->writeString8(mDeviceAddress);
parcel->writeInt32(mCbFlags);
size_t size = mCriteria.size();
if (size > MAX_CRITERIA_PER_MIX) {
diff --git a/media/libmedia/AudioRecord.cpp b/media/libmedia/AudioRecord.cpp
index ec57d96..2976a5c 100644
--- a/media/libmedia/AudioRecord.cpp
+++ b/media/libmedia/AudioRecord.cpp
@@ -66,7 +66,7 @@
// ---------------------------------------------------------------------------
AudioRecord::AudioRecord(const String16 &opPackageName)
- : mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
+ : mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName), mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
@@ -82,13 +82,14 @@
callback_t cbf,
void* user,
uint32_t notificationFrames,
- int sessionId,
+ audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
int uid,
pid_t pid,
const audio_attributes_t* pAttributes)
- : mStatus(NO_INIT),
+ : mActive(false),
+ mStatus(NO_INIT),
mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
@@ -139,7 +140,7 @@
void* user,
uint32_t notificationFrames,
bool threadCanCallJava,
- int sessionId,
+ audio_session_t sessionId,
transfer_type transferType,
audio_input_flags_t flags,
int uid,
@@ -191,10 +192,6 @@
mAttributes.source, mAttributes.flags, mAttributes.tags);
}
- if (sampleRate == 0) {
- ALOGE("Invalid sample rate %u", sampleRate);
- return BAD_VALUE;
- }
mSampleRate = sampleRate;
// these below should probably come from the audioFlinger too...
@@ -231,7 +228,7 @@
// mNotificationFramesAct is initialized in openRecord_l
if (sessionId == AUDIO_SESSION_ALLOCATE) {
- mSessionId = AudioSystem::newAudioUniqueId();
+ mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
} else {
mSessionId = sessionId;
}
@@ -250,7 +247,7 @@
mClientPid = pid;
}
- mFlags = flags;
+ mOrigFlags = mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
@@ -272,10 +269,9 @@
}
mStatus = NO_ERROR;
- mActive = false;
mUserData = user;
// TODO: add audio hardware input latency here
- mLatency = (1000*mFrameCount) / sampleRate;
+ mLatency = (1000 * mFrameCount) / mSampleRate;
mMarkerPosition = 0;
mMarkerReached = false;
mNewPosition = 0;
@@ -292,7 +288,7 @@
// -------------------------------------------------------------------------
-status_t AudioRecord::start(AudioSystem::sync_event_t event, int triggerSession)
+status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
{
ALOGV("start, sync event %d trigger session %d", event, triggerSession);
@@ -517,28 +513,85 @@
return NO_INIT;
}
- // Fast tracks must be at the primary _output_ [sic] sampling rate,
- // because there is currently no concept of a primary input sampling rate
- uint32_t afSampleRate = AudioSystem::getPrimaryOutputSamplingRate();
- if (afSampleRate == 0) {
- ALOGW("getPrimaryOutputSamplingRate failed");
+ if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
+ AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
+ }
+ audio_io_handle_t input;
+
+ // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
+ // After fast request is denied, we will request again if IAudioRecord is re-created.
+
+ status_t status;
+
+ // Not a conventional loop, but a retry loop for at most two iterations total.
+ // Try first maybe with FAST flag then try again without FAST flag if that fails.
+ // Exits loop normally via a return at the bottom, or with error via a break.
+ // The sp<> references will be dropped when re-entering scope.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
+ for (;;) {
+
+ status = AudioSystem::getInputForAttr(&mAttributes, &input,
+ mSessionId,
+ // FIXME compare to AudioTrack
+ IPCThreadState::self()->getCallingUid(),
+ mSampleRate, mFormat, mChannelMask,
+ mFlags, mSelectedDeviceId);
+
+ if (status != NO_ERROR || input == AUDIO_IO_HANDLE_NONE) {
+ ALOGE("Could not get audio input for session %d, record source %d, sample rate %u, "
+ "format %#x, channel mask %#x, flags %#x",
+ mSessionId, mAttributes.source, mSampleRate, mFormat, mChannelMask, mFlags);
+ return BAD_VALUE;
+ }
+
+ // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
+ // we must release it ourselves if anything goes wrong.
+
+#if 0
+ size_t afFrameCount;
+ status = AudioSystem::getFrameCount(input, &afFrameCount);
+ if (status != NO_ERROR) {
+ ALOGE("getFrameCount(input=%d) status %d", input, status);
+ break;
+ }
+#endif
+
+ uint32_t afSampleRate;
+ status = AudioSystem::getSamplingRate(input, &afSampleRate);
+ if (status != NO_ERROR) {
+ ALOGE("getSamplingRate(input=%d) status %d", input, status);
+ break;
+ }
+ if (mSampleRate == 0) {
+ mSampleRate = afSampleRate;
}
// Client can only express a preference for FAST. Server will perform additional tests.
- if ((mFlags & AUDIO_INPUT_FLAG_FAST) && !((
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ bool useCaseAllowed =
// either of these use cases:
// use case 1: callback transfer mode
(mTransfer == TRANSFER_CALLBACK) ||
// use case 2: obtain/release mode
- (mTransfer == TRANSFER_OBTAIN)) &&
- // matching sample rate
- (mSampleRate == afSampleRate))) {
- ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, primary %u Hz",
+ (mTransfer == TRANSFER_OBTAIN);
+ // sample rates must also match
+ bool fastAllowed = useCaseAllowed && (mSampleRate == afSampleRate);
+ if (!fastAllowed) {
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied by client; transfer %d, "
+ "track %u Hz, input %u Hz",
mTransfer, mSampleRate, afSampleRate);
- // once denied, do not request again if IAudioRecord is re-created
- mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
+ mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
+ AUDIO_INPUT_FLAG_RAW));
+ AudioSystem::releaseInput(input, mSessionId);
+ continue; // retry
+ }
}
+ // The notification frame count is the period between callbacks, as suggested by the client
+ // but moderated by the server. For record, the calculations are done entirely on server side.
+ size_t notificationFrames = mNotificationFramesReq;
+ size_t frameCount = mReqFrameCount;
+
IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
pid_t tid = -1;
@@ -549,34 +602,9 @@
}
}
- if (mDeviceCallback != 0 && mInput != AUDIO_IO_HANDLE_NONE) {
- AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mInput);
- }
-
- audio_io_handle_t input;
- status_t status = AudioSystem::getInputForAttr(&mAttributes, &input,
- (audio_session_t)mSessionId,
- IPCThreadState::self()->getCallingUid(),
- mSampleRate, mFormat, mChannelMask,
- mFlags, mSelectedDeviceId);
-
- if (status != NO_ERROR) {
- ALOGE("Could not get audio input for record source %d, sample rate %u, format %#x, "
- "channel mask %#x, session %d, flags %#x",
- mAttributes.source, mSampleRate, mFormat, mChannelMask, mSessionId, mFlags);
- return BAD_VALUE;
- }
- {
- // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
- // we must release it ourselves if anything goes wrong.
-
- size_t frameCount = mReqFrameCount;
size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
// but we will still need the original value also
- int originalSessionId = mSessionId;
-
- // The notification frame count is the period between callbacks, as suggested by the server.
- size_t notificationFrames = mNotificationFramesReq;
+ audio_session_t originalSessionId = mSessionId;
sp<IMemory> iMem; // for cblk
sp<IMemory> bufferMem;
@@ -599,13 +627,26 @@
if (status != NO_ERROR) {
ALOGE("AudioFlinger could not create record track, status: %d", status);
- goto release;
+ break;
}
ALOG_ASSERT(record != 0);
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
+ mAwaitBoost = false;
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ if (trackFlags & IAudioFlinger::TRACK_FAST) {
+ ALOGI("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
+ mAwaitBoost = true;
+ } else {
+ ALOGW("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
+ mFlags = (audio_input_flags_t) (mFlags & ~(AUDIO_INPUT_FLAG_FAST |
+ AUDIO_INPUT_FLAG_RAW));
+ continue; // retry
+ }
+ }
+
if (iMem == 0) {
ALOGE("Could not get control block");
return NO_INIT;
@@ -648,23 +689,13 @@
}
frameCount = temp;
- mAwaitBoost = false;
- if (mFlags & AUDIO_INPUT_FLAG_FAST) {
- if (trackFlags & IAudioFlinger::TRACK_FAST) {
- ALOGV("AUDIO_INPUT_FLAG_FAST successful; frameCount %zu", frameCount);
- mAwaitBoost = true;
- } else {
- ALOGV("AUDIO_INPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
- // once denied, do not request again if IAudioRecord is re-created
- mFlags = (audio_input_flags_t) (mFlags & ~AUDIO_INPUT_FLAG_FAST);
- }
+ // Make sure that application is notified with sufficient margin before overrun.
+ // The computation is done on server side.
+ if (mNotificationFramesReq > 0 && notificationFrames != mNotificationFramesReq) {
+ ALOGW("Server adjusted notificationFrames from %u to %zu for frameCount %zu",
+ mNotificationFramesReq, notificationFrames, frameCount);
}
-
- // Make sure that application is notified with sufficient margin before overrun
- if (notificationFrames == 0 || notificationFrames > frameCount) {
- ALOGW("Received notificationFrames %zu for frameCount %zu", notificationFrames, frameCount);
- }
- mNotificationFramesAct = notificationFrames;
+ mNotificationFramesAct = (uint32_t) notificationFrames;
// We retain a copy of the I/O handle, but don't own the reference
mInput = input;
@@ -690,10 +721,13 @@
}
return NO_ERROR;
+
+ // End of retry loop.
+ // The lack of indentation is deliberate, to reduce code churn and ease merges.
}
-release:
- AudioSystem::releaseInput(input, (audio_session_t)mSessionId);
+// Arrive here on error, via a break
+ AudioSystem::releaseInput(input, mSessionId);
if (status == NO_ERROR) {
status = NO_INIT;
}
@@ -1123,6 +1157,8 @@
ALOGW("dead IAudioRecord, creating a new one from %s()", from);
++mSequence;
+ mFlags = mOrigFlags;
+
// if the new IAudioRecord is created, openRecord_l() will modify the
// following member variables: mAudioRecord, mCblkMemory, mCblk, mBufferMemory.
// It will also delete the strong references on previous IAudioRecord and IMemory
@@ -1133,7 +1169,7 @@
if (mActive) {
// callback thread or sync event hasn't changed
// FIXME this fails if we have a new AudioFlinger instance
- result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, 0);
+ result = mAudioRecord->start(AudioSystem::SYNC_EVENT_SAME, AUDIO_SESSION_NONE);
}
mFramesReadServerOffset = mFramesRead; // server resets to zero so we need an offset.
}
diff --git a/media/libmedia/AudioSystem.cpp b/media/libmedia/AudioSystem.cpp
index 1607af5..3a5dee6 100644
--- a/media/libmedia/AudioSystem.cpp
+++ b/media/libmedia/AudioSystem.cpp
@@ -264,25 +264,23 @@
return getSamplingRate(output, samplingRate);
}
-status_t AudioSystem::getSamplingRate(audio_io_handle_t output,
+status_t AudioSystem::getSamplingRate(audio_io_handle_t ioHandle,
uint32_t* samplingRate)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
- if (outputDesc == 0) {
- ALOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
- *samplingRate = af->sampleRate(output);
+ sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
+ if (desc == 0) {
+ *samplingRate = af->sampleRate(ioHandle);
} else {
- ALOGV("getOutputSamplingRate() reading from output desc");
- *samplingRate = outputDesc->mSamplingRate;
+ *samplingRate = desc->mSamplingRate;
}
if (*samplingRate == 0) {
- ALOGE("AudioSystem::getSamplingRate failed for output %d", output);
+ ALOGE("AudioSystem::getSamplingRate failed for ioHandle %d", ioHandle);
return BAD_VALUE;
}
- ALOGV("getSamplingRate() output %d, sampling rate %u", output, *samplingRate);
+ ALOGV("getSamplingRate() ioHandle %d, sampling rate %u", ioHandle, *samplingRate);
return NO_ERROR;
}
@@ -303,23 +301,23 @@
return getFrameCount(output, frameCount);
}
-status_t AudioSystem::getFrameCount(audio_io_handle_t output,
+status_t AudioSystem::getFrameCount(audio_io_handle_t ioHandle,
size_t* frameCount)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
- sp<AudioIoDescriptor> outputDesc = getIoDescriptor(output);
- if (outputDesc == 0) {
- *frameCount = af->frameCount(output);
+ sp<AudioIoDescriptor> desc = getIoDescriptor(ioHandle);
+ if (desc == 0) {
+ *frameCount = af->frameCount(ioHandle);
} else {
- *frameCount = outputDesc->mFrameCount;
+ *frameCount = desc->mFrameCount;
}
if (*frameCount == 0) {
- ALOGE("AudioSystem::getFrameCount failed for output %d", output);
+ ALOGE("AudioSystem::getFrameCount failed for ioHandle %d", ioHandle);
return BAD_VALUE;
}
- ALOGV("getFrameCount() output %d, frameCount %zu", output, *frameCount);
+ ALOGV("getFrameCount() ioHandle %d, frameCount %zu", ioHandle, *frameCount);
return NO_ERROR;
}
@@ -394,14 +392,14 @@
return result;
}
-audio_unique_id_t AudioSystem::newAudioUniqueId()
+audio_unique_id_t AudioSystem::newAudioUniqueId(audio_unique_id_use_t use)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return AUDIO_UNIQUE_ID_ALLOCATE;
- return af->newAudioUniqueId();
+ return af->newAudioUniqueId(use);
}
-void AudioSystem::acquireAudioSessionId(int audioSession, pid_t pid)
+void AudioSystem::acquireAudioSessionId(audio_session_t audioSession, pid_t pid)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af != 0) {
@@ -409,7 +407,7 @@
}
}
-void AudioSystem::releaseAudioSessionId(int audioSession, pid_t pid)
+void AudioSystem::releaseAudioSessionId(audio_session_t audioSession, pid_t pid)
{
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af != 0) {
@@ -904,7 +902,7 @@
status_t AudioSystem::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1246,7 +1244,8 @@
void AudioSystem::AudioPolicyServiceClient::onRecordingConfigurationUpdate(
int event, audio_session_t session, audio_source_t source,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig) {
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle) {
record_config_callback cb = NULL;
{
Mutex::Autolock _l(AudioSystem::gLock);
@@ -1254,7 +1253,7 @@
}
if (cb != NULL) {
- cb(event, session, source, clientConfig, deviceConfig);
+ cb(event, session, source, clientConfig, deviceConfig, patchHandle);
}
}
diff --git a/media/libmedia/AudioTrack.cpp b/media/libmedia/AudioTrack.cpp
index f0074b6..e70c611 100644
--- a/media/libmedia/AudioTrack.cpp
+++ b/media/libmedia/AudioTrack.cpp
@@ -163,6 +163,7 @@
AudioTrack::AudioTrack()
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -184,7 +185,7 @@
callback_t cbf,
void* user,
uint32_t notificationFrames,
- int sessionId,
+ audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
int uid,
@@ -192,6 +193,7 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -213,7 +215,7 @@
callback_t cbf,
void* user,
uint32_t notificationFrames,
- int sessionId,
+ audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
int uid,
@@ -221,6 +223,7 @@
const audio_attributes_t* pAttributes,
bool doNotReconnect)
: mStatus(NO_INIT),
+ mState(STATE_STOPPED),
mPreviousPriority(ANDROID_PRIORITY_NORMAL),
mPreviousSchedulingGroup(SP_DEFAULT),
mPausedPosition(0),
@@ -272,7 +275,7 @@
uint32_t notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
- int sessionId,
+ audio_session_t sessionId,
transfer_type transferType,
const audio_offload_info_t *offloadInfo,
int uid,
@@ -438,7 +441,7 @@
mNotificationFramesReq = notificationFrames;
mNotificationFramesAct = 0;
if (sessionId == AUDIO_SESSION_ALLOCATE) {
- mSessionId = AudioSystem::newAudioUniqueId();
+ mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
} else {
mSessionId = sessionId;
}
@@ -455,7 +458,7 @@
mClientPid = pid;
}
mAuxEffectId = 0;
- mFlags = flags;
+ mOrigFlags = mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
@@ -477,7 +480,6 @@
}
mStatus = NO_ERROR;
- mState = STATE_STOPPED;
mUserData = user;
mLoopCount = 0;
mLoopStart = 0;
@@ -552,19 +554,6 @@
mNewPosition = mPosition + mUpdatePeriod;
int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
- sp<AudioTrackThread> t = mAudioTrackThread;
- if (t != 0) {
- if (previousState == STATE_STOPPING) {
- mProxy->interrupt();
- } else {
- t->resume();
- }
- } else {
- mPreviousPriority = getpriority(PRIO_PROCESS, 0);
- get_sched_policy(0, &mPreviousSchedulingGroup);
- androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
- }
-
status_t status = NO_ERROR;
if (!(flags & CBLK_INVALID)) {
status = mAudioTrack->start();
@@ -576,7 +565,21 @@
status = restoreTrack_l("start");
}
- if (status != NO_ERROR) {
+ // resume or pause the callback thread as needed.
+ sp<AudioTrackThread> t = mAudioTrackThread;
+ if (status == NO_ERROR) {
+ if (t != 0) {
+ if (previousState == STATE_STOPPING) {
+ mProxy->interrupt();
+ } else {
+ t->resume();
+ }
+ } else {
+ mPreviousPriority = getpriority(PRIO_PROCESS, 0);
+ get_sched_policy(0, &mPreviousSchedulingGroup);
+ androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
+ }
+ } else {
ALOGE("start() status %d", status);
mState = previousState;
if (t != 0) {
@@ -860,7 +863,7 @@
if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
return NO_INIT;
}
- return mProxy->getBufferSizeInFrames();
+ return (ssize_t) mProxy->getBufferSizeInFrames();
}
ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
@@ -873,10 +876,7 @@
if (!audio_is_linear_pcm(mFormat)) {
return INVALID_OPERATION;
}
- // TODO also need to inform the server side (through mAudioTrack) that
- // the buffer count is reduced, otherwise the track may never start
- // because the server thinks it is never filled.
- return mProxy->setBufferSizeInFrames(bufferSizeInFrames);
+ return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
}
status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
@@ -1016,7 +1016,11 @@
}
AutoMutex lock(mLock);
- if (isOffloadedOrDirect_l()) {
+ // FIXME: offloaded and direct tracks call into the HAL for render positions
+ // for compressed/synced data; however, we use proxy position for pure linear pcm data
+ // as we do not know the capability of the HAL for pcm position support and standby.
+ // There may be some latency differences between the HAL position and the proxy position.
+ if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
uint32_t dspFrames = 0;
if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
@@ -1153,9 +1157,12 @@
audio_stream_type_t streamType = mStreamType;
audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
+ // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
+ // After fast request is denied, we will request again if IAudioTrack is re-created.
+
status_t status;
status = AudioSystem::getOutputForAttr(attr, &output,
- (audio_session_t)mSessionId, &streamType, mClientUid,
+ mSessionId, &streamType, mClientUid,
mSampleRate, mFormat, mChannelMask,
mFlags, mSelectedDeviceId, mOffloadInfo);
@@ -1192,6 +1199,7 @@
mSampleRate = mAfSampleRate;
mOriginalSampleRate = mAfSampleRate;
}
+
// Client can only express a preference for FAST. Server will perform additional tests.
if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
bool useCaseAllowed =
@@ -1207,21 +1215,13 @@
// sample rates must also match
bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
if (!fastAllowed) {
- ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d,"
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
"track %u Hz, output %u Hz",
mTransfer, mSampleRate, mAfSampleRate);
- // once denied, do not request again if IAudioTrack is re-created
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
- // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
- // n = 1 fast track with single buffering; nBuffering is ignored
- // n = 2 fast track with double buffering
- // n = 2 normal track, (including those with sample rate conversion)
- // n >= 3 very high latency or very small notification interval (unused).
- const uint32_t nBuffering = 2;
-
mNotificationFramesAct = mNotificationFramesReq;
size_t frameCount = mReqFrameCount;
@@ -1295,7 +1295,7 @@
size_t temp = frameCount; // temp may be replaced by a revised value of frameCount,
// but we will still need the original value also
- int originalSessionId = mSessionId;
+ audio_session_t originalSessionId = mSessionId;
sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
mSampleRate,
mFormat,
@@ -1320,6 +1320,7 @@
// AudioFlinger now owns the reference to the I/O handle,
// so we are no longer responsible for releasing it.
+ // FIXME compare to AudioRecord
sp<IMemory> iMem = track->getCblk();
if (iMem == 0) {
ALOGE("Could not get control block");
@@ -1358,39 +1359,29 @@
mAwaitBoost = true;
}
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
- // once denied, do not request again if IAudioTrack is re-created
+ ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
}
}
- if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
- ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
- } else {
- ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
- mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- // FIXME This is a warning, not an error, so don't return error status
- //return NO_INIT;
- }
- }
- if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
- if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
- ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
- } else {
- ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
- mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- // FIXME This is a warning, not an error, so don't return error status
- //return NO_INIT;
- }
- }
- // Make sure that application is notified with sufficient margin before underrun
+
+ // Make sure that application is notified with sufficient margin before underrun.
+ // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
+ // n = 1 fast track with single buffering; nBuffering is ignored
+ // n = 2 fast track with double buffering
+ // n = 2 normal track, (including those with sample rate conversion)
+ // n >= 3 very high latency or very small notification interval (unused).
+ // FIXME Move the computation from client side to server side,
+ // and allow nBuffering to be larger than 1 for OpenSL ES, like it can be for Java.
if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
- // Theoretically double-buffering is not required for fast tracks,
- // due to tighter scheduling. But in practice, to accommodate kernels with
- // scheduling jitter, and apps with computation jitter, we use double-buffering
- // for fast tracks just like normal streaming tracks.
- if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
- mNotificationFramesAct = frameCount / nBuffering;
+ size_t maxNotificationFrames = frameCount;
+ if (!(trackFlags & IAudioFlinger::TRACK_FAST)) {
+ const uint32_t nBuffering = 2;
+ maxNotificationFrames /= nBuffering;
+ }
+ if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
+ ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
+ mNotificationFramesAct, maxNotificationFrames, frameCount);
+ mNotificationFramesAct = (uint32_t) maxNotificationFrames;
}
}
@@ -1464,7 +1455,7 @@
}
release:
- AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
+ AudioSystem::releaseOutput(output, streamType, mSessionId);
if (status == NO_ERROR) {
status = NO_INIT;
}
@@ -1545,6 +1536,10 @@
}
oldSequence = newSequence;
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ }
+
// Keep the extra references
proxy = mProxy;
iMem = mCblkMemory;
@@ -1567,8 +1562,7 @@
buffer.mFrameCount = audioBuffer->frameCount;
// FIXME starts the requested timeout and elapsed over from scratch
status = proxy->obtainBuffer(&buffer, requested, elapsed);
-
- } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
+ } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
audioBuffer->frameCount = buffer.mFrameCount;
audioBuffer->size = buffer.mFrameCount * mFrameSize;
@@ -1601,13 +1595,16 @@
mProxy->releaseBuffer(&buffer);
// restart track if it was disabled by audioflinger due to previous underrun
- if (mState == STATE_ACTIVE) {
- audio_track_cblk_t* cblk = mCblk;
- if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
- ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
- // FIXME ignoring status
- mAudioTrack->start();
- }
+ restartIfDisabled();
+}
+
+void AudioTrack::restartIfDisabled()
+{
+ int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
+ ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
+ // FIXME ignoring status
+ mAudioTrack->start();
}
}
@@ -2083,6 +2080,8 @@
mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
}
+ mFlags = mOrigFlags;
+
// If a new IAudioTrack is successfully created, createTrack_l() will modify the
// following member variables: mAudioTrack, mCblkMemory and mCblk.
// It will also delete the strong references on previous IAudioTrack and IMemory.
diff --git a/media/libmedia/AudioTrackShared.cpp b/media/libmedia/AudioTrackShared.cpp
index 1d15495..f7baa15 100644
--- a/media/libmedia/AudioTrackShared.cpp
+++ b/media/libmedia/AudioTrackShared.cpp
@@ -46,8 +46,10 @@
}
audio_track_cblk_t::audio_track_cblk_t()
- : mServer(0), mFutex(0), mMinimum(0),
- mVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY), mSampleRate(0), mSendLevel(0), mFlags(0)
+ : mServer(0), mFutex(0), mMinimum(0)
+ , mVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY), mSampleRate(0), mSendLevel(0)
+ , mBufferSizeInFrames(0)
+ , mFlags(0)
{
memset(&u, 0, sizeof(u));
}
@@ -67,10 +69,10 @@
ClientProxy::ClientProxy(audio_track_cblk_t* cblk, void *buffers, size_t frameCount,
size_t frameSize, bool isOut, bool clientInServer)
: Proxy(cblk, buffers, frameCount, frameSize, isOut, clientInServer)
- , mBufferSizeInFrames(frameCount)
, mEpoch(0)
, mTimestampObserver(&cblk->mExtendedTimestampQueue)
{
+ setBufferSizeInFrames(frameCount);
}
const struct timespec ClientProxy::kForever = {INT_MAX /*tv_sec*/, 0 /*tv_nsec*/};
@@ -84,6 +86,27 @@
// order of minutes.
#define MAX_SEC 5
+uint32_t ClientProxy::setBufferSizeInFrames(uint32_t size)
+{
+ // The minimum should be greater than zero and less than the size
+ // at which underruns will occur.
+ const uint32_t minimum = 16; // based on AudioMixer::BLOCKSIZE
+ const uint32_t maximum = frameCount();
+ uint32_t clippedSize = size;
+ if (maximum < minimum) {
+ clippedSize = maximum;
+ } else if (clippedSize < minimum) {
+ clippedSize = minimum;
+ } else if (clippedSize > maximum) {
+ clippedSize = maximum;
+ }
+ // for server to read
+ android_atomic_release_store(clippedSize, (int32_t *)&mCblk->mBufferSizeInFrames);
+ // for client to read
+ mBufferSizeInFrames = clippedSize;
+ return clippedSize;
+}
+
status_t ClientProxy::obtainBuffer(Buffer* buffer, const struct timespec *requested,
struct timespec *elapsed)
{
@@ -129,6 +152,11 @@
status = DEAD_OBJECT;
goto end;
}
+ if (flags & CBLK_DISABLED) {
+ ALOGV("Track disabled");
+ status = NOT_ENOUGH_DATA;
+ goto end;
+ }
// check for obtainBuffer interrupted by client
if (!ignoreInitialPendingInterrupt && (flags & CBLK_INTERRUPT)) {
ALOGV("obtainBuffer() interrupted by client");
@@ -174,7 +202,7 @@
// The calculation for avail can go negative if the buffer size
// is suddenly dropped below the amount already in the buffer.
// So use a signed calculation to prevent a numeric overflow abort.
- ssize_t adjustableSize = (ssize_t) mBufferSizeInFrames;
+ ssize_t adjustableSize = (ssize_t) getBufferSizeInFrames();
ssize_t avail = (mIsOut) ? adjustableSize - filled : filled;
if (avail < 0) {
avail = 0;
@@ -357,20 +385,6 @@
(mFrameCountP2 - 1);
}
-size_t ClientProxy::setBufferSizeInFrames(size_t size)
-{
- // TODO set minimum to 2X the fast mixer buffer size.
- size_t minimum = 128 * 2; // arbitrary
- size_t maximum = frameCount();
- if (size < minimum) {
- size = minimum;
- } else if (size > maximum) {
- size = maximum;
- }
- mBufferSizeInFrames = size;
- return size;
-}
-
// ---------------------------------------------------------------------------
void AudioTrackClientProxy::flush()
@@ -425,7 +439,8 @@
status = DEAD_OBJECT;
goto end;
}
- if (flags & CBLK_STREAM_END_DONE) {
+ // a track is not supposed to underrun at this stage but consider it done
+ if (flags & (CBLK_STREAM_END_DONE | CBLK_DISABLED)) {
ALOGV("stream end received");
status = NO_ERROR;
goto end;
@@ -601,6 +616,7 @@
mAvailToClient(0), mFlush(0), mReleased(0)
, mTimestampMutator(&cblk->mExtendedTimestampQueue)
{
+ cblk->mBufferSizeInFrames = frameCount;
}
status_t ServerProxy::obtainBuffer(Buffer* buffer, bool ackFlush)
diff --git a/media/libmedia/IAudioFlinger.cpp b/media/libmedia/IAudioFlinger.cpp
index 0bf503a..042eac5 100644
--- a/media/libmedia/IAudioFlinger.cpp
+++ b/media/libmedia/IAudioFlinger.cpp
@@ -104,7 +104,7 @@
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
- int *sessionId,
+ audio_session_t *sessionId,
int clientUid,
status_t *status)
{
@@ -128,7 +128,7 @@
}
data.writeInt32((int32_t) output);
data.writeInt32((int32_t) tid);
- int lSessionId = AUDIO_SESSION_ALLOCATE;
+ audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
if (sessionId != NULL) {
lSessionId = *sessionId;
}
@@ -146,7 +146,7 @@
if (flags != NULL) {
*flags = lFlags;
}
- lSessionId = reply.readInt32();
+ lSessionId = (audio_session_t) reply.readInt32();
if (sessionId != NULL) {
*sessionId = lSessionId;
}
@@ -180,7 +180,7 @@
track_flags_t *flags,
pid_t tid,
int clientUid,
- int *sessionId,
+ audio_session_t *sessionId,
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
@@ -200,7 +200,7 @@
data.writeInt32(lFlags);
data.writeInt32((int32_t) tid);
data.writeInt32((int32_t) clientUid);
- int lSessionId = AUDIO_SESSION_ALLOCATE;
+ audio_session_t lSessionId = AUDIO_SESSION_ALLOCATE;
if (sessionId != NULL) {
lSessionId = *sessionId;
}
@@ -220,7 +220,7 @@
if (flags != NULL) {
*flags = lFlags;
}
- lSessionId = reply.readInt32();
+ lSessionId = (audio_session_t) reply.readInt32();
if (sessionId != NULL) {
*sessionId = lSessionId;
}
@@ -265,11 +265,11 @@
return record;
}
- virtual uint32_t sampleRate(audio_io_handle_t output) const
+ virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
+ data.writeInt32((int32_t) ioHandle);
remote()->transact(SAMPLE_RATE, data, &reply);
return reply.readInt32();
}
@@ -283,11 +283,11 @@
return (audio_format_t) reply.readInt32();
}
- virtual size_t frameCount(audio_io_handle_t output) const
+ virtual size_t frameCount(audio_io_handle_t ioHandle) const
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
- data.writeInt32((int32_t) output);
+ data.writeInt32((int32_t) ioHandle);
remote()->transact(FRAME_COUNT, data, &reply);
return reply.readInt64();
}
@@ -612,10 +612,11 @@
return (uint32_t) reply.readInt32();
}
- virtual audio_unique_id_t newAudioUniqueId()
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
+ data.writeInt32((int32_t) use);
status_t status = remote()->transact(NEW_AUDIO_SESSION_ID, data, &reply);
audio_unique_id_t id = AUDIO_SESSION_ALLOCATE;
if (status == NO_ERROR) {
@@ -624,7 +625,7 @@
return id;
}
- virtual void acquireAudioSessionId(int audioSession, int pid)
+ virtual void acquireAudioSessionId(audio_session_t audioSession, int pid)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -633,7 +634,7 @@
remote()->transact(ACQUIRE_AUDIO_SESSION_ID, data, &reply);
}
- virtual void releaseAudioSessionId(int audioSession, int pid)
+ virtual void releaseAudioSessionId(audio_session_t audioSession, int pid)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
@@ -706,7 +707,7 @@
const sp<IEffectClient>& client,
int32_t priority,
audio_io_handle_t output,
- int sessionId,
+ audio_session_t sessionId,
const String16& opPackageName,
status_t *status,
int *id,
@@ -753,7 +754,7 @@
return effect;
}
- virtual status_t moveEffects(int session, audio_io_handle_t srcOutput,
+ virtual status_t moveEffects(audio_session_t session, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput)
{
Parcel data, reply;
@@ -935,7 +936,7 @@
}
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
pid_t tid = (pid_t) data.readInt32();
- int sessionId = data.readInt32();
+ audio_session_t sessionId = (audio_session_t) data.readInt32();
int clientUid = data.readInt32();
status_t status = NO_ERROR;
sp<IAudioTrack> track;
@@ -968,7 +969,7 @@
track_flags_t flags = (track_flags_t) data.readInt32();
pid_t tid = (pid_t) data.readInt32();
int clientUid = data.readInt32();
- int sessionId = data.readInt32();
+ audio_session_t sessionId = (audio_session_t) data.readInt32();
size_t notificationFrames = data.readInt64();
sp<IMemory> cblk;
sp<IMemory> buffers;
@@ -1208,19 +1209,19 @@
} break;
case NEW_AUDIO_SESSION_ID: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(newAudioUniqueId());
+ reply->writeInt32(newAudioUniqueId((audio_unique_id_use_t) data.readInt32()));
return NO_ERROR;
} break;
case ACQUIRE_AUDIO_SESSION_ID: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int audioSession = data.readInt32();
+ audio_session_t audioSession = (audio_session_t) data.readInt32();
int pid = data.readInt32();
acquireAudioSessionId(audioSession, pid);
return NO_ERROR;
} break;
case RELEASE_AUDIO_SESSION_ID: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int audioSession = data.readInt32();
+ audio_session_t audioSession = (audio_session_t) data.readInt32();
int pid = data.readInt32();
releaseAudioSessionId(audioSession, pid);
return NO_ERROR;
@@ -1266,7 +1267,7 @@
sp<IEffectClient> client = interface_cast<IEffectClient>(data.readStrongBinder());
int32_t priority = data.readInt32();
audio_io_handle_t output = (audio_io_handle_t) data.readInt32();
- int sessionId = data.readInt32();
+ audio_session_t sessionId = (audio_session_t) data.readInt32();
const String16 opPackageName = data.readString16();
status_t status = NO_ERROR;
int id = 0;
@@ -1283,7 +1284,7 @@
} break;
case MOVE_EFFECTS: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- int session = data.readInt32();
+ audio_session_t session = (audio_session_t) data.readInt32();
audio_io_handle_t srcOutput = (audio_io_handle_t) data.readInt32();
audio_io_handle_t dstOutput = (audio_io_handle_t) data.readInt32();
reply->writeInt32(moveEffects(session, srcOutput, dstOutput));
@@ -1410,7 +1411,7 @@
} break;
case GET_AUDIO_HW_SYNC: {
CHECK_INTERFACE(IAudioFlinger, data, reply);
- reply->writeInt32(getAudioHwSyncForSession((audio_session_t)data.readInt32()));
+ reply->writeInt32(getAudioHwSyncForSession((audio_session_t) data.readInt32()));
return NO_ERROR;
} break;
case SYSTEM_READY: {
diff --git a/media/libmedia/IAudioPolicyService.cpp b/media/libmedia/IAudioPolicyService.cpp
index c95d4c4..16e8f11 100644
--- a/media/libmedia/IAudioPolicyService.cpp
+++ b/media/libmedia/IAudioPolicyService.cpp
@@ -247,7 +247,7 @@
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(output);
data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t)session);
+ data.writeInt32((int32_t) session);
remote()->transact(START_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
@@ -260,7 +260,7 @@
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(output);
data.writeInt32((int32_t) stream);
- data.writeInt32((int32_t)session);
+ data.writeInt32((int32_t) session);
remote()->transact(STOP_OUTPUT, data, &reply);
return static_cast <status_t> (reply.readInt32());
}
@@ -420,7 +420,7 @@
virtual status_t registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id)
{
Parcel data, reply;
@@ -482,7 +482,7 @@
return reply.readInt32();
}
- virtual status_t queryDefaultPreProcessing(int audioSession,
+ virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
@@ -1065,7 +1065,7 @@
data.read(&desc, sizeof(effect_descriptor_t));
audio_io_handle_t io = data.readInt32();
uint32_t strategy = data.readInt32();
- int session = data.readInt32();
+ audio_session_t session = (audio_session_t) data.readInt32();
int id = data.readInt32();
reply->writeInt32(static_cast <int32_t>(registerEffect(&desc,
io,
@@ -1115,7 +1115,7 @@
case QUERY_DEFAULT_PRE_PROCESSING: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
- int audioSession = data.readInt32();
+ audio_session_t audioSession = (audio_session_t) data.readInt32();
uint32_t count = data.readInt32();
if (count > AudioEffect::kMaxPreProcessing) {
count = AudioEffect::kMaxPreProcessing;
diff --git a/media/libmedia/IAudioPolicyServiceClient.cpp b/media/libmedia/IAudioPolicyServiceClient.cpp
index 0aeaf3c..5f931e5 100644
--- a/media/libmedia/IAudioPolicyServiceClient.cpp
+++ b/media/libmedia/IAudioPolicyServiceClient.cpp
@@ -82,7 +82,7 @@
void onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig) {
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle) {
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
data.writeInt32(event);
@@ -90,6 +90,7 @@
data.writeInt32(source);
writeAudioConfigBaseToParcel(data, clientConfig);
writeAudioConfigBaseToParcel(data, deviceConfig);
+ data.writeInt32(patchHandle);
remote()->transact(RECORDING_CONFIGURATION_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
}
};
@@ -128,7 +129,9 @@
audio_config_base_t deviceConfig;
readAudioConfigBaseFromParcel(data, &clientConfig);
readAudioConfigBaseFromParcel(data, &deviceConfig);
- onRecordingConfigurationUpdate(event, session, source, &clientConfig, &deviceConfig);
+ audio_patch_handle_t patchHandle = (audio_patch_handle_t) data.readInt32();
+ onRecordingConfigurationUpdate(event, session, source, &clientConfig, &deviceConfig,
+ patchHandle);
return NO_ERROR;
} break;
default:
diff --git a/media/libmedia/IAudioRecord.cpp b/media/libmedia/IAudioRecord.cpp
index 9d80753..ae66436 100644
--- a/media/libmedia/IAudioRecord.cpp
+++ b/media/libmedia/IAudioRecord.cpp
@@ -42,7 +42,7 @@
{
}
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession)
+ virtual status_t start(int /*AudioSystem::sync_event_t*/ event, audio_session_t triggerSession)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioRecord::getInterfaceDescriptor());
@@ -77,7 +77,7 @@
case START: {
CHECK_INTERFACE(IAudioRecord, data, reply);
int /*AudioSystem::sync_event_t*/ event = data.readInt32();
- int triggerSession = data.readInt32();
+ audio_session_t triggerSession = (audio_session_t) data.readInt32();
reply->writeInt32(start(event, triggerSession));
return NO_ERROR;
} break;
diff --git a/media/libmedia/IMediaPlayerService.cpp b/media/libmedia/IMediaPlayerService.cpp
index afc94ab..27b9edd 100644
--- a/media/libmedia/IMediaPlayerService.cpp
+++ b/media/libmedia/IMediaPlayerService.cpp
@@ -68,7 +68,7 @@
}
virtual sp<IMediaPlayer> create(
- const sp<IMediaPlayerClient>& client, int audioSessionId) {
+ const sp<IMediaPlayerClient>& client, audio_session_t audioSessionId) {
Parcel data, reply;
data.writeInterfaceToken(IMediaPlayerService::getInterfaceDescriptor());
data.writeStrongBinder(IInterface::asBinder(client));
@@ -161,7 +161,7 @@
CHECK_INTERFACE(IMediaPlayerService, data, reply);
sp<IMediaPlayerClient> client =
interface_cast<IMediaPlayerClient>(data.readStrongBinder());
- int audioSessionId = data.readInt32();
+ audio_session_t audioSessionId = (audio_session_t) data.readInt32();
sp<IMediaPlayer> player = create(client, audioSessionId);
reply->writeStrongBinder(IInterface::asBinder(player));
return NO_ERROR;
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 0eea820..cded55c 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -23,7 +23,8 @@
#include <utils/Log.h>
#include <binder/Parcel.h>
-#include <camera/ICamera.h>
+#include <camera/android/hardware/ICamera.h>
+#include <camera/ICameraRecordingProxy.h>
#include <media/IMediaRecorderClient.h>
#include <media/IMediaRecorder.h>
#include <gui/Surface.h>
@@ -67,7 +68,7 @@
{
}
- status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
+ status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
{
ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
Parcel data, reply;
@@ -479,9 +480,10 @@
case SET_CAMERA: {
ALOGV("SET_CAMERA");
CHECK_INTERFACE(IMediaRecorder, data, reply);
- sp<ICamera> camera = interface_cast<ICamera>(data.readStrongBinder());
+ sp<hardware::ICamera> camera =
+ interface_cast<hardware::ICamera>(data.readStrongBinder());
sp<ICameraRecordingProxy> proxy =
- interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
+ interface_cast<ICameraRecordingProxy>(data.readStrongBinder());
reply->writeInt32(setCamera(camera, proxy));
return NO_ERROR;
} break;
diff --git a/media/libmedia/IMediaSource.cpp b/media/libmedia/IMediaSource.cpp
index b988c46..8376c0a 100644
--- a/media/libmedia/IMediaSource.cpp
+++ b/media/libmedia/IMediaSource.cpp
@@ -315,6 +315,9 @@
reply->writeInt32(offset);
reply->writeInt32(usedSize);
buf->meta_data()->writeToParcel(*reply);
+ if (buf->mMemory == NULL) {
+ buf->release();
+ }
} else {
// buffer is small: copy it
if (buf->mMemory != NULL) {
diff --git a/media/libmedia/IOMX.cpp b/media/libmedia/IOMX.cpp
index 0ef6e3e..61fba35 100644
--- a/media/libmedia/IOMX.cpp
+++ b/media/libmedia/IOMX.cpp
@@ -18,10 +18,13 @@
#define LOG_TAG "IOMX"
#include <utils/Log.h>
+#include <sys/mman.h>
+
#include <binder/IMemory.h>
#include <binder/Parcel.h>
#include <media/IOMX.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/openmax/OMX_IndexExt.h>
namespace android {
@@ -311,13 +314,14 @@
}
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
Parcel data, reply;
status_t err;
data.writeInterfaceToken(IOMX::getInterfaceDescriptor());
data.writeInt32((int32_t)node);
data.writeInt32(port_index);
+ data.writeInt32(dataSpace);
err = remote()->transact(CREATE_INPUT_SURFACE, data, &reply);
if (err != OK) {
ALOGW("binder transaction failed: %d", err);
@@ -710,38 +714,74 @@
size_t size = data.readInt64();
- status_t err = NO_MEMORY;
- void *params = calloc(size, 1);
- if (params) {
- err = data.read(params, size);
- if (err != OK) {
- android_errorWriteLog(0x534e4554, "26914474");
+ status_t err = NOT_ENOUGH_DATA;
+ void *params = NULL;
+ size_t pageSize = 0;
+ size_t allocSize = 0;
+ bool isUsageBits = (index == (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits);
+ if ((isUsageBits && size < 4) ||
+ (!isUsageBits && code != SET_INTERNAL_OPTION && size < 8)) {
+ // we expect the structure to contain at least the size and
+ // version, 8 bytes total
+ ALOGE("b/27207275 (%zu) (%d/%d)", size, int(index), int(code));
+ android_errorWriteLog(0x534e4554, "27207275");
+ } else {
+ err = NO_MEMORY;
+ pageSize = (size_t) sysconf(_SC_PAGE_SIZE);
+ if (size > SIZE_MAX - (pageSize * 2)) {
+ ALOGE("requested param size too big");
} else {
- switch (code) {
- case GET_PARAMETER:
- err = getParameter(node, index, params, size);
- break;
- case SET_PARAMETER:
- err = setParameter(node, index, params, size);
- break;
- case GET_CONFIG:
- err = getConfig(node, index, params, size);
- break;
- case SET_CONFIG:
- err = setConfig(node, index, params, size);
- break;
- case SET_INTERNAL_OPTION:
- {
- InternalOptionType type =
- (InternalOptionType)data.readInt32();
+ allocSize = (size + pageSize * 2) & ~(pageSize - 1);
+ params = mmap(NULL, allocSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1 /* fd */, 0 /* offset */);
+ }
+ if (params != MAP_FAILED) {
+ err = data.read(params, size);
+ if (err != OK) {
+ android_errorWriteLog(0x534e4554, "26914474");
+ } else {
+ err = NOT_ENOUGH_DATA;
+ OMX_U32 declaredSize = *(OMX_U32*)params;
+ if (code != SET_INTERNAL_OPTION &&
+ index != (OMX_INDEXTYPE) OMX_IndexParamConsumerUsageBits &&
+ declaredSize > size) {
+ // the buffer says it's bigger than it actually is
+ ALOGE("b/27207275 (%u/%zu)", declaredSize, size);
+ android_errorWriteLog(0x534e4554, "27207275");
+ } else {
+ // mark the last page as inaccessible, to avoid exploitation
+ // of codecs that access past the end of the allocation because
+ // they didn't check the size
+ mprotect((char*)params + allocSize - pageSize, pageSize, PROT_NONE);
+ switch (code) {
+ case GET_PARAMETER:
+ err = getParameter(node, index, params, size);
+ break;
+ case SET_PARAMETER:
+ err = setParameter(node, index, params, size);
+ break;
+ case GET_CONFIG:
+ err = getConfig(node, index, params, size);
+ break;
+ case SET_CONFIG:
+ err = setConfig(node, index, params, size);
+ break;
+ case SET_INTERNAL_OPTION:
+ {
+ InternalOptionType type =
+ (InternalOptionType)data.readInt32();
- err = setInternalOption(node, index, type, params, size);
- break;
+ err = setInternalOption(node, index, type, params, size);
+ break;
+ }
+
+ default:
+ TRESPASS();
+ }
}
-
- default:
- TRESPASS();
}
+ } else {
+ ALOGE("couldn't map: %s", strerror(errno));
}
}
@@ -751,7 +791,9 @@
reply->write(params, size);
}
- free(params);
+ if (params) {
+ munmap(params, allocSize);
+ }
params = NULL;
return NO_ERROR;
@@ -872,10 +914,11 @@
node_id node = (node_id)data.readInt32();
OMX_U32 port_index = data.readInt32();
+ android_dataspace dataSpace = (android_dataspace)data.readInt32();
sp<IGraphicBufferProducer> bufferProducer;
MetadataBufferType type = kMetadataBufferTypeInvalid;
- status_t err = createInputSurface(node, port_index, &bufferProducer, &type);
+ status_t err = createInputSurface(node, port_index, dataSpace, &bufferProducer, &type);
if ((err != OK) && (type == kMetadataBufferTypeInvalid)) {
android_errorWriteLog(0x534e4554, "26324358");
diff --git a/media/libmedia/MediaResource.cpp b/media/libmedia/MediaResource.cpp
index 40ec0cb..e636a50 100644
--- a/media/libmedia/MediaResource.cpp
+++ b/media/libmedia/MediaResource.cpp
@@ -21,38 +21,36 @@
namespace android {
-const char kResourceSecureCodec[] = "secure-codec";
-const char kResourceNonSecureCodec[] = "non-secure-codec";
-const char kResourceAudioCodec[] = "audio-codec";
-const char kResourceVideoCodec[] = "video-codec";
-const char kResourceGraphicMemory[] = "graphic-memory";
+MediaResource::MediaResource()
+ : mType(kUnspecified),
+ mSubType(kUnspecifiedSubType),
+ mValue(0) {}
-MediaResource::MediaResource() : mValue(0) {}
-
-MediaResource::MediaResource(String8 type, uint64_t value)
+MediaResource::MediaResource(Type type, uint64_t value)
: mType(type),
+ mSubType(kUnspecifiedSubType),
mValue(value) {}
-MediaResource::MediaResource(String8 type, String8 subType, uint64_t value)
+MediaResource::MediaResource(Type type, SubType subType, uint64_t value)
: mType(type),
mSubType(subType),
mValue(value) {}
void MediaResource::readFromParcel(const Parcel &parcel) {
- mType = parcel.readString8();
- mSubType = parcel.readString8();
+ mType = static_cast<Type>(parcel.readInt32());
+ mSubType = static_cast<SubType>(parcel.readInt32());
mValue = parcel.readUint64();
}
void MediaResource::writeToParcel(Parcel *parcel) const {
- parcel->writeString8(mType);
- parcel->writeString8(mSubType);
+ parcel->writeInt32(static_cast<int32_t>(mType));
+ parcel->writeInt32(static_cast<int32_t>(mSubType));
parcel->writeUint64(mValue);
}
String8 MediaResource::toString() const {
String8 str;
- str.appendFormat("%s/%s:%llu", mType.string(), mSubType.string(), (unsigned long long)mValue);
+ str.appendFormat("%s/%s:%llu", asString(mType), asString(mSubType), (unsigned long long)mValue);
return str;
}
diff --git a/media/libmedia/Visualizer.cpp b/media/libmedia/Visualizer.cpp
index f5c1b1f..31e310b 100644
--- a/media/libmedia/Visualizer.cpp
+++ b/media/libmedia/Visualizer.cpp
@@ -38,7 +38,7 @@
int32_t priority,
effect_callback_t cbf,
void* user,
- int sessionId)
+ audio_session_t sessionId)
: AudioEffect(SL_IID_VISUALIZATION, opPackageName, NULL, priority, cbf, user, sessionId),
mCaptureRate(CAPTURE_RATE_DEF),
mCaptureSize(CAPTURE_SIZE_DEF),
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 337e963..2795101 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -63,7 +63,7 @@
mLeftVolume = mRightVolume = 1.0;
mVideoWidth = mVideoHeight = 0;
mLockThreadId = 0;
- mAudioSessionId = AudioSystem::newAudioUniqueId();
+ mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
AudioSystem::acquireAudioSessionId(mAudioSessionId, -1);
mSendLevel = 0;
mRetransmitEndpointValid = false;
@@ -207,7 +207,7 @@
ALOGV("invoke %zu", request.dataSize());
return mPlayer->invoke(request, reply);
}
- ALOGE("invoke failed: wrong state %X", mCurrentState);
+ ALOGE("invoke failed: wrong state %X, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -252,7 +252,7 @@
mCurrentState = MEDIA_PLAYER_PREPARING;
return mPlayer->prepareAsync();
}
- ALOGE("prepareAsync called in state %d", mCurrentState);
+ ALOGE("prepareAsync called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -318,7 +318,7 @@
}
}
} else {
- ALOGE("start called in state %d", mCurrentState);
+ ALOGE("start called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
ret = INVALID_OPERATION;
}
@@ -342,7 +342,7 @@
}
return ret;
}
- ALOGE("stop called in state %d", mCurrentState);
+ ALOGE("stop called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -361,7 +361,7 @@
}
return ret;
}
- ALOGE("pause called in state %d", mCurrentState);
+ ALOGE("pause called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -484,7 +484,8 @@
}
return ret;
}
- ALOGE("Attempt to call getDuration without a valid mediaplayer");
+ ALOGE("Attempt to call getDuration in wrong state: mPlayer=%p, mCurrentState=%u",
+ mPlayer.get(), mCurrentState);
return INVALID_OPERATION;
}
@@ -648,7 +649,7 @@
return OK;
}
-status_t MediaPlayer::setAudioSessionId(int sessionId)
+status_t MediaPlayer::setAudioSessionId(audio_session_t sessionId)
{
ALOGV("MediaPlayer::setAudioSessionId(%d)", sessionId);
Mutex::Autolock _l(mLock);
@@ -667,7 +668,7 @@
return NO_ERROR;
}
-int MediaPlayer::getAudioSessionId()
+audio_session_t MediaPlayer::getAudioSessionId()
{
Mutex::Autolock _l(mLock);
return mAudioSessionId;
@@ -691,7 +692,7 @@
if (mPlayer == 0 ||
(mCurrentState & MEDIA_PLAYER_IDLE) ||
(mCurrentState == MEDIA_PLAYER_STATE_ERROR )) {
- ALOGE("attachAuxEffect called in state %d", mCurrentState);
+ ALOGE("attachAuxEffect called in state %d, mPlayer(%p)", mCurrentState, mPlayer.get());
return INVALID_OPERATION;
}
@@ -906,6 +907,7 @@
}
status_t MediaPlayer::setNextMediaPlayer(const sp<MediaPlayer>& next) {
+ Mutex::Autolock _l(mLock);
if (mPlayer == NULL) {
return NO_INIT;
}
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index bfdf41d..de3b214 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -32,7 +32,8 @@
namespace android {
-status_t MediaRecorder::setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy)
+status_t MediaRecorder::setCamera(const sp<hardware::ICamera>& camera,
+ const sp<ICameraRecordingProxy>& proxy)
{
ALOGV("setCamera(%p,%p)", camera.get(), proxy.get());
if (mMediaRecorder == NULL) {
diff --git a/media/libmediaplayerservice/Android.mk b/media/libmediaplayerservice/Android.mk
index 81f2af3..7f41143 100644
--- a/media/libmediaplayerservice/Android.mk
+++ b/media/libmediaplayerservice/Android.mk
@@ -51,6 +51,7 @@
$(TOP)/frameworks/av/media/libstagefright/rtsp \
$(TOP)/frameworks/av/media/libstagefright/wifi-display \
$(TOP)/frameworks/av/media/libstagefright/webm \
+ $(TOP)/frameworks/av/include/camera \
$(TOP)/frameworks/native/include/media/openmax \
$(TOP)/external/tremolo/Tremolo \
diff --git a/media/libmediaplayerservice/MediaPlayerService.cpp b/media/libmediaplayerservice/MediaPlayerService.cpp
index bb24403..0025660 100644
--- a/media/libmediaplayerservice/MediaPlayerService.cpp
+++ b/media/libmediaplayerservice/MediaPlayerService.cpp
@@ -321,7 +321,7 @@
}
sp<IMediaPlayer> MediaPlayerService::create(const sp<IMediaPlayerClient>& client,
- int audioSessionId)
+ audio_session_t audioSessionId)
{
pid_t pid = IPCThreadState::self()->getCallingPid();
int32_t connId = android_atomic_inc(&mNextConnId);
@@ -556,7 +556,7 @@
MediaPlayerService::Client::Client(
const sp<MediaPlayerService>& service, pid_t pid,
int32_t connId, const sp<IMediaPlayerClient>& client,
- int audioSessionId, uid_t uid)
+ audio_session_t audioSessionId, uid_t uid)
{
ALOGV("Client(%d) constructor", connId);
mPid = pid;
@@ -1332,7 +1332,7 @@
#undef LOG_TAG
#define LOG_TAG "AudioSink"
-MediaPlayerService::AudioOutput::AudioOutput(int sessionId, int uid, int pid,
+MediaPlayerService::AudioOutput::AudioOutput(audio_session_t sessionId, int uid, int pid,
const audio_attributes_t* attr)
: mCallback(NULL),
mCallbackCookie(NULL),
@@ -2111,7 +2111,7 @@
data->unlock();
}
-int MediaPlayerService::AudioOutput::getSessionId() const
+audio_session_t MediaPlayerService::AudioOutput::getSessionId() const
{
Mutex::Autolock lock(mLock);
return mSessionId;
diff --git a/media/libmediaplayerservice/MediaPlayerService.h b/media/libmediaplayerservice/MediaPlayerService.h
index bd98ef1..1dd2ddd 100644
--- a/media/libmediaplayerservice/MediaPlayerService.h
+++ b/media/libmediaplayerservice/MediaPlayerService.h
@@ -75,7 +75,7 @@
class CallbackData;
public:
- AudioOutput(int sessionId, int uid, int pid,
+ AudioOutput(audio_session_t sessionId, int uid, int pid,
const audio_attributes_t * attr);
virtual ~AudioOutput();
@@ -90,7 +90,7 @@
virtual status_t getTimestamp(AudioTimestamp &ts) const;
virtual int64_t getPlayedOutDurationUs(int64_t nowUs) const;
virtual status_t getFramesWritten(uint32_t *frameswritten) const;
- virtual int getSessionId() const;
+ virtual audio_session_t getSessionId() const;
virtual uint32_t getSampleRate() const;
virtual status_t open(
@@ -150,7 +150,7 @@
uint32_t mSampleRateHz; // sample rate of the content, as set in open()
float mMsecsPerFrame;
size_t mFrameSize;
- int mSessionId;
+ audio_session_t mSessionId;
int mUid;
int mPid;
float mSendLevel;
@@ -214,7 +214,8 @@
void removeMediaRecorderClient(wp<MediaRecorderClient> client);
virtual sp<IMediaMetadataRetriever> createMetadataRetriever();
- virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client, int audioSessionId);
+ virtual sp<IMediaPlayer> create(const sp<IMediaPlayerClient>& client,
+ audio_session_t audioSessionId);
virtual sp<IMediaCodecList> getCodecList() const;
virtual sp<IOMX> getOMX();
@@ -332,7 +333,7 @@
pid_t pid() const { return mPid; }
virtual status_t dump(int fd, const Vector<String16>& args);
- int getAudioSessionId() { return mAudioSessionId; }
+ audio_session_t getAudioSessionId() { return mAudioSessionId; }
private:
friend class MediaPlayerService;
@@ -340,7 +341,7 @@
pid_t pid,
int32_t connId,
const sp<IMediaPlayerClient>& client,
- int audioSessionId,
+ audio_session_t audioSessionId,
uid_t uid);
Client();
virtual ~Client();
@@ -375,7 +376,7 @@
status_t mStatus;
bool mLoop;
int32_t mConnId;
- int mAudioSessionId;
+ audio_session_t mAudioSessionId;
audio_attributes_t * mAudioAttributes;
uid_t mUID;
sp<ANativeWindow> mConnectedWindow;
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 3b4e148..73abe99 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -76,7 +76,7 @@
-status_t MediaRecorderClient::setCamera(const sp<ICamera>& camera,
+status_t MediaRecorderClient::setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy)
{
ALOGV("setCamera");
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index c0d9c4c..5a080df 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -30,7 +30,7 @@
class MediaRecorderClient : public BnMediaRecorder
{
public:
- virtual status_t setCamera(const sp<ICamera>& camera,
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy);
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
virtual status_t setVideoSource(int vs);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index b335d09..78eb3b0 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -22,6 +22,8 @@
#include "WebmWriter.h"
#include "StagefrightRecorder.h"
+#include <android/hardware/ICamera.h>
+
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -42,7 +44,6 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/MediaProfiles.h>
-#include <camera/ICamera.h>
#include <camera/CameraParameters.h>
#include <utils/Errors.h>
@@ -215,7 +216,7 @@
return OK;
}
-status_t StagefrightRecorder::setCamera(const sp<ICamera> &camera,
+status_t StagefrightRecorder::setCamera(const sp<hardware::ICamera> &camera,
const sp<ICameraRecordingProxy> &proxy) {
ALOGV("setCamera");
if (camera == 0) {
@@ -1570,6 +1571,9 @@
if (cameraSource == NULL) {
flags |= MediaCodecSource::FLAG_USE_SURFACE_INPUT;
+ } else {
+ // require dataspace setup even if not using surface input
+ format->setInt32("android._using-recorder", 1);
}
sp<MediaCodecSource> encoder = MediaCodecSource::Create(
@@ -1757,12 +1761,16 @@
// 30 ms buffer to avoid timestamp overlap
mTotalPausedDurationUs += (systemTime() / 1000) - mPauseStartTimeUs - 30000;
+ double timeOffset = -mTotalPausedDurationUs;
+ if (mCaptureFpsEnable) {
+ timeOffset *= mCaptureFps / mFrameRate;
+ }
if (mAudioEncoderSource != NULL) {
- mAudioEncoderSource->setInputBufferTimeOffset(-mTotalPausedDurationUs);
+ mAudioEncoderSource->setInputBufferTimeOffset((int64_t)timeOffset);
mAudioEncoderSource->start();
}
if (mVideoEncoderSource != NULL) {
- mVideoEncoderSource->setInputBufferTimeOffset(-mTotalPausedDurationUs);
+ mVideoEncoderSource->setInputBufferTimeOffset((int64_t)timeOffset);
mVideoEncoderSource->start();
}
mPauseStartTimeUs = 0;
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index 761e987..a73197f 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -53,7 +53,7 @@
virtual status_t setVideoEncoder(video_encoder ve);
virtual status_t setVideoSize(int width, int height);
virtual status_t setVideoFrameRate(int frames_per_second);
- virtual status_t setCamera(const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
+ virtual status_t setCamera(const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy);
virtual status_t setPreviewSurface(const sp<IGraphicBufferProducer>& surface);
virtual status_t setInputSurface(const sp<IGraphicBufferConsumer>& surface);
virtual status_t setOutputFile(int fd, int64_t offset, int64_t length);
@@ -73,7 +73,7 @@
virtual sp<IGraphicBufferProducer> querySurfaceMediaSource() const;
private:
- sp<ICamera> mCamera;
+ sp<hardware::ICamera> mCamera;
sp<ICameraRecordingProxy> mCameraProxy;
sp<IGraphicBufferProducer> mPreviewSurface;
sp<IGraphicBufferConsumer> mPersistentSurface;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 44279ce..42a82ac 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -746,7 +746,7 @@
if (!mPaused) {
mRenderer->pause();
}
- restartAudioFromOffload(
+ restartAudio(
currentPositionUs, true /* forceNonOffload */,
true /* needsToCreateAudioDecoder */);
if (!mPaused) {
@@ -1149,9 +1149,9 @@
positionUs = mPreviousSeekTimeUs;
}
- restartAudioFromOffload(
- positionUs, false /* forceNonOffload */,
- reason == Renderer::kDueToError /* needsToCreateAudioDecoder */);
+ restartAudio(
+ positionUs, reason == Renderer::kForceNonOffload /* forceNonOffload */,
+ reason != Renderer::kDueToTimeout /* needsToCreateAudioDecoder */);
}
break;
}
@@ -1488,14 +1488,13 @@
mRenderer->closeAudioSink();
}
-void NuPlayer::restartAudioFromOffload(
+void NuPlayer::restartAudio(
int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder) {
- if (!mOffloadAudio) {
- return;
+ if (mAudioDecoder != NULL) {
+ mAudioDecoder->pause();
+ mAudioDecoder.clear();
+ ++mAudioDecoderGeneration;
}
- mAudioDecoder->pause();
- mAudioDecoder.clear();
- ++mAudioDecoderGeneration;
if (mFlushingAudio == FLUSHING_DECODER) {
mFlushComplete[1 /* audio */][1 /* isDecoder */] = true;
mFlushingAudio = FLUSHED;
@@ -1523,7 +1522,7 @@
mOffloadAudio = false;
}
if (needsToCreateAudioDecoder) {
- instantiateDecoder(true /* audio */, &mAudioDecoder);
+ instantiateDecoder(true /* audio */, &mAudioDecoder, !forceNonOffload);
}
}
@@ -1560,7 +1559,8 @@
}
}
-status_t NuPlayer::instantiateDecoder(bool audio, sp<DecoderBase> *decoder) {
+status_t NuPlayer::instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange) {
// The audio decoder could be cleared by tear down. If still in shut down
// process, no need to create a new audio decoder.
if (*decoder != NULL || (audio && mFlushingAudio == SHUT_DOWN)) {
@@ -1608,7 +1608,9 @@
++mAudioDecoderGeneration;
notify->setInt32("generation", mAudioDecoderGeneration);
- determineAudioModeChange();
+ if (checkAudioModeChange) {
+ determineAudioModeChange();
+ }
if (mOffloadAudio) {
mSource->setOffloadAudio(true /* offload */);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.h b/media/libmediaplayerservice/nuplayer/NuPlayer.h
index 5e48b30..369590b 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.h
@@ -233,11 +233,12 @@
void tryOpenAudioSinkForOffload(const sp<AMessage> &format, bool hasVideo);
void closeAudioSink();
- void restartAudioFromOffload(
- int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
+ void restartAudio(
+ int64_t currentPositionUs, bool forceNonOffload, bool needsToCreateAudioDecoder);
void determineAudioModeChange();
- status_t instantiateDecoder(bool audio, sp<DecoderBase> *decoder);
+ status_t instantiateDecoder(
+ bool audio, sp<DecoderBase> *decoder, bool checkAudioModeChange = true);
status_t onInstantiateSecureDecoders();
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
index 332fef6..06bb53d 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.cpp
@@ -45,8 +45,7 @@
mPlayerFlags(0),
mAtEOS(false),
mLooping(false),
- mAutoLoop(false),
- mStartupSeekTimeUs(-1) {
+ mAutoLoop(false) {
ALOGV("NuPlayerDriver(%p)", this);
mLooper->setName("NuPlayerDriver Looper");
@@ -261,25 +260,11 @@
case STATE_PAUSED:
case STATE_STOPPED_AND_PREPARED:
- {
- if (mAtEOS && mStartupSeekTimeUs < 0) {
- mStartupSeekTimeUs = 0;
- mPositionUs = -1;
- }
-
- // fall through
- }
-
case STATE_PREPARED:
{
- mAtEOS = false;
mPlayer->start();
- if (mStartupSeekTimeUs >= 0) {
- mPlayer->seekToAsync(mStartupSeekTimeUs);
- mStartupSeekTimeUs = -1;
- }
- break;
+ // fall through
}
case STATE_RUNNING:
@@ -330,6 +315,7 @@
}
status_t NuPlayerDriver::pause() {
+ ALOGD("pause(%p)", this);
// The NuPlayerRenderer may get flushed if pause for long enough, e.g. the pause timeout tear
// down for audio offload mode. If that happens, the NuPlayerRenderer will no longer know the
// current position. So similar to seekTo, update |mPositionUs| to the pause position by calling
@@ -400,8 +386,6 @@
case STATE_PREPARED:
case STATE_STOPPED_AND_PREPARED:
case STATE_PAUSED:
- mStartupSeekTimeUs = seekTimeUs;
- // fall through.
case STATE_RUNNING:
{
mAtEOS = false;
@@ -502,7 +486,6 @@
mDurationUs = -1;
mPositionUs = -1;
- mStartupSeekTimeUs = -1;
mLooping = false;
return OK;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
index d009fd7..d5b4ba1 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDriver.h
@@ -122,8 +122,6 @@
bool mLooping;
bool mAutoLoop;
- int64_t mStartupSeekTimeUs;
-
status_t prepare_l();
void notifyListener_l(int msg, int ext1 = 0, int ext2 = 0, const Parcel *in = NULL);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
index 0e6a6e6..cbb9d95 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.cpp
@@ -647,7 +647,10 @@
case kWhatAudioTearDown:
{
- onAudioTearDown(kDueToError);
+ int32_t reason;
+ CHECK(msg->findInt32("reason", &reason));
+
+ onAudioTearDown((AudioTearDownReason)reason);
break;
}
@@ -741,7 +744,7 @@
case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
{
ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
- me->notifyAudioTearDown();
+ me->notifyAudioTearDown(kDueToError);
break;
}
}
@@ -946,7 +949,7 @@
ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
// This can only happen when AudioSink was opened with doNotReconnect flag set to
// true, in which case the NuPlayer will handle the reconnect.
- notifyAudioTearDown();
+ notifyAudioTearDown(kDueToError);
}
break;
}
@@ -1299,8 +1302,10 @@
notify->post(delayUs);
}
-void NuPlayer::Renderer::notifyAudioTearDown() {
- (new AMessage(kWhatAudioTearDown, this))->post();
+void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
+ sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
+ msg->setInt32("reason", reason);
+ msg->post();
}
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
@@ -1630,7 +1635,7 @@
status_t err = mAudioSink->start();
if (err != OK) {
ALOGE("cannot start AudioSink err %d", err);
- notifyAudioTearDown();
+ notifyAudioTearDown(kDueToError);
}
}
@@ -1823,6 +1828,9 @@
onDisableOffloadAudio();
mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
ALOGV("openAudioSink: offload failed");
+ if (offloadOnly) {
+ notifyAudioTearDown(kForceNonOffload);
+ }
} else {
mUseAudioCallback = true; // offload mode transfers data through callback
++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message.
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
index c3ce511..004e21c 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerRenderer.h
@@ -92,8 +92,9 @@
};
enum AudioTearDownReason {
- kDueToError = 0,
+ kDueToError = 0, // Could restart with either offload or non-offload.
kDueToTimeout,
+ kForceNonOffload, // Restart only with non-offload.
};
protected:
@@ -262,7 +263,7 @@
void notifyPosition();
void notifyVideoLateBy(int64_t lateByUs);
void notifyVideoRenderingStart();
- void notifyAudioTearDown();
+ void notifyAudioTearDown(AudioTearDownReason reason);
void flushQueue(List<QueueEntry> *queue);
bool dropBufferIfStale(bool audio, const sp<AMessage> &msg);
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
index f53afbd..ee70306 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerStreamListener.cpp
@@ -144,8 +144,17 @@
copy = size;
}
+ if (entry->mIndex >= mBuffers.size()) {
+ return ERROR_MALFORMED;
+ }
+
+ sp<IMemory> mem = mBuffers.editItemAt(entry->mIndex);
+ if (mem == NULL || mem->size() < copy || mem->size() - copy < entry->mOffset) {
+ return ERROR_MALFORMED;
+ }
+
memcpy(data,
- (const uint8_t *)mBuffers.editItemAt(entry->mIndex)->pointer()
+ (const uint8_t *)mem->pointer()
+ entry->mOffset,
copy);
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
index ec33478..ba40876 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.cpp
@@ -32,6 +32,12 @@
const int64_t kNearEOSTimeoutUs = 2000000ll; // 2 secs
+// Buffer Underflow/Prepare/StartServer/Overflow Marks
+const int64_t NuPlayer::RTSPSource::kUnderflowMarkUs = 1000000ll;
+const int64_t NuPlayer::RTSPSource::kPrepareMarkUs = 3000000ll;
+const int64_t NuPlayer::RTSPSource::kStartServerMarkUs = 5000000ll;
+const int64_t NuPlayer::RTSPSource::kOverflowMarkUs = 10000000ll;
+
NuPlayer::RTSPSource::RTSPSource(
const sp<AMessage> ¬ify,
const sp<IMediaHTTPService> &httpService,
@@ -51,6 +57,7 @@
mFinalResult(OK),
mDisconnectReplyID(0),
mBuffering(false),
+ mInPreparationPhase(true),
mSeekGeneration(0),
mEOSTimeoutAudio(0),
mEOSTimeoutVideo(0) {
@@ -127,29 +134,6 @@
msg->postAndAwaitResponse(&dummy);
}
-void NuPlayer::RTSPSource::pause() {
- int64_t mediaDurationUs = 0;
- getDuration(&mediaDurationUs);
- for (size_t index = 0; index < mTracks.size(); index++) {
- TrackInfo *info = &mTracks.editItemAt(index);
- sp<AnotherPacketSource> source = info->mSource;
-
- // Check if EOS or ERROR is received
- if (source != NULL && source->isFinished(mediaDurationUs)) {
- return;
- }
- }
- if (mHandler != NULL) {
- mHandler->pause();
- }
-}
-
-void NuPlayer::RTSPSource::resume() {
- if (mHandler != NULL) {
- mHandler->resume();
- }
-}
-
status_t NuPlayer::RTSPSource::feedMoreTSData() {
Mutex::Autolock _l(mBufferingLock);
return mFinalResult;
@@ -324,6 +308,73 @@
mHandler->seek(seekTimeUs);
}
+void NuPlayer::RTSPSource::schedulePollBuffering() {
+ sp<AMessage> msg = new AMessage(kWhatPollBuffering, this);
+ msg->post(1000000ll); // 1 second intervals
+}
+
+void NuPlayer::RTSPSource::checkBuffering(
+ bool *prepared, bool *underflow, bool *overflow, bool *startServer) {
+ size_t numTracks = mTracks.size();
+ size_t preparedCount, underflowCount, overflowCount, startCount;
+ preparedCount = underflowCount = overflowCount = startCount = 0;
+ for (size_t i = 0; i < numTracks; ++i) {
+ status_t finalResult;
+ TrackInfo *info = &mTracks.editItemAt(i);
+ sp<AnotherPacketSource> src = info->mSource;
+ int64_t bufferedDurationUs = src->getBufferedDurationUs(&finalResult);
+
+ // isFinished when duration is 0 checks for EOS result only
+ if (bufferedDurationUs > kPrepareMarkUs || src->isFinished(/* duration */ 0)) {
+ ++preparedCount;
+ }
+
+ if (src->isFinished(/* duration */ 0)) {
+ ++overflowCount;
+ } else {
+ if (bufferedDurationUs < kUnderflowMarkUs) {
+ ++underflowCount;
+ }
+ if (bufferedDurationUs > kOverflowMarkUs) {
+ ++overflowCount;
+ }
+ if (bufferedDurationUs < kStartServerMarkUs) {
+ ++startCount;
+ }
+ }
+ }
+
+ *prepared = (preparedCount == numTracks);
+ *underflow = (underflowCount > 0);
+ *overflow = (overflowCount == numTracks);
+ *startServer = (startCount > 0);
+}
+
+void NuPlayer::RTSPSource::onPollBuffering() {
+ bool prepared, underflow, overflow, startServer;
+ checkBuffering(&prepared, &underflow, &overflow, &startServer);
+
+ if (prepared && mInPreparationPhase) {
+ mInPreparationPhase = false;
+ notifyPrepared();
+ }
+
+ if (!mInPreparationPhase && underflow) {
+ startBufferingIfNecessary();
+ }
+
+ if (overflow && mHandler != NULL) {
+ stopBufferingIfNecessary();
+ mHandler->pause();
+ }
+
+ if (startServer && mHandler != NULL) {
+ mHandler->resume();
+ }
+
+ schedulePollBuffering();
+}
+
void NuPlayer::RTSPSource::onMessageReceived(const sp<AMessage> &msg) {
if (msg->what() == kWhatDisconnect) {
sp<AReplyToken> replyID;
@@ -348,6 +399,9 @@
performSeek(seekTimeUs);
return;
+ } else if (msg->what() == kWhatPollBuffering) {
+ onPollBuffering();
+ return;
}
CHECK_EQ(msg->what(), (int)kWhatNotify);
@@ -372,7 +426,7 @@
}
notifyFlagsChanged(flags);
- notifyPrepared();
+ schedulePollBuffering();
break;
}
diff --git a/media/libmediaplayerservice/nuplayer/RTSPSource.h b/media/libmediaplayerservice/nuplayer/RTSPSource.h
index 6438a1e..a6a7644 100644
--- a/media/libmediaplayerservice/nuplayer/RTSPSource.h
+++ b/media/libmediaplayerservice/nuplayer/RTSPSource.h
@@ -43,8 +43,6 @@
virtual void prepareAsync();
virtual void start();
virtual void stop();
- virtual void pause();
- virtual void resume();
virtual status_t feedMoreTSData();
@@ -65,6 +63,7 @@
kWhatNotify = 'noti',
kWhatDisconnect = 'disc',
kWhatPerformSeek = 'seek',
+ kWhatPollBuffering = 'poll',
};
enum State {
@@ -79,6 +78,12 @@
kFlagIncognito = 1,
};
+ // Buffer Prepare/Underflow/Overflow/Resume Marks
+ static const int64_t kPrepareMarkUs;
+ static const int64_t kUnderflowMarkUs;
+ static const int64_t kOverflowMarkUs;
+ static const int64_t kStartServerMarkUs;
+
struct TrackInfo {
sp<AnotherPacketSource> mSource;
@@ -100,6 +105,7 @@
sp<AReplyToken> mDisconnectReplyID;
Mutex mBufferingLock;
bool mBuffering;
+ bool mInPreparationPhase;
sp<ALooper> mLooper;
sp<MyHandler> mHandler;
@@ -126,6 +132,9 @@
void finishDisconnectIfPossible();
void performSeek(int64_t seekTimeUs);
+ void schedulePollBuffering();
+ void checkBuffering(bool *prepared, bool *underflow, bool *overflow, bool *startServer);
+ void onPollBuffering();
bool haveSufficientDataOnAllTracks();
diff --git a/media/libnbaio/Android.mk b/media/libnbaio/Android.mk
index 16c5040..e2f416b 100644
--- a/media/libnbaio/Android.mk
+++ b/media/libnbaio/Android.mk
@@ -20,9 +20,6 @@
#LOCAL_C_INCLUDES += path/to/libsndfile/src
#LOCAL_STATIC_LIBRARIES += libsndfile
-# uncomment for systrace
-# LOCAL_CFLAGS += -DATRACE_TAG=ATRACE_TAG_AUDIO
-
LOCAL_MODULE := libnbaio
LOCAL_SHARED_LIBRARIES := \
@@ -34,4 +31,6 @@
LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/media/libnbaio/PipeReader.cpp b/media/libnbaio/PipeReader.cpp
index b096903..a879647 100644
--- a/media/libnbaio/PipeReader.cpp
+++ b/media/libnbaio/PipeReader.cpp
@@ -36,7 +36,12 @@
PipeReader::~PipeReader()
{
- int32_t readers = android_atomic_dec(&mPipe.mReaders);
+#if !LOG_NDEBUG
+ int32_t readers =
+#else
+ (void)
+#endif
+ android_atomic_dec(&mPipe.mReaders);
ALOG_ASSERT(readers > 0);
}
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index ee573f0..d6a9f53 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -52,6 +52,8 @@
#include <OMX_AsString.h>
#include "include/avc_utils.h"
+#include "include/DataConverter.h"
+#include "omx/OMXUtils.h"
namespace android {
@@ -101,15 +103,6 @@
}
}
-template<class T>
-static void InitOMXParams(T *params) {
- params->nSize = sizeof(T);
- params->nVersion.s.nVersionMajor = 1;
- params->nVersion.s.nVersionMinor = 0;
- params->nVersion.s.nRevision = 0;
- params->nVersion.s.nStep = 0;
-}
-
struct MessageList : public RefBase {
MessageList() {
}
@@ -122,6 +115,13 @@
DISALLOW_EVIL_CONSTRUCTORS(MessageList);
};
+static sp<DataConverter> getCopyConverter() {
+ static pthread_once_t once = PTHREAD_ONCE_INIT; // const-inited
+ static sp<DataConverter> sCopyConverter; // zero-inited
+ pthread_once(&once, [](){ sCopyConverter = new DataConverter(); });
+ return sCopyConverter;
+}
+
struct CodecObserver : public BnOMXObserver {
CodecObserver() {}
@@ -496,8 +496,8 @@
ACodec::ACodec()
: mQuirks(0),
mNode(0),
+ mUsingNativeWindow(false),
mNativeWindowUsageBits(0),
- mSentFormat(false),
mIsVideo(false),
mIsEncoder(false),
mFatalError(false),
@@ -513,13 +513,15 @@
mOutputMetadataType(kMetadataBufferTypeInvalid),
mLegacyAdaptiveExperiment(false),
mMetadataBuffersToSubmit(0),
+ mNumUndequeuedBuffers(0),
mRepeatFrameDelayUs(-1ll),
mMaxPtsGapUs(-1ll),
mMaxFps(-1),
mTimePerFrameUs(-1ll),
mTimePerCaptureUs(-1ll),
mCreateInputBuffersSuspended(false),
- mTunneled(false) {
+ mTunneled(false),
+ mDescribeColorAspectsIndex((OMX_INDEXTYPE)0) {
mUninitializedState = new UninitializedState(this);
mLoadedState = new LoadedState(this);
mLoadedToIdleState = new LoadedToIdleState(this);
@@ -788,7 +790,7 @@
if (err == OK) {
MetadataBufferType type =
portIndex == kPortIndexOutput ? mOutputMetadataType : mInputMetadataType;
- int32_t bufSize = def.nBufferSize;
+ size_t bufSize = def.nBufferSize;
if (type == kMetadataBufferTypeGrallocSource) {
bufSize = sizeof(VideoGrallocMetadata);
} else if (type == kMetadataBufferTypeANWBuffer) {
@@ -799,23 +801,47 @@
// metadata size as we prefer to generate native source metadata, but component
// may require gralloc source. For camera source, allocate at least enough
// size for native metadata buffers.
- int32_t allottedSize = bufSize;
+ size_t allottedSize = bufSize;
if (portIndex == kPortIndexInput && type >= kMetadataBufferTypeGrallocSource) {
bufSize = max(sizeof(VideoGrallocMetadata), sizeof(VideoNativeMetadata));
} else if (portIndex == kPortIndexInput && type == kMetadataBufferTypeCameraSource) {
- bufSize = max(bufSize, (int32_t)sizeof(VideoNativeMetadata));
+ bufSize = max(bufSize, sizeof(VideoNativeMetadata));
}
- ALOGV("[%s] Allocating %u buffers of size %d/%d (from %u using %s) on %s port",
+ size_t conversionBufferSize = 0;
+
+ sp<DataConverter> converter = mConverter[portIndex];
+ if (converter != NULL) {
+ // here we assume sane conversions of max 4:1, so result fits in int32
+ if (portIndex == kPortIndexInput) {
+ conversionBufferSize = converter->sourceSize(bufSize);
+ } else {
+ conversionBufferSize = converter->targetSize(bufSize);
+ }
+ }
+
+ size_t alignment = MemoryDealer::getAllocationAlignment();
+
+ ALOGV("[%s] Allocating %u buffers of size %zu/%zu (from %u using %s) on %s port",
mComponentName.c_str(),
def.nBufferCountActual, bufSize, allottedSize, def.nBufferSize, asString(type),
portIndex == kPortIndexInput ? "input" : "output");
- if (bufSize == 0 || def.nBufferCountActual > SIZE_MAX / bufSize) {
+ // verify buffer sizes to avoid overflow in align()
+ if (bufSize == 0 || max(bufSize, conversionBufferSize) > kMaxCodecBufferSize) {
ALOGE("b/22885421");
return NO_MEMORY;
}
- size_t totalSize = def.nBufferCountActual * bufSize;
+
+ // don't modify bufSize as OMX may not expect it to increase after negotiation
+ size_t alignedSize = align(bufSize, alignment);
+ size_t alignedConvSize = align(conversionBufferSize, alignment);
+ if (def.nBufferCountActual > SIZE_MAX / (alignedSize + alignedConvSize)) {
+ ALOGE("b/22885421");
+ return NO_MEMORY;
+ }
+
+ size_t totalSize = def.nBufferCountActual * (alignedSize + alignedConvSize);
mDealer[portIndex] = new MemoryDealer(totalSize, "ACodec");
for (OMX_U32 i = 0; i < def.nBufferCountActual && err == OK; ++i) {
@@ -854,6 +880,7 @@
// because Widevine source only receives these base addresses.
info.mData = new ABuffer(ptr != NULL ? ptr : (void *)native_handle, bufSize);
info.mNativeHandle = NativeHandle::create(native_handle, true /* ownsHandle */);
+ info.mCodecData = info.mData;
} else if (mQuirks & requiresAllocateBufferBit) {
err = mOMX->allocateBufferWithBackup(
mNode, portIndex, mem, &info.mBufferID, allottedSize);
@@ -862,11 +889,27 @@
}
if (mem != NULL) {
- info.mData = new ABuffer(mem->pointer(), bufSize);
+ info.mCodecData = new ABuffer(mem->pointer(), bufSize);
+ info.mCodecRef = mem;
+
if (type == kMetadataBufferTypeANWBuffer) {
((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
}
- info.mMemRef = mem;
+
+ // if we require conversion, allocate conversion buffer for client use;
+ // otherwise, reuse codec buffer
+ if (mConverter[portIndex] != NULL) {
+ CHECK_GT(conversionBufferSize, (size_t)0);
+ mem = mDealer[portIndex]->allocate(conversionBufferSize);
+ if (mem == NULL|| mem->pointer() == NULL) {
+ return NO_MEMORY;
+ }
+ info.mData = new ABuffer(mem->pointer(), conversionBufferSize);
+ info.mMemRef = mem;
+ } else {
+ info.mData = info.mCodecData;
+ info.mMemRef = info.mCodecRef;
+ }
}
mBuffers[portIndex].push(info);
@@ -1059,6 +1102,7 @@
info.mIsReadFence = false;
info.mRenderInfo = NULL;
info.mData = new ABuffer(NULL /* data */, bufferSize /* capacity */);
+ info.mCodecData = info.mData;
info.mGraphicBuffer = graphicBuffer;
mBuffers[kPortIndexOutput].push(info);
@@ -1123,7 +1167,7 @@
size_t bufSize = mOutputMetadataType == kMetadataBufferTypeANWBuffer ?
sizeof(struct VideoNativeMetadata) : sizeof(struct VideoGrallocMetadata);
- size_t totalSize = bufferCount * bufSize;
+ size_t totalSize = bufferCount * align(bufSize, MemoryDealer::getAllocationAlignment());
mDealer[kPortIndexOutput] = new MemoryDealer(totalSize, "ACodec");
// Dequeue buffers and send them to OMX
@@ -1143,11 +1187,13 @@
((VideoNativeMetadata *)mem->pointer())->nFenceFd = -1;
}
info.mData = new ABuffer(mem->pointer(), mem->size());
+ info.mMemRef = mem;
+ info.mCodecData = info.mData;
+ info.mCodecRef = mem;
// we use useBuffer for metadata regardless of quirks
err = mOMX->useBuffer(
mNode, kPortIndexOutput, mem, &info.mBufferID, mem->size());
- info.mMemRef = mem;
mBuffers[kPortIndexOutput].push(info);
ALOGV("[%s] allocated meta buffer with ID %u (pointer = %p)",
@@ -1653,8 +1699,9 @@
encoder = false;
}
- sp<AMessage> inputFormat = new AMessage();
- sp<AMessage> outputFormat = mNotify->dup(); // will use this for kWhatOutputFormatChanged
+ sp<AMessage> inputFormat = new AMessage;
+ sp<AMessage> outputFormat = new AMessage;
+ mConfigFormat = msg;
mIsEncoder = encoder;
@@ -1772,6 +1819,7 @@
sp<RefBase> obj;
bool haveNativeWindow = msg->findObject("native-window", &obj)
&& obj != NULL && video && !encoder;
+ mUsingNativeWindow = haveNativeWindow;
mLegacyAdaptiveExperiment = false;
if (video && !encoder) {
inputFormat->setInt32("adaptive-playback", false);
@@ -1939,6 +1987,10 @@
}
}
+ AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+ (void)msg->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+ // invalid encodings will default to PCM-16bit in setupRawAudioFormat.
+
if (video) {
// determine need for software renderer
bool usingSwRenderer = false;
@@ -1948,9 +2000,9 @@
}
if (encoder) {
- err = setupVideoEncoder(mime, msg);
+ err = setupVideoEncoder(mime, msg, outputFormat, inputFormat);
} else {
- err = setupVideoDecoder(mime, msg, haveNativeWindow);
+ err = setupVideoDecoder(mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
if (err != OK) {
@@ -2011,7 +2063,8 @@
// fallback is not supported for protected playback
err = PERMISSION_DENIED;
} else if (err == OK) {
- err = setupVideoDecoder(mime, msg, false);
+ err = setupVideoDecoder(
+ mime, msg, haveNativeWindow, usingSwRenderer, outputFormat);
}
}
}
@@ -2142,7 +2195,7 @@
|| !msg->findInt32("sample-rate", &sampleRate)) {
err = INVALID_OPERATION;
} else {
- err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels);
+ err = setupRawAudioFormat(kPortIndexInput, sampleRate, numChannels, pcmEncoding);
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AC3)) {
int32_t numChannels;
@@ -2204,7 +2257,10 @@
err = setOperatingRate(rateFloat, video);
}
+ // NOTE: both mBaseOutputFormat and mOutputFormat are outputFormat to signal first frame.
mBaseOutputFormat = outputFormat;
+ // trigger a kWhatOutputFormatChanged msg on first buffer
+ mLastOutputFormat.clear();
err = getPortFormat(kPortIndexInput, inputFormat);
if (err == OK) {
@@ -2214,6 +2270,25 @@
mOutputFormat = outputFormat;
}
}
+
+ // create data converters if needed
+ if (!video && err == OK) {
+ AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+ if (encoder) {
+ (void)mInputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+ mConverter[kPortIndexInput] = AudioConverter::Create(pcmEncoding, codecPcmEncoding);
+ if (mConverter[kPortIndexInput] != NULL) {
+ mInputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ } else {
+ (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+ mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+ if (mConverter[kPortIndexOutput] != NULL) {
+ mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ }
+ }
+
return err;
}
@@ -2557,6 +2632,7 @@
: OMX_AUDIO_AACStreamFormatMP4FF;
OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE presentation;
+ InitOMXParams(&presentation);
presentation.nMaxOutputChannels = maxOutputChannelCount;
presentation.nDrcCut = drc.drcCut;
presentation.nDrcBoost = drc.drcBoost;
@@ -2763,7 +2839,7 @@
}
status_t ACodec::setupRawAudioFormat(
- OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels) {
+ OMX_U32 portIndex, int32_t sampleRate, int32_t numChannels, AudioEncoding encoding) {
OMX_PARAM_PORTDEFINITIONTYPE def;
InitOMXParams(&def);
def.nPortIndex = portIndex;
@@ -2796,9 +2872,23 @@
}
pcmParams.nChannels = numChannels;
- pcmParams.eNumData = OMX_NumericalDataSigned;
+ switch (encoding) {
+ case kAudioEncodingPcm8bit:
+ pcmParams.eNumData = OMX_NumericalDataUnsigned;
+ pcmParams.nBitPerSample = 8;
+ break;
+ case kAudioEncodingPcmFloat:
+ pcmParams.eNumData = OMX_NumericalDataFloat;
+ pcmParams.nBitPerSample = 32;
+ break;
+ case kAudioEncodingPcm16bit:
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.nBitPerSample = 16;
+ break;
+ default:
+ return BAD_VALUE;
+ }
pcmParams.bInterleaved = OMX_TRUE;
- pcmParams.nBitPerSample = 16;
pcmParams.nSamplingRate = sampleRate;
pcmParams.ePCMMode = OMX_AUDIO_PCMModeLinear;
@@ -2806,8 +2896,17 @@
return OMX_ErrorNone;
}
- return mOMX->setParameter(
+ err = mOMX->setParameter(
mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ // if we could not set up raw format to non-16-bit, try with 16-bit
+ // NOTE: we will also verify this via readback, in case codec ignores these fields
+ if (err != OK && encoding != kAudioEncodingPcm16bit) {
+ pcmParams.eNumData = OMX_NumericalDataSigned;
+ pcmParams.nBitPerSample = 16;
+ err = mOMX->setParameter(
+ mNode, OMX_IndexParamAudioPcm, &pcmParams, sizeof(pcmParams));
+ }
+ return err;
}
status_t ACodec::configureTunneledVideoPlayback(
@@ -3018,7 +3117,8 @@
}
status_t ACodec::setupVideoDecoder(
- const char *mime, const sp<AMessage> &msg, bool haveNativeWindow) {
+ const char *mime, const sp<AMessage> &msg, bool haveNativeWindow,
+ bool usingSwRenderer, sp<AMessage> &outputFormat) {
int32_t width, height;
if (!msg->findInt32("width", &width)
|| !msg->findInt32("height", &height)) {
@@ -3081,10 +3181,262 @@
return err;
}
+ err = setColorAspectsForVideoDecoder(
+ width, height, haveNativeWindow | usingSwRenderer, msg, outputFormat);
+ if (err == ERROR_UNSUPPORTED) { // support is optional
+ err = OK;
+ }
+ return err;
+}
+
+status_t ACodec::initDescribeColorAspectsIndex() {
+ status_t err = mOMX->getExtensionIndex(
+ mNode, "OMX.google.android.index.describeColorAspects", &mDescribeColorAspectsIndex);
+ if (err != OK) {
+ mDescribeColorAspectsIndex = (OMX_INDEXTYPE)0;
+ }
+ return err;
+}
+
+status_t ACodec::setCodecColorAspects(DescribeColorAspectsParams ¶ms, bool verify) {
+ status_t err = ERROR_UNSUPPORTED;
+ if (mDescribeColorAspectsIndex) {
+ err = mOMX->setConfig(mNode, mDescribeColorAspectsIndex, ¶ms, sizeof(params));
+ }
+ ALOGV("[%s] setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+
+ if (verify && err == OK) {
+ err = getCodecColorAspects(params);
+ }
+
+ ALOGW_IF(err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex,
+ "[%s] getting color aspects failed even though codec advertises support",
+ mComponentName.c_str());
+ return err;
+}
+
+status_t ACodec::setColorAspectsForVideoDecoder(
+ int32_t width, int32_t height, bool usingNativeWindow,
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexOutput;
+
+ getColorAspectsFromFormat(configFormat, params.sAspects);
+ if (usingNativeWindow) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ // The default aspects will be set back to the output format during the
+ // getFormat phase of configure(). Set non-Unspecified values back into the
+ // format, in case component does not support this enumeration.
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+ }
+
+ (void)initDescribeColorAspectsIndex();
+
+ // communicate color aspects to codec
+ return setCodecColorAspects(params);
+}
+
+status_t ACodec::getCodecColorAspects(DescribeColorAspectsParams ¶ms) {
+ status_t err = ERROR_UNSUPPORTED;
+ if (mDescribeColorAspectsIndex) {
+ err = mOMX->getConfig(mNode, mDescribeColorAspectsIndex, ¶ms, sizeof(params));
+ }
+ ALOGV("[%s] got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+ if (params.bRequestingDataSpace) {
+ ALOGV("for dataspace %#x", params.nDataSpace);
+ }
+ if (err == ERROR_UNSUPPORTED && mDescribeColorAspectsIndex
+ && !params.bRequestingDataSpace && !params.bDataSpaceChanged) {
+ ALOGW("[%s] getting color aspects failed even though codec advertises support",
+ mComponentName.c_str());
+ }
+ return err;
+}
+
+status_t ACodec::getInputColorAspectsForVideoEncoder(sp<AMessage> &format) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ status_t err = getCodecColorAspects(params);
+ if (err == OK) {
+ // we only set encoder input aspects if codec supports them
+ setColorAspectsIntoFormat(params.sAspects, format, true /* force */);
+ }
+ return err;
+}
+
+status_t ACodec::getDataSpace(
+ DescribeColorAspectsParams ¶ms, android_dataspace *dataSpace /* nonnull */,
+ bool tryCodec) {
+ status_t err = OK;
+ if (tryCodec) {
+ // request dataspace guidance from codec.
+ params.bRequestingDataSpace = OMX_TRUE;
+ err = getCodecColorAspects(params);
+ params.bRequestingDataSpace = OMX_FALSE;
+ if (err == OK && params.nDataSpace != HAL_DATASPACE_UNKNOWN) {
+ *dataSpace = (android_dataspace)params.nDataSpace;
+ return err;
+ } else if (err == ERROR_UNSUPPORTED) {
+ // ignore not-implemented error for dataspace requests
+ err = OK;
+ }
+ }
+
+ // this returns legacy versions if available
+ *dataSpace = getDataSpaceForColorAspects(params.sAspects, true /* mayexpand */);
+ ALOGV("[%s] using color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "and dataspace %#x",
+ mComponentName.c_str(),
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ *dataSpace);
+ return err;
+}
+
+
+status_t ACodec::getColorAspectsAndDataSpaceForVideoDecoder(
+ int32_t width, int32_t height, const sp<AMessage> &configFormat, sp<AMessage> &outputFormat,
+ android_dataspace *dataSpace) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexOutput;
+
+ // reset default format and get resulting format
+ getColorAspectsFromFormat(configFormat, params.sAspects);
+ if (dataSpace != NULL) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ }
+ status_t err = setCodecColorAspects(params, true /* readBack */);
+
+ // we always set specified aspects for decoders
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+
+ if (dataSpace != NULL) {
+ status_t res = getDataSpace(params, dataSpace, err == OK /* tryCodec */);
+ if (err == OK) {
+ err = res;
+ }
+ }
+
+ return err;
+}
+
+// initial video encoder setup for bytebuffer mode
+status_t ACodec::setColorAspectsForVideoEncoder(
+ const sp<AMessage> &configFormat, sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
+ // copy config to output format as this is not exposed via getFormat
+ copyColorConfig(configFormat, outputFormat);
+
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ getColorAspectsFromFormat(configFormat, params.sAspects);
+
+ (void)initDescribeColorAspectsIndex();
+
+ int32_t usingRecorder;
+ if (configFormat->findInt32("android._using-recorder", &usingRecorder) && usingRecorder) {
+ android_dataspace dataSpace = HAL_DATASPACE_BT709;
+ int32_t width, height;
+ if (configFormat->findInt32("width", &width)
+ && configFormat->findInt32("height", &height)) {
+ setDefaultCodecColorAspectsIfNeeded(params.sAspects, width, height);
+ status_t err = getDataSpace(
+ params, &dataSpace, mDescribeColorAspectsIndex /* tryCodec */);
+ if (err != OK) {
+ return err;
+ }
+ setColorAspectsIntoFormat(params.sAspects, outputFormat);
+ }
+ inputFormat->setInt32("android._dataspace", (int32_t)dataSpace);
+ }
+
+ // communicate color aspects to codec, but do not allow change of the platform aspects
+ ColorAspects origAspects = params.sAspects;
+ for (int triesLeft = 2; --triesLeft >= 0; ) {
+ status_t err = setCodecColorAspects(params, true /* readBack */);
+ if (err != OK
+ || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ params.sAspects, origAspects, true /* usePlatformAspects */)) {
+ return err;
+ }
+ ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+ mComponentName.c_str());
+ }
return OK;
}
-status_t ACodec::setupVideoEncoder(const char *mime, const sp<AMessage> &msg) {
+// subsequent initial video encoder setup for surface mode
+status_t ACodec::setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(
+ android_dataspace *dataSpace /* nonnull */) {
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ ColorAspects &aspects = params.sAspects;
+
+ // reset default format and store resulting format into both input and output formats
+ getColorAspectsFromFormat(mConfigFormat, aspects);
+ int32_t width, height;
+ if (mInputFormat->findInt32("width", &width) && mInputFormat->findInt32("height", &height)) {
+ setDefaultCodecColorAspectsIfNeeded(aspects, width, height);
+ }
+ setColorAspectsIntoFormat(aspects, mInputFormat);
+ setColorAspectsIntoFormat(aspects, mOutputFormat);
+
+ // communicate color aspects to codec, but do not allow any change
+ ColorAspects origAspects = aspects;
+ status_t err = OK;
+ for (int triesLeft = 2; mDescribeColorAspectsIndex && --triesLeft >= 0; ) {
+ err = setCodecColorAspects(params, true /* readBack */);
+ if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(aspects, origAspects)) {
+ break;
+ }
+ ALOGW_IF(triesLeft == 0, "[%s] Codec repeatedly changed requested ColorAspects.",
+ mComponentName.c_str());
+ }
+
+ *dataSpace = HAL_DATASPACE_BT709;
+ aspects = origAspects; // restore desired color aspects
+ status_t res = getDataSpace(
+ params, dataSpace, err == OK && mDescribeColorAspectsIndex /* tryCodec */);
+ if (err == OK) {
+ err = res;
+ }
+ mInputFormat->setInt32("android._dataspace", (int32_t)*dataSpace);
+ mInputFormat->setBuffer(
+ "android._color-aspects", ABuffer::CreateAsCopy(&aspects, sizeof(aspects)));
+
+ // update input format with codec supported color aspects (basically set unsupported
+ // aspects to Unspecified)
+ if (err == OK) {
+ (void)getInputColorAspectsForVideoEncoder(mInputFormat);
+ }
+
+ ALOGV("set default color aspects, updated input format to %s, output format to %s",
+ mInputFormat->debugString(4).c_str(), mOutputFormat->debugString(4).c_str());
+
+ return err;
+}
+
+status_t ACodec::setupVideoEncoder(
+ const char *mime, const sp<AMessage> &msg,
+ sp<AMessage> &outputFormat, sp<AMessage> &inputFormat) {
int32_t tmp;
if (!msg->findInt32("color-format", &tmp)) {
return INVALID_OPERATION;
@@ -3255,6 +3607,14 @@
break;
}
+ // Set up color aspects on input, but propagate them to the output format, as they will
+ // not be read back from encoder.
+ err = setColorAspectsForVideoEncoder(msg, outputFormat, inputFormat);
+ if (err == ERROR_UNSUPPORTED) {
+ ALOGI("[%s] cannot encode color aspects. Ignoring.", mComponentName.c_str());
+ err = OK;
+ }
+
if (err == OK) {
ALOGI("setupVideoEncoder succeeded");
}
@@ -3985,11 +4345,11 @@
}
// static
-bool ACodec::describeDefaultColorFormat(DescribeColorFormatParams ¶ms) {
- MediaImage &image = params.sMediaImage;
+bool ACodec::describeDefaultColorFormat(DescribeColorFormat2Params ¶ms) {
+ MediaImage2 &image = params.sMediaImage;
memset(&image, 0, sizeof(image));
- image.mType = MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+ image.mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
image.mNumPlanes = 0;
const OMX_COLOR_FORMATTYPE fmt = params.eColorFormat;
@@ -4024,9 +4384,10 @@
}
// set-up YUV format
- image.mType = MediaImage::MEDIA_IMAGE_TYPE_YUV;
+ image.mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
image.mNumPlanes = 3;
image.mBitDepth = 8;
+ image.mBitDepthAllocated = 8;
image.mPlane[image.Y].mOffset = 0;
image.mPlane[image.Y].mColInc = 1;
image.mPlane[image.Y].mRowInc = params.nStride;
@@ -4099,26 +4460,34 @@
// static
bool ACodec::describeColorFormat(
const sp<IOMX> &omx, IOMX::node_id node,
- DescribeColorFormatParams &describeParams)
+ DescribeColorFormat2Params &describeParams)
{
OMX_INDEXTYPE describeColorFormatIndex;
if (omx->getExtensionIndex(
node, "OMX.google.android.index.describeColorFormat",
- &describeColorFormatIndex) != OK ||
- omx->getParameter(
- node, describeColorFormatIndex,
- &describeParams, sizeof(describeParams)) != OK) {
- return describeDefaultColorFormat(describeParams);
+ &describeColorFormatIndex) == OK) {
+ DescribeColorFormatParams describeParamsV1(describeParams);
+ if (omx->getParameter(
+ node, describeColorFormatIndex,
+ &describeParamsV1, sizeof(describeParamsV1)) == OK) {
+ describeParams.initFromV1(describeParamsV1);
+ return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ }
+ } else if (omx->getExtensionIndex(
+ node, "OMX.google.android.index.describeColorFormat2", &describeColorFormatIndex) == OK
+ && omx->getParameter(
+ node, describeColorFormatIndex, &describeParams, sizeof(describeParams)) == OK) {
+ return describeParams.sMediaImage.mType != MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
}
- return describeParams.sMediaImage.mType !=
- MediaImage::MEDIA_IMAGE_TYPE_UNKNOWN;
+
+ return describeDefaultColorFormat(describeParams);
}
// static
bool ACodec::isFlexibleColorFormat(
const sp<IOMX> &omx, IOMX::node_id node,
uint32_t colorFormat, bool usingNativeBuffers, OMX_U32 *flexibleEquivalent) {
- DescribeColorFormatParams describeParams;
+ DescribeColorFormat2Params describeParams;
InitOMXParams(&describeParams);
describeParams.eColorFormat = (OMX_COLOR_FORMATTYPE)colorFormat;
// reasonable dummy values
@@ -4134,11 +4503,11 @@
return false;
}
- const MediaImage &img = describeParams.sMediaImage;
- if (img.mType == MediaImage::MEDIA_IMAGE_TYPE_YUV) {
- if (img.mNumPlanes != 3 ||
- img.mPlane[img.Y].mHorizSubsampling != 1 ||
- img.mPlane[img.Y].mVertSubsampling != 1) {
+ const MediaImage2 &img = describeParams.sMediaImage;
+ if (img.mType == MediaImage2::MEDIA_IMAGE_TYPE_YUV) {
+ if (img.mNumPlanes != 3
+ || img.mPlane[img.Y].mHorizSubsampling != 1
+ || img.mPlane[img.Y].mVertSubsampling != 1) {
return false;
}
@@ -4188,7 +4557,7 @@
notify->setInt32("color-format", videoDef->eColorFormat);
if (mNativeWindow == NULL) {
- DescribeColorFormatParams describeParams;
+ DescribeColorFormat2Params describeParams;
InitOMXParams(&describeParams);
describeParams.eColorFormat = videoDef->eColorFormat;
describeParams.nFrameWidth = videoDef->nFrameWidth;
@@ -4204,54 +4573,68 @@
&describeParams.sMediaImage,
sizeof(describeParams.sMediaImage)));
- MediaImage *img = &describeParams.sMediaImage;
- ALOGV("[%s] MediaImage { F(%ux%u) @%u+%u+%u @%u+%u+%u @%u+%u+%u }",
- mComponentName.c_str(), img->mWidth, img->mHeight,
- img->mPlane[0].mOffset, img->mPlane[0].mColInc, img->mPlane[0].mRowInc,
- img->mPlane[1].mOffset, img->mPlane[1].mColInc, img->mPlane[1].mRowInc,
- img->mPlane[2].mOffset, img->mPlane[2].mColInc, img->mPlane[2].mRowInc);
+ MediaImage2 &img = describeParams.sMediaImage;
+ MediaImage2::PlaneInfo *plane = img.mPlane;
+ ALOGV("[%s] MediaImage { F(%ux%u) @%u+%d+%d @%u+%d+%d @%u+%d+%d }",
+ mComponentName.c_str(), img.mWidth, img.mHeight,
+ plane[0].mOffset, plane[0].mColInc, plane[0].mRowInc,
+ plane[1].mOffset, plane[1].mColInc, plane[1].mRowInc,
+ plane[2].mOffset, plane[2].mColInc, plane[2].mRowInc);
}
}
- if (portIndex != kPortIndexOutput) {
- // TODO: also get input crop
- break;
+ int32_t width = (int32_t)videoDef->nFrameWidth;
+ int32_t height = (int32_t)videoDef->nFrameHeight;
+
+ if (portIndex == kPortIndexOutput) {
+ OMX_CONFIG_RECTTYPE rect;
+ InitOMXParams(&rect);
+ rect.nPortIndex = portIndex;
+
+ if (mOMX->getConfig(
+ mNode,
+ (portIndex == kPortIndexOutput ?
+ OMX_IndexConfigCommonOutputCrop :
+ OMX_IndexConfigCommonInputCrop),
+ &rect, sizeof(rect)) != OK) {
+ rect.nLeft = 0;
+ rect.nTop = 0;
+ rect.nWidth = videoDef->nFrameWidth;
+ rect.nHeight = videoDef->nFrameHeight;
+ }
+
+ if (rect.nLeft < 0 ||
+ rect.nTop < 0 ||
+ rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
+ rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
+ ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
+ rect.nLeft, rect.nTop,
+ rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
+ videoDef->nFrameWidth, videoDef->nFrameHeight);
+ return BAD_VALUE;
+ }
+
+ notify->setRect(
+ "crop",
+ rect.nLeft,
+ rect.nTop,
+ rect.nLeft + rect.nWidth - 1,
+ rect.nTop + rect.nHeight - 1);
+
+ width = rect.nWidth;
+ height = rect.nHeight;
+
+ android_dataspace dataSpace = HAL_DATASPACE_UNKNOWN;
+ (void)getColorAspectsAndDataSpaceForVideoDecoder(
+ width, height, mConfigFormat, notify,
+ mUsingNativeWindow ? &dataSpace : NULL);
+ if (mUsingNativeWindow) {
+ notify->setInt32("android._dataspace", dataSpace);
+ }
+ } else {
+ (void)getInputColorAspectsForVideoEncoder(notify);
}
- OMX_CONFIG_RECTTYPE rect;
- InitOMXParams(&rect);
- rect.nPortIndex = portIndex;
-
- if (mOMX->getConfig(
- mNode,
- (portIndex == kPortIndexOutput ?
- OMX_IndexConfigCommonOutputCrop :
- OMX_IndexConfigCommonInputCrop),
- &rect, sizeof(rect)) != OK) {
- rect.nLeft = 0;
- rect.nTop = 0;
- rect.nWidth = videoDef->nFrameWidth;
- rect.nHeight = videoDef->nFrameHeight;
- }
-
- if (rect.nLeft < 0 ||
- rect.nTop < 0 ||
- rect.nLeft + rect.nWidth > videoDef->nFrameWidth ||
- rect.nTop + rect.nHeight > videoDef->nFrameHeight) {
- ALOGE("Wrong cropped rect (%d, %d) - (%u, %u) vs. frame (%u, %u)",
- rect.nLeft, rect.nTop,
- rect.nLeft + rect.nWidth, rect.nTop + rect.nHeight,
- videoDef->nFrameWidth, videoDef->nFrameHeight);
- return BAD_VALUE;
- }
-
- notify->setRect(
- "crop",
- rect.nLeft,
- rect.nTop,
- rect.nLeft + rect.nWidth - 1,
- rect.nTop + rect.nHeight - 1);
-
break;
}
@@ -4350,15 +4733,11 @@
if (params.nChannels <= 0
|| (params.nChannels != 1 && !params.bInterleaved)
- || params.nBitPerSample != 16u
- || params.eNumData != OMX_NumericalDataSigned
|| params.ePCMMode != OMX_AUDIO_PCMModeLinear) {
- ALOGE("unsupported PCM port: %u channels%s, %u-bit, %s(%d), %s(%d) mode ",
+ ALOGE("unsupported PCM port: %u channels%s, %u-bit",
params.nChannels,
params.bInterleaved ? " interleaved" : "",
- params.nBitPerSample,
- asString(params.eNumData), params.eNumData,
- asString(params.ePCMMode), params.ePCMMode);
+ params.nBitPerSample);
return FAILED_TRANSACTION;
}
@@ -4366,6 +4745,22 @@
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ AudioEncoding encoding = kAudioEncodingPcm16bit;
+ if (params.eNumData == OMX_NumericalDataUnsigned
+ && params.nBitPerSample == 8u) {
+ encoding = kAudioEncodingPcm8bit;
+ } else if (params.eNumData == OMX_NumericalDataFloat
+ && params.nBitPerSample == 32u) {
+ encoding = kAudioEncodingPcmFloat;
+ } else if (params.nBitPerSample != 16u
+ || params.eNumData != OMX_NumericalDataSigned) {
+ ALOGE("unsupported PCM port: %s(%d), %s(%d) mode ",
+ asString(params.eNumData), params.eNumData,
+ asString(params.ePCMMode), params.ePCMMode);
+ return FAILED_TRANSACTION;
+ }
+ notify->setInt32("pcm-encoding", encoding);
+
if (mChannelMaskPresent) {
notify->setInt32("channel-mask", mChannelMask);
}
@@ -4547,6 +4942,7 @@
notify->setString("mime", mime);
notify->setInt32("channel-count", params.nChannels);
notify->setInt32("sample-rate", params.nSamplingRate);
+ notify->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
break;
}
@@ -4584,29 +4980,107 @@
return OK;
}
-void ACodec::sendFormatChange(const sp<AMessage> &reply) {
- sp<AMessage> notify = mBaseOutputFormat->dup();
- notify->setInt32("what", kWhatOutputFormatChanged);
+void ACodec::onDataSpaceChanged(android_dataspace dataSpace, const ColorAspects &aspects) {
+ // aspects are normally communicated in ColorAspects
+ int32_t range, standard, transfer;
+ convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
- if (getPortFormat(kPortIndexOutput, notify) != OK) {
+ // if some aspects are unspecified, use dataspace fields
+ // if some aspects are unspecified, use dataspace fields
+ if (range == 0) {
+ range = (dataSpace & HAL_DATASPACE_RANGE_MASK) >> HAL_DATASPACE_RANGE_SHIFT;
+ }
+ if (standard == 0) {
+ standard = (dataSpace & HAL_DATASPACE_STANDARD_MASK) >> HAL_DATASPACE_STANDARD_SHIFT;
+ }
+ if (transfer == 0) {
+ transfer = (dataSpace & HAL_DATASPACE_TRANSFER_MASK) >> HAL_DATASPACE_TRANSFER_SHIFT;
+ }
+
+ mOutputFormat = mOutputFormat->dup(); // trigger an output format changed event
+ if (range != 0) {
+ mOutputFormat->setInt32("color-range", range);
+ }
+ if (standard != 0) {
+ mOutputFormat->setInt32("color-standard", standard);
+ }
+ if (transfer != 0) {
+ mOutputFormat->setInt32("color-transfer", transfer);
+ }
+
+ ALOGD("dataspace changed to %#x (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "(R:%d(%s), S:%d(%s), T:%d(%s))",
+ dataSpace,
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
+void ACodec::onOutputFormatChanged(sp<const AMessage> expectedFormat) {
+ // store new output format, at the same time mark that this is no longer the first frame
+ mOutputFormat = mBaseOutputFormat->dup();
+
+ if (getPortFormat(kPortIndexOutput, mOutputFormat) != OK) {
ALOGE("[%s] Failed to get port format to send format change", mComponentName.c_str());
return;
}
- AString mime;
- CHECK(notify->findString("mime", &mime));
+ if (expectedFormat != NULL) {
+ sp<const AMessage> changes = expectedFormat->changesFrom(mOutputFormat);
+ sp<const AMessage> to = mOutputFormat->changesFrom(expectedFormat);
+ if (changes->countEntries() != 0 || to->countEntries() != 0) {
+ ALOGW("[%s] BAD CODEC: Output format changed unexpectedly from (diff) %s to (diff) %s",
+ mComponentName.c_str(),
+ changes->debugString(4).c_str(), to->debugString(4).c_str());
+ }
+ }
- int32_t left, top, right, bottom;
- if (mime == MEDIA_MIMETYPE_VIDEO_RAW &&
- mNativeWindow != NULL &&
- notify->findRect("crop", &left, &top, &right, &bottom)) {
- // notify renderer of the crop change
+ if (!mIsVideo && !mIsEncoder) {
+ AudioEncoding pcmEncoding = kAudioEncodingPcm16bit;
+ (void)mConfigFormat->findInt32("pcm-encoding", (int32_t*)&pcmEncoding);
+ AudioEncoding codecPcmEncoding = kAudioEncodingPcm16bit;
+ (void)mOutputFormat->findInt32("pcm-encoding", (int32_t*)&codecPcmEncoding);
+
+ mConverter[kPortIndexOutput] = AudioConverter::Create(codecPcmEncoding, pcmEncoding);
+ if (mConverter[kPortIndexOutput] != NULL) {
+ mOutputFormat->setInt32("pcm-encoding", pcmEncoding);
+ }
+ }
+
+ if (mTunneled) {
+ sendFormatChange();
+ }
+}
+
+void ACodec::addKeyFormatChangesToRenderBufferNotification(sp<AMessage> ¬ify) {
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ if (mime == MEDIA_MIMETYPE_VIDEO_RAW && mNativeWindow != NULL) {
+ // notify renderer of the crop change and dataspace change
// NOTE: native window uses extended right-bottom coordinate
- reply->setRect("crop", left, top, right + 1, bottom + 1);
- } else if (mime == MEDIA_MIMETYPE_AUDIO_RAW &&
- (mEncoderDelay || mEncoderPadding)) {
+ int32_t left, top, right, bottom;
+ if (mOutputFormat->findRect("crop", &left, &top, &right, &bottom)) {
+ notify->setRect("crop", left, top, right + 1, bottom + 1);
+ }
+
+ int32_t dataSpace;
+ if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+ notify->setInt32("dataspace", dataSpace);
+ }
+ }
+}
+
+void ACodec::sendFormatChange() {
+ AString mime;
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ if (mime == MEDIA_MIMETYPE_AUDIO_RAW && (mEncoderDelay || mEncoderPadding)) {
int32_t channelCount;
- CHECK(notify->findInt32("channel-count", &channelCount));
+ CHECK(mOutputFormat->findInt32("channel-count", &channelCount));
if (mSkipCutBuffer != NULL) {
size_t prevbufsize = mSkipCutBuffer->size();
if (prevbufsize != 0) {
@@ -4616,9 +5090,13 @@
mSkipCutBuffer = new SkipCutBuffer(mEncoderDelay, mEncoderPadding, channelCount);
}
+ sp<AMessage> notify = mNotify->dup();
+ notify->setInt32("what", kWhatOutputFormatChanged);
+ notify->setMessage("format", mOutputFormat);
notify->post();
- mSentFormat = true;
+ // mLastOutputFormat is not used when tunneled; doing this just to stay consistent
+ mLastOutputFormat = mOutputFormat;
}
void ACodec::signalError(OMX_ERRORTYPE error, status_t internalError) {
@@ -4919,6 +5397,17 @@
bool ACodec::BaseState::onOMXEvent(
OMX_EVENTTYPE event, OMX_U32 data1, OMX_U32 data2) {
+ if (event == OMX_EventDataSpaceChanged) {
+ ColorAspects aspects;
+ aspects.mRange = (ColorAspects::Range)((data2 >> 24) & 0xFF);
+ aspects.mPrimaries = (ColorAspects::Primaries)((data2 >> 16) & 0xFF);
+ aspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)((data2 >> 8) & 0xFF);
+ aspects.mTransfer = (ColorAspects::Transfer)(data2 & 0xFF);
+
+ mCodec->onDataSpaceChanged((android_dataspace)data1, aspects);
+ return true;
+ }
+
if (event != OMX_EventError) {
ALOGV("[%s] EVENT(%d, 0x%08x, 0x%08x)",
mCodec->mComponentName.c_str(), event, data1, data2);
@@ -5088,20 +5577,21 @@
flags |= OMX_BUFFERFLAG_EOS;
}
- if (buffer != info->mData) {
+ if (buffer != info->mCodecData) {
ALOGV("[%s] Needs to copy input data for buffer %u. (%p != %p)",
mCodec->mComponentName.c_str(),
bufferID,
- buffer.get(), info->mData.get());
+ buffer.get(), info->mCodecData.get());
- if (buffer->size() > info->mData->capacity()) {
- ALOGE("data size (%zu) is greated than buffer capacity (%zu)",
- buffer->size(), // this is the data received
- info->mData->capacity()); // this is out buffer size
- mCodec->signalError(OMX_ErrorUndefined, FAILED_TRANSACTION);
+ sp<DataConverter> converter = mCodec->mConverter[kPortIndexInput];
+ if (converter == NULL) {
+ converter = getCopyConverter();
+ }
+ status_t err = converter->convert(buffer, info->mCodecData);
+ if (err != OK) {
+ mCodec->signalError(OMX_ErrorUndefined, err);
return;
}
- memcpy(info->mData->data(), buffer->data(), buffer->size());
}
if (flags & OMX_BUFFERFLAG_CODECCONFIG) {
@@ -5144,7 +5634,7 @@
mCodec->mNode,
bufferID,
0,
- buffer->size(),
+ info->mCodecData->size(),
flags,
timeUs,
info->mFenceFd);
@@ -5322,9 +5812,15 @@
sp<AMessage> reply =
new AMessage(kWhatOutputBufferDrained, mCodec);
- if (!mCodec->mSentFormat && rangeLength > 0) {
- mCodec->sendFormatChange(reply);
+ if (mCodec->mOutputFormat != mCodec->mLastOutputFormat && rangeLength > 0) {
+ // pretend that output format has changed on the first frame (we used to do this)
+ if (mCodec->mBaseOutputFormat == mCodec->mOutputFormat) {
+ mCodec->onOutputFormatChanged(mCodec->mOutputFormat);
+ }
+ mCodec->addKeyFormatChangesToRenderBufferNotification(reply);
+ mCodec->sendFormatChange();
}
+
if (mCodec->usingMetadataOnEncoderOutput()) {
native_handle_t *handle = NULL;
VideoGrallocMetadata &grallocMeta = *(VideoGrallocMetadata *)info->mData->data();
@@ -5344,8 +5840,17 @@
info->mData->meta()->setPointer("handle", handle);
info->mData->meta()->setInt32("rangeOffset", rangeOffset);
info->mData->meta()->setInt32("rangeLength", rangeLength);
- } else {
+ } else if (info->mData == info->mCodecData) {
info->mData->setRange(rangeOffset, rangeLength);
+ } else {
+ info->mCodecData->setRange(rangeOffset, rangeLength);
+ // in this case we know that mConverter is not null
+ status_t err = mCodec->mConverter[kPortIndexOutput]->convert(
+ info->mCodecData, info->mData);
+ if (err != OK) {
+ mCodec->signalError(OMX_ErrorUndefined, makeNoSideEffectStatus(err));
+ return true;
+ }
}
#if 0
if (mCodec->mNativeWindow == NULL) {
@@ -5422,6 +5927,13 @@
ALOGW_IF(err != NO_ERROR, "failed to set crop: %d", err);
}
+ int32_t dataSpace;
+ if (msg->findInt32("dataspace", &dataSpace)) {
+ status_t err = native_window_set_buffers_data_space(
+ mCodec->mNativeWindow.get(), (android_dataspace)dataSpace);
+ ALOGW_IF(err != NO_ERROR, "failed to set dataspace: %d", err);
+ }
+
int32_t render;
if (mCodec->mNativeWindow != NULL
&& msg->findInt32("render", &render) && render != 0
@@ -5545,6 +6057,7 @@
mDeathNotifier.clear();
}
+ mCodec->mUsingNativeWindow = false;
mCodec->mNativeWindow.clear();
mCodec->mNativeWindowUsageBits = 0;
mCodec->mNode = 0;
@@ -5553,6 +6066,8 @@
mCodec->mFlags = 0;
mCodec->mInputMetadataType = kMetadataBufferTypeInvalid;
mCodec->mOutputMetadataType = kMetadataBufferTypeInvalid;
+ mCodec->mConverter[0].clear();
+ mCodec->mConverter[1].clear();
mCodec->mComponentName.clear();
}
@@ -5977,6 +6492,17 @@
"using-sw-read-often", !!(usageBits & GRALLOC_USAGE_SW_READ_OFTEN));
}
+ sp<ABuffer> colorAspectsBuffer;
+ if (mCodec->mInputFormat->findBuffer("android._color-aspects", &colorAspectsBuffer)) {
+ err = mCodec->mOMX->setInternalOption(
+ mCodec->mNode, kPortIndexInput, IOMX::INTERNAL_OPTION_COLOR_ASPECTS,
+ colorAspectsBuffer->base(), colorAspectsBuffer->capacity());
+ if (err != OK) {
+ ALOGE("[%s] Unable to configure color aspects (err %d)",
+ mCodec->mComponentName.c_str(), err);
+ return err;
+ }
+ }
return OK;
}
@@ -5987,9 +6513,17 @@
sp<AMessage> notify = mCodec->mNotify->dup();
notify->setInt32("what", CodecBase::kWhatInputSurfaceCreated);
+ android_dataspace dataSpace;
+ status_t err =
+ mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+ notify->setMessage("input-format", mCodec->mInputFormat);
+ notify->setMessage("output-format", mCodec->mOutputFormat);
+
sp<IGraphicBufferProducer> bufferProducer;
- status_t err = mCodec->mOMX->createInputSurface(
- mCodec->mNode, kPortIndexInput, &bufferProducer, &mCodec->mInputMetadataType);
+ if (err == OK) {
+ err = mCodec->mOMX->createInputSurface(
+ mCodec->mNode, kPortIndexInput, dataSpace, &bufferProducer, &mCodec->mInputMetadataType);
+ }
if (err == OK) {
err = setupInputSurface();
@@ -6020,11 +6554,20 @@
CHECK(msg->findObject("input-surface", &obj));
sp<PersistentSurface> surface = static_cast<PersistentSurface *>(obj.get());
- status_t err = mCodec->mOMX->setInputSurface(
- mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
- &mCodec->mInputMetadataType);
+ android_dataspace dataSpace;
+ status_t err =
+ mCodec->setInitialColorAspectsForVideoEncoderSurfaceAndGetDataSpace(&dataSpace);
+ notify->setMessage("input-format", mCodec->mInputFormat);
+ notify->setMessage("output-format", mCodec->mOutputFormat);
if (err == OK) {
+ err = mCodec->mOMX->setInputSurface(
+ mCodec->mNode, kPortIndexInput, surface->getBufferConsumer(),
+ &mCodec->mInputMetadataType);
+ }
+
+ if (err == OK) {
+ surface->getBufferConsumer()->setDefaultBufferDataSpace(dataSpace);
err = setupInputSurface();
}
@@ -6566,6 +7109,8 @@
{
CHECK_EQ(data1, (OMX_U32)kPortIndexOutput);
+ mCodec->onOutputFormatChanged();
+
if (data2 == 0 || data2 == OMX_IndexParamPortDefinition) {
mCodec->mMetadataBuffersToSubmit = 0;
CHECK_EQ(mCodec->mOMX->sendCommand(
@@ -6576,15 +7121,8 @@
mCodec->freeOutputBuffersNotOwnedByComponent();
mCodec->changeState(mCodec->mOutputPortSettingsChangedState);
- } else if (data2 == OMX_IndexConfigCommonOutputCrop
- || data2 == OMX_IndexConfigAndroidIntraRefresh) {
- mCodec->mSentFormat = false;
-
- if (mCodec->mTunneled) {
- sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
- mCodec->sendFormatChange(dummy);
- }
- } else {
+ } else if (data2 != OMX_IndexConfigCommonOutputCrop
+ && data2 != OMX_IndexConfigAndroidIntraRefresh) {
ALOGV("[%s] OMX_EventPortSettingsChanged 0x%08x",
mCodec->mComponentName.c_str(), data2);
}
@@ -6711,13 +7249,6 @@
return false;
}
- mCodec->mSentFormat = false;
-
- if (mCodec->mTunneled) {
- sp<AMessage> dummy = new AMessage(kWhatOutputBufferDrained, mCodec);
- mCodec->sendFormatChange(dummy);
- }
-
ALOGV("[%s] Output port now reenabled.", mCodec->mComponentName.c_str());
if (mCodec->mExecutingState->active()) {
@@ -6776,7 +7307,7 @@
ALOGV("[%s] Now Executing->Idle", mCodec->mComponentName.c_str());
mComponentNowIdle = false;
- mCodec->mSentFormat = false;
+ mCodec->mLastOutputFormat.clear();
}
bool ACodec::ExecutingToIdleState::onOMXEvent(
diff --git a/media/libstagefright/Android.mk b/media/libstagefright/Android.mk
index 68e02e7..2445842 100644
--- a/media/libstagefright/Android.mk
+++ b/media/libstagefright/Android.mk
@@ -14,6 +14,7 @@
CameraSource.cpp \
CameraSourceTimeLapse.cpp \
CodecBase.cpp \
+ DataConverter.cpp \
DataSource.cpp \
DataURISource.cpp \
DRMExtractor.cpp \
@@ -72,8 +73,10 @@
$(TOP)/external/tremolo \
$(TOP)/external/libvpx/libwebm \
$(TOP)/system/netd/include \
+ $(call include-path-for, audio-utils)
LOCAL_SHARED_LIBRARIES := \
+ libaudioutils \
libbinder \
libcamera_client \
libcutils \
diff --git a/media/libstagefright/AudioPlayer.cpp b/media/libstagefright/AudioPlayer.cpp
index cb42847..b3fb8d4 100644
--- a/media/libstagefright/AudioPlayer.cpp
+++ b/media/libstagefright/AudioPlayer.cpp
@@ -25,6 +25,7 @@
#include <media/AudioTrack.h>
#include <media/openmax/OMX_Audio.h>
#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/AudioPlayer.h>
#include <media/stagefright/MediaDefs.h>
@@ -71,6 +72,14 @@
mSource = source;
}
+ALookup<audio_format_t, int32_t> sAudioFormatToPcmEncoding {
+ {
+ { AUDIO_FORMAT_PCM_16_BIT, kAudioEncodingPcm16bit },
+ { AUDIO_FORMAT_PCM_8_BIT, kAudioEncodingPcm8bit },
+ { AUDIO_FORMAT_PCM_FLOAT, kAudioEncodingPcmFloat },
+ }
+};
+
status_t AudioPlayer::start(bool sourceAlreadyStarted) {
CHECK(!mStarted);
CHECK(mSource != NULL);
@@ -129,6 +138,10 @@
}
audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
+ int32_t pcmEncoding;
+ if (format->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+ sAudioFormatToPcmEncoding.map(pcmEncoding, &audioFormat);
+ }
if (useOffload()) {
if (mapMimeToAudioFormat(audioFormat, mime) != OK) {
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 6e4a1dd..f28ac58 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -184,6 +184,7 @@
meta->setInt32(kKeySampleRate, mSampleRate);
meta->setInt32(kKeyChannelCount, mRecord->channelCount());
meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);
+ meta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
return meta;
}
diff --git a/media/libstagefright/CameraSource.cpp b/media/libstagefright/CameraSource.cpp
index 64d4302..cb974ae 100644
--- a/media/libstagefright/CameraSource.cpp
+++ b/media/libstagefright/CameraSource.cpp
@@ -141,14 +141,14 @@
size.width = -1;
size.height = -1;
- sp<ICamera> camera;
+ sp<hardware::ICamera> camera;
return new CameraSource(camera, NULL, 0, clientName, Camera::USE_CALLING_UID,
Camera::USE_CALLING_PID, size, -1, NULL, false);
}
// static
CameraSource *CameraSource::CreateFromCamera(
- const sp<ICamera>& camera,
+ const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
@@ -166,7 +166,7 @@
}
CameraSource::CameraSource(
- const sp<ICamera>& camera,
+ const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
@@ -206,7 +206,7 @@
}
status_t CameraSource::isCameraAvailable(
- const sp<ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
+ const sp<hardware::ICamera>& camera, const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId, const String16& clientName, uid_t clientUid, pid_t clientPid) {
if (camera == 0) {
@@ -489,7 +489,7 @@
* @return OK if no error.
*/
status_t CameraSource::init(
- const sp<ICamera>& camera,
+ const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
@@ -581,7 +581,7 @@
}
status_t CameraSource::initWithCameraAccess(
- const sp<ICamera>& camera,
+ const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
@@ -630,18 +630,18 @@
}
// By default, store real data in video buffers.
- mVideoBufferMode = ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
+ mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
if (storeMetaDataInVideoBuffers) {
- if (OK == mCamera->setVideoBufferMode(ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE)) {
- mVideoBufferMode = ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE;
+ if (OK == mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE)) {
+ mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE;
} else if (OK == mCamera->setVideoBufferMode(
- ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA)) {
- mVideoBufferMode = ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA;
+ hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA)) {
+ mVideoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA;
}
}
- if (mVideoBufferMode == ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
- err = mCamera->setVideoBufferMode(ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV);
+ if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV) {
+ err = mCamera->setVideoBufferMode(hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV);
if (err != OK) {
ALOGE("%s: Setting video buffer mode to VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV failed: "
"%s (err=%d)", __FUNCTION__, strerror(-err), err);
@@ -686,7 +686,7 @@
int64_t token = IPCThreadState::self()->clearCallingIdentity();
status_t err;
- if (mVideoBufferMode == ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+ if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
// Initialize buffer queue.
err = initBufferQueue(mVideoSize.width, mVideoSize.height, mEncoderFormat,
(android_dataspace_t)mEncoderDataSpace,
@@ -758,7 +758,7 @@
mStartTimeUs = 0;
mNumInputBuffers = 0;
mEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- mEncoderDataSpace = HAL_DATASPACE_BT709;
+ mEncoderDataSpace = HAL_DATASPACE_V0_BT709;
if (meta) {
int64_t startTimeUs;
@@ -774,10 +774,10 @@
// apply encoder color format if specified
if (meta->findInt32(kKeyPixelFormat, &mEncoderFormat)) {
- ALOGV("Using encoder format: %#x", mEncoderFormat);
+ ALOGI("Using encoder format: %#x", mEncoderFormat);
}
if (meta->findInt32(kKeyColorSpace, &mEncoderDataSpace)) {
- ALOGV("Using encoder data space: %#x", mEncoderDataSpace);
+ ALOGI("Using encoder data space: %#x", mEncoderDataSpace);
}
}
@@ -894,7 +894,7 @@
void CameraSource::releaseRecordingFrame(const sp<IMemory>& frame) {
ALOGV("releaseRecordingFrame");
- if (mVideoBufferMode == ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
+ if (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE) {
// Return the buffer to buffer queue in VIDEO_BUFFER_MODE_BUFFER_QUEUE mode.
ssize_t offset;
size_t size;
@@ -919,6 +919,7 @@
mReceivedBufferItemMap.removeItemsAt(index);
mVideoBufferConsumer->releaseBuffer(buffer);
mMemoryBases.push_back(frame);
+ mMemoryBaseAvailableCond.signal();
} else if (mCameraRecordingProxy != NULL) {
mCameraRecordingProxy->releaseRecordingFrame(frame);
} else if (mCamera != NULL) {
@@ -1113,7 +1114,7 @@
return true;
}
-void CameraSource::processBufferQueueFrame(const BufferItem& buffer) {
+void CameraSource::processBufferQueueFrame(BufferItem& buffer) {
Mutex::Autolock autoLock(mLock);
int64_t timestampUs = buffer.mTimestamp / 1000;
@@ -1122,10 +1123,13 @@
return;
}
- if (mMemoryBases.empty()) {
- ALOGW("%s: No available memory base. Dropping a recording frame.", __FUNCTION__);
- mVideoBufferConsumer->releaseBuffer(buffer);
- return;
+ while (mMemoryBases.empty()) {
+ if (mMemoryBaseAvailableCond.waitRelative(mLock, kMemoryBaseAvailableTimeoutNs) ==
+ TIMED_OUT) {
+ ALOGW("Waiting on an available memory base timed out. Dropping a recording frame.");
+ mVideoBufferConsumer->releaseBuffer(buffer);
+ return;
+ }
}
++mNumFramesReceived;
@@ -1161,8 +1165,8 @@
// Output buffers will contain metadata if camera sends us buffer in metadata mode or via
// buffer queue.
- return (mVideoBufferMode == ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA ||
- mVideoBufferMode == ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
+ return (mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_METADATA ||
+ mVideoBufferMode == hardware::ICamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
}
CameraSource::ProxyListener::ProxyListener(const sp<CameraSource>& source) {
diff --git a/media/libstagefright/CameraSourceTimeLapse.cpp b/media/libstagefright/CameraSourceTimeLapse.cpp
index 202ec42..d52567c 100644
--- a/media/libstagefright/CameraSourceTimeLapse.cpp
+++ b/media/libstagefright/CameraSourceTimeLapse.cpp
@@ -35,7 +35,7 @@
// static
CameraSourceTimeLapse *CameraSourceTimeLapse::CreateFromCamera(
- const sp<ICamera> &camera,
+ const sp<hardware::ICamera> &camera,
const sp<ICameraRecordingProxy> &proxy,
int32_t cameraId,
const String16& clientName,
@@ -64,7 +64,7 @@
}
CameraSourceTimeLapse::CameraSourceTimeLapse(
- const sp<ICamera>& camera,
+ const sp<hardware::ICamera>& camera,
const sp<ICameraRecordingProxy>& proxy,
int32_t cameraId,
const String16& clientName,
@@ -308,4 +308,12 @@
CameraSource::dataCallbackTimestamp(timestampUs, msgType, data);
}
+void CameraSourceTimeLapse::processBufferQueueFrame(BufferItem& buffer) {
+ ALOGV("processBufferQueueFrame");
+ int64_t timestampUs = buffer.mTimestamp / 1000;
+ mSkipCurrentFrame = skipFrameAndModifyTimeStamp(&timestampUs);
+ buffer.mTimestamp = timestampUs * 1000;
+ CameraSource::processBufferQueueFrame(buffer);
+}
+
} // namespace android
diff --git a/media/libstagefright/DataConverter.cpp b/media/libstagefright/DataConverter.cpp
new file mode 100644
index 0000000..aea47f3
--- /dev/null
+++ b/media/libstagefright/DataConverter.cpp
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "DataConverter"
+
+#include "include/DataConverter.h"
+
+#include <audio_utils/primitives.h>
+
+#include <media/stagefright/foundation/ABuffer.h>
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/AUtils.h>
+
+namespace android {
+
+status_t DataConverter::convert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+ CHECK(source->base() != target->base());
+ size_t size = targetSize(source->size());
+ status_t err = OK;
+ if (size > target->capacity()) {
+ ALOGE("data size (%zu) is greater than buffer capacity (%zu)",
+ size, // this is the data received/to be converted
+ target->capacity()); // this is out buffer size
+ err = FAILED_TRANSACTION;
+ } else {
+ err = safeConvert(source, target);
+ }
+ target->setRange(0, err == OK ? size : 0);
+ return err;
+}
+
+status_t DataConverter::safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) {
+ memcpy(target->base(), source->data(), source->size());
+ return OK;
+}
+
+size_t DataConverter::sourceSize(size_t targetSize) {
+ return targetSize;
+}
+
+size_t DataConverter::targetSize(size_t sourceSize) {
+ return sourceSize;
+}
+
+DataConverter::~DataConverter() { }
+
+
+size_t SampleConverterBase::sourceSize(size_t targetSize) {
+ size_t numSamples = targetSize / mTargetSampleSize;
+ if (numSamples > SIZE_MAX / mSourceSampleSize) {
+ ALOGW("limiting source size due to overflow (%zu*%zu/%zu)",
+ targetSize, mSourceSampleSize, mTargetSampleSize);
+ return SIZE_MAX;
+ }
+ return numSamples * mSourceSampleSize;
+}
+
+size_t SampleConverterBase::targetSize(size_t sourceSize) {
+ // we round up on conversion
+ size_t numSamples = divUp(sourceSize, (size_t)mSourceSampleSize);
+ if (numSamples > SIZE_MAX / mTargetSampleSize) {
+ ALOGW("limiting target size due to overflow (%zu*%zu/%zu)",
+ sourceSize, mTargetSampleSize, mSourceSampleSize);
+ return SIZE_MAX;
+ }
+ return numSamples * mTargetSampleSize;
+}
+
+
+static size_t getAudioSampleSize(AudioEncoding e) {
+ switch (e) {
+ case kAudioEncodingPcm16bit: return 2;
+ case kAudioEncodingPcm8bit: return 1;
+ case kAudioEncodingPcmFloat: return 4;
+ default: return 0;
+ }
+}
+
+
+// static
+AudioConverter* AudioConverter::Create(AudioEncoding source, AudioEncoding target) {
+ uint32_t sourceSampleSize = getAudioSampleSize(source);
+ uint32_t targetSampleSize = getAudioSampleSize(target);
+ if (sourceSampleSize && targetSampleSize && sourceSampleSize != targetSampleSize) {
+ return new AudioConverter(source, sourceSampleSize, target, targetSampleSize);
+ }
+ return NULL;
+}
+
+status_t AudioConverter::safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt) {
+ if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcm16bit) {
+ memcpy_to_u8_from_i16((uint8_t*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+ } else if (mTo == kAudioEncodingPcm8bit && mFrom == kAudioEncodingPcmFloat) {
+ memcpy_to_u8_from_float((uint8_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+ } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcm8bit) {
+ memcpy_to_i16_from_u8((int16_t*)tgt->base(), (const uint8_t*)src->data(), src->size());
+ } else if (mTo == kAudioEncodingPcm16bit && mFrom == kAudioEncodingPcmFloat) {
+ memcpy_to_i16_from_float((int16_t*)tgt->base(), (const float*)src->data(), src->size() / 4);
+ } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm8bit) {
+ memcpy_to_float_from_u8((float*)tgt->base(), (const uint8_t*)src->data(), src->size());
+ } else if (mTo == kAudioEncodingPcmFloat && mFrom == kAudioEncodingPcm16bit) {
+ memcpy_to_float_from_i16((float*)tgt->base(), (const int16_t*)src->data(), src->size() / 2);
+ } else {
+ return INVALID_OPERATION;
+ }
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libstagefright/FLACExtractor.cpp b/media/libstagefright/FLACExtractor.cpp
index 6e99d02..13b66f3 100644
--- a/media/libstagefright/FLACExtractor.cpp
+++ b/media/libstagefright/FLACExtractor.cpp
@@ -615,6 +615,7 @@
mTrackMetadata->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
mTrackMetadata->setInt32(kKeyChannelCount, getChannels());
mTrackMetadata->setInt32(kKeySampleRate, getSampleRate());
+ mTrackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
// sample rate is non-zero, so division by zero not possible
mTrackMetadata->setInt64(kKeyDuration,
(getTotalSamples() * 1000000LL) / getSampleRate());
diff --git a/media/libstagefright/MPEG4Extractor.cpp b/media/libstagefright/MPEG4Extractor.cpp
index d5a869d..f5549e4 100644
--- a/media/libstagefright/MPEG4Extractor.cpp
+++ b/media/libstagefright/MPEG4Extractor.cpp
@@ -34,6 +34,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDefs.h>
@@ -755,13 +756,22 @@
}
// Given a time in seconds since Jan 1 1904, produce a human-readable string.
-static void convertTimeToDate(int64_t time_1904, String8 *s) {
- time_t time_1970 = time_1904 - (((66 * 365 + 17) * 24) * 3600);
+static bool convertTimeToDate(int64_t time_1904, String8 *s) {
+ // delta between mpeg4 time and unix epoch time
+ static const int64_t delta = (((66 * 365 + 17) * 24) * 3600);
+ if (time_1904 < INT64_MIN + delta) {
+ return false;
+ }
+ time_t time_1970 = time_1904 - delta;
char tmp[32];
- strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", gmtime(&time_1970));
-
- s->setTo(tmp);
+ struct tm* tm = gmtime(&time_1970);
+ if (tm != NULL &&
+ strftime(tmp, sizeof(tmp), "%Y%m%dT%H%M%S.000Z", tm) > 0) {
+ s->setTo(tmp);
+ return true;
+ }
+ return false;
}
status_t MPEG4Extractor::parseChunk(off64_t *offset, int depth) {
@@ -1879,14 +1889,15 @@
}
duration = d32;
}
- if (duration != 0 && mHeaderTimescale != 0) {
+ if (duration != 0 && mHeaderTimescale != 0 && duration < UINT64_MAX / 1000000) {
mFileMetaData->setInt64(kKeyDuration, duration * 1000000 / mHeaderTimescale);
}
String8 s;
- convertTimeToDate(creationTime, &s);
+ if (convertTimeToDate(creationTime, &s)) {
+ mFileMetaData->setCString(kKeyDate, s.string());
+ }
- mFileMetaData->setCString(kKeyDate, s.string());
break;
}
@@ -2083,6 +2094,21 @@
break;
}
+ case FOURCC('c', 'o', 'l', 'r'):
+ {
+ *offset += chunk_size;
+ // this must be in a VisualSampleEntry box under the Sample Description Box ('stsd')
+ // ignore otherwise
+ if (depth >= 2 && mPath[depth - 2] == FOURCC('s', 't', 's', 'd')) {
+ status_t err = parseColorInfo(data_offset, chunk_data_size);
+ if (err != OK) {
+ return err;
+ }
+ }
+
+ break;
+ }
+
case FOURCC('t', 'i', 't', 'l'):
case FOURCC('p', 'e', 'r', 'f'):
case FOURCC('a', 'u', 't', 'h'):
@@ -2663,6 +2689,49 @@
return OK;
}
+status_t MPEG4Extractor::parseColorInfo(off64_t offset, size_t size) {
+ if (size < 4 || size == SIZE_MAX || mLastTrack == NULL) {
+ return ERROR_MALFORMED;
+ }
+
+ uint8_t *buffer = new (std::nothrow) uint8_t[size + 1];
+ if (buffer == NULL) {
+ return ERROR_MALFORMED;
+ }
+ if (mDataSource->readAt(offset, buffer, size) != (ssize_t)size) {
+ delete[] buffer;
+ buffer = NULL;
+
+ return ERROR_IO;
+ }
+
+ int32_t type = U32_AT(&buffer[0]);
+ if ((type == FOURCC('n', 'c', 'l', 'x') && size >= 11)
+ || (type == FOURCC('n', 'c', 'l', 'c') && size >= 10)) {
+ int32_t primaries = U16_AT(&buffer[4]);
+ int32_t transfer = U16_AT(&buffer[6]);
+ int32_t coeffs = U16_AT(&buffer[8]);
+ bool fullRange = (type == FOURCC('n', 'c', 'l', 'x')) && (buffer[10] & 128);
+
+ ColorAspects aspects;
+ ColorUtils::convertIsoColorAspectsToCodecAspects(
+ primaries, transfer, coeffs, fullRange, aspects);
+
+ // only store the first color specification
+ if (!mLastTrack->meta->hasData(kKeyColorPrimaries)) {
+ mLastTrack->meta->setInt32(kKeyColorPrimaries, aspects.mPrimaries);
+ mLastTrack->meta->setInt32(kKeyTransferFunction, aspects.mTransfer);
+ mLastTrack->meta->setInt32(kKeyColorMatrix, aspects.mMatrixCoeffs);
+ mLastTrack->meta->setInt32(kKeyColorRange, aspects.mRange);
+ }
+ }
+
+ delete[] buffer;
+ buffer = NULL;
+
+ return OK;
+}
+
status_t MPEG4Extractor::parse3GPPMetaData(off64_t offset, size_t size, int depth) {
if (size < 4 || size == SIZE_MAX) {
return ERROR_MALFORMED;
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index d520cb3..58bfa67 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -30,6 +30,7 @@
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/MPEG4Writer.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
@@ -371,6 +372,7 @@
void writeVmhdBox();
void writeHdlrBox();
void writeTkhdBox(uint32_t now);
+ void writeColrBox();
void writeMp4aEsdsBox();
void writeMp4vEsdsBox();
void writeAudioFourCCBox();
@@ -2300,6 +2302,7 @@
const bool hasMultipleTracks = (mOwner->numTracks() > 1);
int64_t chunkTimestampUs = 0;
int32_t nChunks = 0;
+ int32_t nActualFrames = 0; // frames containing non-CSD data (non-0 length)
int32_t nZeroLengthFrames = 0;
int64_t lastTimestampUs = 0; // Previous sample time stamp
int64_t lastDurationUs = 0; // Between the previous two samples
@@ -2352,23 +2355,31 @@
int32_t isCodecConfig;
if (buffer->meta_data()->findInt32(kKeyIsCodecConfig, &isCodecConfig)
&& isCodecConfig) {
- CHECK(!mGotAllCodecSpecificData);
+ // if config format (at track addition) already had CSD, keep that
+ // UNLESS we have not received any frames yet.
+ // TODO: for now the entire CSD has to come in one frame for encoders, even though
+ // they need to be spread out for decoders.
+ if (mGotAllCodecSpecificData && nActualFrames > 0) {
+ ALOGI("ignoring additional CSD for video track after first frame");
+ } else {
+ mMeta = mSource->getFormat(); // get output format after format change
- if (mIsAvc) {
- status_t err = makeAVCCodecSpecificData(
- (const uint8_t *)buffer->data()
- + buffer->range_offset(),
- buffer->range_length());
- CHECK_EQ((status_t)OK, err);
- } else if (mIsHevc) {
- status_t err = makeHEVCCodecSpecificData(
- (const uint8_t *)buffer->data()
- + buffer->range_offset(),
- buffer->range_length());
- CHECK_EQ((status_t)OK, err);
- } else if (mIsMPEG4) {
- copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
- buffer->range_length());
+ if (mIsAvc) {
+ status_t err = makeAVCCodecSpecificData(
+ (const uint8_t *)buffer->data()
+ + buffer->range_offset(),
+ buffer->range_length());
+ CHECK_EQ((status_t)OK, err);
+ } else if (mIsHevc) {
+ status_t err = makeHEVCCodecSpecificData(
+ (const uint8_t *)buffer->data()
+ + buffer->range_offset(),
+ buffer->range_length());
+ CHECK_EQ((status_t)OK, err);
+ } else if (mIsMPEG4) {
+ copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
+ buffer->range_length());
+ }
}
buffer->release();
@@ -2378,6 +2389,8 @@
continue;
}
+ ++nActualFrames;
+
// Make a deep copy of the MediaBuffer and Metadata and release
// the original as soon as we can
MediaBuffer *copy = new MediaBuffer(buffer->range_length());
@@ -2404,10 +2417,14 @@
updateTrackSizeEstimate();
if (mOwner->exceedsFileSizeLimit()) {
+ ALOGW("Recorded file size exceeds limit %" PRId64 "bytes",
+ mOwner->mMaxFileSizeLimitBytes);
mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_FILESIZE_REACHED, 0);
break;
}
if (mOwner->exceedsFileDurationLimit()) {
+ ALOGW("Recorded file duration exceeds limit %" PRId64 "microseconds",
+ mOwner->mMaxFileDurationLimitUs);
mOwner->notify(MEDIA_RECORDER_EVENT_INFO, MEDIA_RECORDER_INFO_MAX_DURATION_REACHED, 0);
break;
}
@@ -2956,9 +2973,32 @@
}
writePaspBox();
+ writeColrBox();
mOwner->endBox(); // mp4v, s263 or avc1
}
+void MPEG4Writer::Track::writeColrBox() {
+ ColorAspects aspects;
+ memset(&aspects, 0, sizeof(aspects));
+ // TRICKY: using | instead of || because we want to execute all findInt32-s
+ if (mMeta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries)
+ | mMeta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer)
+ | mMeta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs)
+ | mMeta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange)) {
+ int32_t primaries, transfer, coeffs;
+ bool fullRange;
+ ColorUtils::convertCodecColorAspectsToIsoAspects(
+ aspects, &primaries, &transfer, &coeffs, &fullRange);
+ mOwner->beginBox("colr");
+ mOwner->writeFourcc("nclx");
+ mOwner->writeInt16(primaries);
+ mOwner->writeInt16(transfer);
+ mOwner->writeInt16(coeffs);
+ mOwner->writeInt8(fullRange ? 128 : 0);
+ mOwner->endBox(); // colr
+ }
+}
+
void MPEG4Writer::Track::writeAudioFourCCBox() {
const char *mime;
bool success = mMeta->findCString(kKeyMIMEType, &mime);
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index fb1f401..e8cd58a 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -399,9 +399,11 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = secureCodec ? kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type =
+ secureCodec ? MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -468,13 +470,14 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = (mFlags & kFlagIsSecure) ?
- kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+ MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -553,7 +556,8 @@
return size;
}
-void MediaCodec::addResource(const String8 &type, const String8 &subtype, uint64_t value) {
+void MediaCodec::addResource(
+ MediaResource::Type type, MediaResource::SubType subtype, uint64_t value) {
Vector<MediaResource> resources;
resources.push_back(MediaResource(type, subtype, value));
mResourceManagerService->addResource(
@@ -565,13 +569,14 @@
status_t err;
Vector<MediaResource> resources;
- const char *type = (mFlags & kFlagIsSecure) ?
- kResourceSecureCodec : kResourceNonSecureCodec;
- const char *subtype = mIsVideo ? kResourceVideoCodec : kResourceAudioCodec;
- resources.push_back(MediaResource(String8(type), String8(subtype), 1));
+ MediaResource::Type type = (mFlags & kFlagIsSecure) ?
+ MediaResource::kSecureCodec : MediaResource::kNonSecureCodec;
+ MediaResource::SubType subtype =
+ mIsVideo ? MediaResource::kVideoCodec : MediaResource::kAudioCodec;
+ resources.push_back(MediaResource(type, subtype, 1));
// Don't know the buffer size at this point, but it's fine to use 1 because
// the reclaimResource call doesn't consider the requester's buffer size for now.
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 1));
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -1228,18 +1233,18 @@
mFlags &= ~kFlagUsesSoftwareRenderer;
}
- String8 resourceType;
+ MediaResource::Type resourceType;
if (mComponentName.endsWith(".secure")) {
mFlags |= kFlagIsSecure;
- resourceType = String8(kResourceSecureCodec);
+ resourceType = MediaResource::kSecureCodec;
} else {
mFlags &= ~kFlagIsSecure;
- resourceType = String8(kResourceNonSecureCodec);
+ resourceType = MediaResource::kNonSecureCodec;
}
if (mIsVideo) {
// audio codec is currently ignored.
- addResource(resourceType, String8(kResourceVideoCodec), 1);
+ addResource(resourceType, MediaResource::kVideoCodec, 1);
}
(new AMessage)->postReply(mReplyID);
@@ -1261,7 +1266,10 @@
CHECK(msg->findMessage("input-format", &mInputFormat));
CHECK(msg->findMessage("output-format", &mOutputFormat));
-
+ ALOGV("[%s] configured as input format: %s, output format: %s",
+ mComponentName.c_str(),
+ mInputFormat->debugString(4).c_str(),
+ mOutputFormat->debugString(4).c_str());
int32_t usingSwRenderer;
if (mOutputFormat->findInt32("using-sw-renderer", &usingSwRenderer)
&& usingSwRenderer) {
@@ -1280,6 +1288,12 @@
if (!msg->findInt32("err", &err)) {
sp<RefBase> obj;
msg->findObject("input-surface", &obj);
+ CHECK(msg->findMessage("input-format", &mInputFormat));
+ CHECK(msg->findMessage("output-format", &mOutputFormat));
+ ALOGV("[%s] input surface created as input format: %s, output format: %s",
+ mComponentName.c_str(),
+ mInputFormat->debugString(4).c_str(),
+ mOutputFormat->debugString(4).c_str());
CHECK(obj != NULL);
response->setObject("input-surface", obj);
mHaveInputSurface = true;
@@ -1376,10 +1390,9 @@
// allocating input buffers, so this is a good
// indication that now all buffers are allocated.
if (mIsVideo) {
- String8 subtype;
addResource(
- String8(kResourceGraphicMemory),
- subtype,
+ MediaResource::kGraphicMemory,
+ MediaResource::kUnspecifiedSubType,
getGraphicBufferSize());
}
setState(STARTED);
@@ -1394,21 +1407,34 @@
case CodecBase::kWhatOutputFormatChanged:
{
- ALOGV("codec output format changed");
+ CHECK(msg->findMessage("format", &mOutputFormat));
+
+ ALOGV("[%s] output format changed to: %s",
+ mComponentName.c_str(), mOutputFormat->debugString(4).c_str());
if (mSoftRenderer == NULL &&
mSurface != NULL &&
(mFlags & kFlagUsesSoftwareRenderer)) {
AString mime;
- CHECK(msg->findString("mime", &mime));
+ CHECK(mOutputFormat->findString("mime", &mime));
+
+ // TODO: propagate color aspects to software renderer to allow better
+ // color conversion to RGB. For now, just mark dataspace for YUV
+ // rendering.
+ int32_t dataSpace;
+ if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
+ ALOGD("[%s] setting dataspace on output surface to #%x",
+ mComponentName.c_str(), dataSpace);
+ int err = native_window_set_buffers_data_space(
+ mSurface.get(), (android_dataspace)dataSpace);
+ ALOGW_IF(err != 0, "failed to set dataspace on surface (%d)", err);
+ }
if (mime.startsWithIgnoreCase("video/")) {
mSoftRenderer = new SoftwareRenderer(mSurface, mRotationDegrees);
}
}
- mOutputFormat = msg;
-
if (mFlags & kFlagIsEncoder) {
// Before we announce the format change we should
// collect codec specific data and amend the output
@@ -2498,7 +2524,7 @@
ICrypto::DestinationType dst_type = ICrypto::kDestinationTypeOpaqueHandle;
if (info->mNativeHandle != NULL) {
- dst_pointer = (void *)info->mNativeHandle.get();
+ dst_pointer = (void *)info->mNativeHandle->handle();
dst_type = ICrypto::kDestinationTypeNativeHandle;
} else if ((mFlags & kFlagIsSecure) == 0) {
dst_type = ICrypto::kDestinationTypeVmPointer;
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index bdfddc7..1acfca0 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -40,8 +40,9 @@
namespace android {
-const int kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
-const int kDefaultSwVideoEncoderDataSpace = HAL_DATASPACE_BT709;
+const int32_t kDefaultSwVideoEncoderFormat = HAL_PIXEL_FORMAT_YCbCr_420_888;
+const int32_t kDefaultHwVideoEncoderFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
+const int32_t kDefaultVideoEncoderDataSpace = HAL_DATASPACE_V0_BT709;
const int kStopTimeoutUs = 300000; // allow 1 sec for shutting down encoder
@@ -286,6 +287,7 @@
}
if (queue->mPulling && err == OK) {
msg->post(); // if simply paused, keep pulling source
+ break;
} else if (err == ERROR_END_OF_STREAM) {
ALOGV("stream ended, mbuf %p", mbuf);
} else if (err != OK) {
@@ -356,6 +358,11 @@
return OK;
}
+sp<MetaData> MediaCodecSource::getFormat() {
+ Mutexed<sp<MetaData>>::Locked meta(mMeta);
+ return *meta;
+}
+
sp<IGraphicBufferProducer> MediaCodecSource::getGraphicBufferProducer() {
CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
return mGraphicBufferProducer;
@@ -491,7 +498,9 @@
}
mEncoder->getOutputFormat(&mOutputFormat);
- convertMessageToMetaData(mOutputFormat, mMeta);
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(mOutputFormat, meta);
+ mMeta.lock().set(meta);
if (mFlags & FLAG_USE_SURFACE_INPUT) {
CHECK(mIsVideo);
@@ -514,13 +523,19 @@
sp<AMessage> inputFormat;
int32_t usingSwReadOften;
mSetEncoderFormat = false;
- if (mEncoder->getInputFormat(&inputFormat) == OK
- && inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
- && usingSwReadOften) {
- // this is a SW encoder; signal source to allocate SW readable buffers
+ if (mEncoder->getInputFormat(&inputFormat) == OK) {
mSetEncoderFormat = true;
- mEncoderFormat = kDefaultSwVideoEncoderFormat;
- mEncoderDataSpace = kDefaultSwVideoEncoderDataSpace;
+ if (inputFormat->findInt32("using-sw-read-often", &usingSwReadOften)
+ && usingSwReadOften) {
+ // this is a SW encoder; signal source to allocate SW readable buffers
+ mEncoderFormat = kDefaultSwVideoEncoderFormat;
+ } else {
+ mEncoderFormat = kDefaultHwVideoEncoderFormat;
+ }
+ if (!inputFormat->findInt32("android._dataspace", &mEncoderDataSpace)) {
+ mEncoderDataSpace = kDefaultVideoEncoderDataSpace;
+ }
+ ALOGV("setting dataspace %#x, format %#x", mEncoderDataSpace, mEncoderFormat);
}
err = mEncoder->start();
@@ -773,6 +788,15 @@
mAvailEncoderInputIndices.push_back(index);
feedEncoderInputBuffers();
+ } else if (cbID == MediaCodec::CB_OUTPUT_FORMAT_CHANGED) {
+ status_t err = mEncoder->getOutputFormat(&mOutputFormat);
+ if (err != OK) {
+ signalEOS(err);
+ break;
+ }
+ sp<MetaData> meta = new MetaData;
+ convertMessageToMetaData(mOutputFormat, meta);
+ mMeta.lock().set(meta);
} else if (cbID == MediaCodec::CB_OUTPUT_AVAILABLE) {
int32_t index;
size_t offset;
diff --git a/media/libstagefright/MidiExtractor.cpp b/media/libstagefright/MidiExtractor.cpp
index 7525f57..7930bbb 100644
--- a/media/libstagefright/MidiExtractor.cpp
+++ b/media/libstagefright/MidiExtractor.cpp
@@ -178,6 +178,7 @@
mEasConfig = EAS_Config();
trackMetadata->setInt32(kKeySampleRate, mEasConfig->sampleRate);
trackMetadata->setInt32(kKeyChannelCount, mEasConfig->numChannels);
+ trackMetadata->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
}
mIsInitialized = true;
}
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index dd7f6b9..6d1a460 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -121,9 +121,10 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = dataSource;
-
- updateDurationAndBitrate();
+ status_t err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = dataSource;
+ }
return OK;
}
@@ -152,9 +153,10 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = fileSource;
-
- updateDurationAndBitrate();
+ err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = fileSource;
+ }
return OK;
}
@@ -177,14 +179,19 @@
return ERROR_UNSUPPORTED;
}
- mDataSource = source;
+ err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = source;
+ }
- updateDurationAndBitrate();
-
- return OK;
+ return err;
}
-void NuMediaExtractor::updateDurationAndBitrate() {
+status_t NuMediaExtractor::updateDurationAndBitrate() {
+ if (mImpl->countTracks() > kMaxTrackCount) {
+ return ERROR_UNSUPPORTED;
+ }
+
mTotalBitrate = 0ll;
mDurationUs = -1ll;
@@ -212,6 +219,7 @@
mDurationUs = durationUs;
}
}
+ return OK;
}
size_t NuMediaExtractor::countTracks() const {
@@ -235,6 +243,12 @@
}
sp<MetaData> meta = mImpl->getTrackMetaData(index);
+ // Extractors either support trackID-s or not, so either all tracks have trackIDs or none.
+ // Generate trackID if missing.
+ int32_t trackID;
+ if (meta != NULL && !meta->findInt32(kKeyTrackID, &trackID)) {
+ meta->setInt32(kKeyTrackID, (int32_t)index + 1);
+ }
return convertMetaDataToMessage(meta, format);
}
diff --git a/media/libstagefright/OMXClient.cpp b/media/libstagefright/OMXClient.cpp
index 8e72405..a523d0e 100644
--- a/media/libstagefright/OMXClient.cpp
+++ b/media/libstagefright/OMXClient.cpp
@@ -112,7 +112,7 @@
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type);
virtual status_t createPersistentInputSurface(
@@ -388,10 +388,10 @@
}
status_t MuxOMX::createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
status_t err = getOMX(node)->createInputSurface(
- node, port_index, bufferProducer, type);
+ node, port_index, dataSpace, bufferProducer, type);
return err;
}
diff --git a/media/libstagefright/SampleTable.cpp b/media/libstagefright/SampleTable.cpp
index f5d9ec7..1bdd812 100644
--- a/media/libstagefright/SampleTable.cpp
+++ b/media/libstagefright/SampleTable.cpp
@@ -298,6 +298,7 @@
mDefaultSampleSize = U32_AT(&header[4]);
mNumSampleSizes = U32_AT(&header[8]);
if (mNumSampleSizes > (UINT32_MAX - 12) / 16) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
return ERROR_MALFORMED;
}
@@ -532,6 +533,9 @@
Mutex::Autolock autoLock(mLock);
if (mSampleTimeEntries != NULL || mNumSampleSizes == 0) {
+ if (mNumSampleSizes == 0) {
+ ALOGE("b/23247055, mNumSampleSizes(%u)", mNumSampleSizes);
+ }
return;
}
diff --git a/media/libstagefright/StagefrightMetadataRetriever.cpp b/media/libstagefright/StagefrightMetadataRetriever.cpp
index 45fb785..7027780 100644
--- a/media/libstagefright/StagefrightMetadataRetriever.cpp
+++ b/media/libstagefright/StagefrightMetadataRetriever.cpp
@@ -215,6 +215,7 @@
if (err != OK) {
ALOGW("failed to get input buffers: %d (%s)", err, asString(err));
decoder->release();
+ source->stop();
return NULL;
}
@@ -223,6 +224,7 @@
if (err != OK) {
ALOGW("failed to get output buffers: %d (%s)", err, asString(err));
decoder->release();
+ source->stop();
return NULL;
}
@@ -328,7 +330,6 @@
if (err != OK || size <= 0 || outputFormat == NULL) {
ALOGE("Failed to decode thumbnail frame");
source->stop();
- decoder->stop();
decoder->release();
return NULL;
}
@@ -401,7 +402,6 @@
videoFrameBuffer.clear();
source->stop();
decoder->releaseOutputBuffer(index);
- decoder->stop();
decoder->release();
if (err != OK) {
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index dcc29fe..7daae20 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -27,6 +27,7 @@
#include <arpa/inet.h>
#include <cutils/properties.h>
#include <media/openmax/OMX_Audio.h>
+#include <media/stagefright/CodecBase.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
@@ -90,6 +91,49 @@
return OK;
}
+static void convertMetaDataToMessageInt32(
+ const sp<MetaData> &meta, sp<AMessage> &msg, uint32_t key, const char *name) {
+ int32_t value;
+ if (meta->findInt32(key, &value)) {
+ msg->setInt32(name, value);
+ }
+}
+
+static void convertMetaDataToMessageColorAspects(const sp<MetaData> &meta, sp<AMessage> &msg) {
+ // 0 values are unspecified
+ int32_t range = 0;
+ int32_t primaries = 0;
+ int32_t transferFunction = 0;
+ int32_t colorMatrix = 0;
+ meta->findInt32(kKeyColorRange, &range);
+ meta->findInt32(kKeyColorPrimaries, &primaries);
+ meta->findInt32(kKeyTransferFunction, &transferFunction);
+ meta->findInt32(kKeyColorMatrix, &colorMatrix);
+ ColorAspects colorAspects;
+ memset(&colorAspects, 0, sizeof(colorAspects));
+ colorAspects.mRange = (ColorAspects::Range)range;
+ colorAspects.mPrimaries = (ColorAspects::Primaries)primaries;
+ colorAspects.mTransfer = (ColorAspects::Transfer)transferFunction;
+ colorAspects.mMatrixCoeffs = (ColorAspects::MatrixCoeffs)colorMatrix;
+
+ int32_t rangeMsg, standardMsg, transferMsg;
+ if (CodecBase::convertCodecColorAspectsToPlatformAspects(
+ colorAspects, &rangeMsg, &standardMsg, &transferMsg) != OK) {
+ return;
+ }
+
+ // save specified values to msg
+ if (rangeMsg != 0) {
+ msg->setInt32("color-range", rangeMsg);
+ }
+ if (standardMsg != 0) {
+ msg->setInt32("color-standard", standardMsg);
+ }
+ if (transferMsg != 0) {
+ msg->setInt32("color-transfer", transferMsg);
+ }
+}
+
status_t convertMetaDataToMessage(
const sp<MetaData> &meta, sp<AMessage> *format) {
@@ -123,6 +167,12 @@
msg->setInt32("is-sync-frame", 1);
}
+ // this only needs to be translated from meta to message as it is an extractor key
+ int32_t trackID;
+ if (meta->findInt32(kKeyTrackID, &trackID)) {
+ msg->setInt32("track-id", trackID);
+ }
+
if (!strncasecmp("video/", mime, 6)) {
int32_t width, height;
if (!meta->findInt32(kKeyWidth, &width)
@@ -158,6 +208,10 @@
if (meta->findInt32(kKeyRotation, &rotationDegrees)) {
msg->setInt32("rotation-degrees", rotationDegrees);
}
+
+ convertMetaDataToMessageInt32(meta, msg, kKeyMinLuminance, "min-luminance");
+ convertMetaDataToMessageInt32(meta, msg, kKeyMaxLuminance, "max-luminance");
+ convertMetaDataToMessageColorAspects(meta, msg);
} else if (!strncasecmp("audio/", mime, 6)) {
int32_t numChannels, sampleRate;
if (!meta->findInt32(kKeyChannelCount, &numChannels)
@@ -184,13 +238,18 @@
int32_t isADTS;
if (meta->findInt32(kKeyIsADTS, &isADTS)) {
- msg->setInt32("is-adts", true);
+ msg->setInt32("is-adts", isADTS);
}
int32_t aacProfile = -1;
if (meta->findInt32(kKeyAACAOT, &aacProfile)) {
msg->setInt32("aac-profile", aacProfile);
}
+
+ int32_t pcmEncoding;
+ if (meta->findInt32(kKeyPcmEncoding, &pcmEncoding)) {
+ msg->setInt32("pcm-encoding", pcmEncoding);
+ }
}
int32_t maxInputSize;
@@ -626,6 +685,43 @@
return size;
}
+static void convertMessageToMetaDataInt32(
+ const sp<AMessage> &msg, sp<MetaData> &meta, uint32_t key, const char *name) {
+ int32_t value;
+ if (msg->findInt32(name, &value)) {
+ meta->setInt32(key, value);
+ }
+}
+
+static void convertMessageToMetaDataColorAspects(const sp<AMessage> &msg, sp<MetaData> &meta) {
+ // 0 values are unspecified
+ int32_t range = 0, standard = 0, transfer = 0;
+ (void)msg->findInt32("color-range", &range);
+ (void)msg->findInt32("color-standard", &standard);
+ (void)msg->findInt32("color-transfer", &transfer);
+
+ ColorAspects colorAspects;
+ memset(&colorAspects, 0, sizeof(colorAspects));
+ if (CodecBase::convertPlatformColorAspectsToCodecAspects(
+ range, standard, transfer, colorAspects) != OK) {
+ return;
+ }
+
+ // save specified values to meta
+ if (colorAspects.mRange != 0) {
+ meta->setInt32(kKeyColorRange, colorAspects.mRange);
+ }
+ if (colorAspects.mPrimaries != 0) {
+ meta->setInt32(kKeyColorPrimaries, colorAspects.mPrimaries);
+ }
+ if (colorAspects.mTransfer != 0) {
+ meta->setInt32(kKeyTransferFunction, colorAspects.mTransfer);
+ }
+ if (colorAspects.mMatrixCoeffs != 0) {
+ meta->setInt32(kKeyColorMatrix, colorAspects.mMatrixCoeffs);
+ }
+}
+
void convertMessageToMetaData(const sp<AMessage> &msg, sp<MetaData> &meta) {
AString mime;
if (msg->findString("mime", &mime)) {
@@ -679,6 +775,10 @@
if (msg->findInt32("rotation-degrees", &rotationDegrees)) {
meta->setInt32(kKeyRotation, rotationDegrees);
}
+
+ convertMessageToMetaDataInt32(msg, meta, kKeyMinLuminance, "min-luminance");
+ convertMessageToMetaDataInt32(msg, meta, kKeyMaxLuminance, "max-luminance");
+ convertMessageToMetaDataColorAspects(msg, meta);
} else if (mime.startsWith("audio/")) {
int32_t numChannels;
if (msg->findInt32("channel-count", &numChannels)) {
@@ -705,6 +805,11 @@
if (msg->findInt32("is-adts", &isADTS)) {
meta->setInt32(kKeyIsADTS, isADTS);
}
+
+ int32_t pcmEncoding;
+ if (msg->findInt32("pcm-encoding", &pcmEncoding)) {
+ meta->setInt32(kKeyPcmEncoding, pcmEncoding);
+ }
}
int32_t maxInputSize;
diff --git a/media/libstagefright/VideoFrameScheduler.cpp b/media/libstagefright/VideoFrameScheduler.cpp
index 5564926..03226c7 100644
--- a/media/libstagefright/VideoFrameScheduler.cpp
+++ b/media/libstagefright/VideoFrameScheduler.cpp
@@ -156,12 +156,12 @@
lastTime = time;
}
- int64_t div = numSamplesToUse * sumXX - sumX * sumX;
+ int64_t div = (int64_t)numSamplesToUse * sumXX - sumX * sumX;
if (div == 0) {
return false;
}
- int64_t a_nom = numSamplesToUse * sumXY - sumX * sumY;
+ int64_t a_nom = (int64_t)numSamplesToUse * sumXY - sumX * sumY;
int64_t b_nom = sumXX * sumY - sumX * sumXY;
*a = divRound(a_nom, div);
*b = divRound(b_nom, div);
@@ -437,10 +437,10 @@
(renderTime + mTimeCorrection + videoPeriod * i - mVsyncTime) % mVsyncPeriod;
edgeRemainder += (videoPeriod * i) % mVsyncPeriod;
}
- mTimeCorrection += mVsyncPeriod / 2 - offset / N;
+ mTimeCorrection += mVsyncPeriod / 2 - offset / (nsecs_t)N;
renderTime += mTimeCorrection;
nsecs_t correctionLimit = mVsyncPeriod * 3 / 5;
- edgeRemainder = abs(edgeRemainder / N - mVsyncPeriod / 2);
+ edgeRemainder = abs(edgeRemainder / (nsecs_t)N - mVsyncPeriod / 2);
if (edgeRemainder <= mVsyncPeriod / 3) {
correctionLimit /= 2;
}
diff --git a/media/libstagefright/WAVExtractor.cpp b/media/libstagefright/WAVExtractor.cpp
index 680c0c6..38a2a06 100644
--- a/media/libstagefright/WAVExtractor.cpp
+++ b/media/libstagefright/WAVExtractor.cpp
@@ -20,6 +20,7 @@
#include "include/WAVExtractor.h"
+#include <audio_utils/primitives.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaBufferGroup.h>
@@ -36,6 +37,7 @@
enum {
WAVE_FORMAT_PCM = 0x0001,
+ WAVE_FORMAT_IEEE_FLOAT = 0x0003,
WAVE_FORMAT_ALAW = 0x0006,
WAVE_FORMAT_MULAW = 0x0007,
WAVE_FORMAT_MSGSM = 0x0031,
@@ -177,6 +179,7 @@
mWaveFormat = U16_LE_AT(formatSpec);
if (mWaveFormat != WAVE_FORMAT_PCM
+ && mWaveFormat != WAVE_FORMAT_IEEE_FLOAT
&& mWaveFormat != WAVE_FORMAT_ALAW
&& mWaveFormat != WAVE_FORMAT_MULAW
&& mWaveFormat != WAVE_FORMAT_MSGSM
@@ -214,24 +217,6 @@
mBitsPerSample = U16_LE_AT(&formatSpec[14]);
- if (mWaveFormat == WAVE_FORMAT_PCM
- || mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
- if (mBitsPerSample != 8 && mBitsPerSample != 16
- && mBitsPerSample != 24) {
- return ERROR_UNSUPPORTED;
- }
- } else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
- if (mBitsPerSample != 0) {
- return ERROR_UNSUPPORTED;
- }
- } else {
- CHECK(mWaveFormat == WAVE_FORMAT_MULAW
- || mWaveFormat == WAVE_FORMAT_ALAW);
- if (mBitsPerSample != 8) {
- return ERROR_UNSUPPORTED;
- }
- }
-
if (mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
uint16_t validBitsPerSample = U16_LE_AT(&formatSpec[18]);
if (validBitsPerSample != mBitsPerSample) {
@@ -263,17 +248,34 @@
// In a WAVE_EXT header, the first two bytes of the GUID stored at byte 24 contain
// the sample format, using the same definitions as a regular WAV header
mWaveFormat = U16_LE_AT(&formatSpec[24]);
- if (mWaveFormat != WAVE_FORMAT_PCM
- && mWaveFormat != WAVE_FORMAT_ALAW
- && mWaveFormat != WAVE_FORMAT_MULAW) {
- return ERROR_UNSUPPORTED;
- }
if (memcmp(&formatSpec[26], WAVEEXT_SUBFORMAT, 14)) {
ALOGE("unsupported GUID");
return ERROR_UNSUPPORTED;
}
}
+ if (mWaveFormat == WAVE_FORMAT_PCM) {
+ if (mBitsPerSample != 8 && mBitsPerSample != 16
+ && mBitsPerSample != 24 && mBitsPerSample != 32) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+ if (mBitsPerSample != 32) { // TODO we don't support double
+ return ERROR_UNSUPPORTED;
+ }
+ }
+ else if (mWaveFormat == WAVE_FORMAT_MSGSM) {
+ if (mBitsPerSample != 0) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_MULAW || mWaveFormat == WAVE_FORMAT_ALAW) {
+ if (mBitsPerSample != 8) {
+ return ERROR_UNSUPPORTED;
+ }
+ } else {
+ return ERROR_UNSUPPORTED;
+ }
+
mValidFormat = true;
} else if (!memcmp(chunkHeader, "data", 4)) {
if (mValidFormat) {
@@ -284,6 +286,7 @@
switch (mWaveFormat) {
case WAVE_FORMAT_PCM:
+ case WAVE_FORMAT_IEEE_FLOAT:
mTrackMeta->setCString(
kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
break;
@@ -305,6 +308,7 @@
mTrackMeta->setInt32(kKeyChannelCount, mNumChannels);
mTrackMeta->setInt32(kKeyChannelMask, mChannelMask);
mTrackMeta->setInt32(kKeySampleRate, mSampleRate);
+ mTrackMeta->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
int64_t durationUs = 0;
if (mWaveFormat == WAVE_FORMAT_MSGSM) {
@@ -475,46 +479,39 @@
buffer->set_range(0, n);
- if (mWaveFormat == WAVE_FORMAT_PCM || mWaveFormat == WAVE_FORMAT_EXTENSIBLE) {
+ // TODO: add capability to return data as float PCM instead of 16 bit PCM.
+ if (mWaveFormat == WAVE_FORMAT_PCM) {
if (mBitsPerSample == 8) {
// Convert 8-bit unsigned samples to 16-bit signed.
+ // Create new buffer with 2 byte wide samples
MediaBuffer *tmp;
CHECK_EQ(mGroup->acquire_buffer(&tmp), (status_t)OK);
-
- // The new buffer holds the sample number of samples, but each
- // one is 2 bytes wide.
tmp->set_range(0, 2 * n);
- int16_t *dst = (int16_t *)tmp->data();
- const uint8_t *src = (const uint8_t *)buffer->data();
- ssize_t numBytes = n;
-
- while (numBytes-- > 0) {
- *dst++ = ((int16_t)(*src) - 128) * 256;
- ++src;
- }
-
+ memcpy_to_i16_from_u8((int16_t *)tmp->data(), (const uint8_t *)buffer->data(), n);
buffer->release();
buffer = tmp;
} else if (mBitsPerSample == 24) {
- // Convert 24-bit signed samples to 16-bit signed.
+ // Convert 24-bit signed samples to 16-bit signed in place
+ const size_t numSamples = n / 3;
- const uint8_t *src =
- (const uint8_t *)buffer->data() + buffer->range_offset();
- int16_t *dst = (int16_t *)src;
+ memcpy_to_i16_from_p24((int16_t *)buffer->data(), (const uint8_t *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
+ } else if (mBitsPerSample == 32) {
+ // Convert 32-bit signed samples to 16-bit signed in place
+ const size_t numSamples = n / 4;
- size_t numSamples = buffer->range_length() / 3;
- for (size_t i = 0; i < numSamples; ++i) {
- int32_t x = (int32_t)(src[0] | src[1] << 8 | src[2] << 16);
- x = (x << 8) >> 8; // sign extension
+ memcpy_to_i16_from_i32((int16_t *)buffer->data(), (const int32_t *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
+ }
+ } else if (mWaveFormat == WAVE_FORMAT_IEEE_FLOAT) {
+ if (mBitsPerSample == 32) {
+ // Convert 32-bit float samples to 16-bit signed in place
+ const size_t numSamples = n / 4;
- x = x >> 8;
- *dst++ = (int16_t)x;
- src += 3;
- }
-
- buffer->set_range(buffer->range_offset(), 2 * numSamples);
+ memcpy_to_i16_from_float((int16_t *)buffer->data(), (const float *)buffer->data(), numSamples);
+ buffer->set_range(0, 2 * numSamples);
}
}
diff --git a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
index 965c55e..520ecb4 100644
--- a/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
+++ b/media/libstagefright/codecs/aacdec/SoftAAC2.cpp
@@ -209,6 +209,10 @@
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -244,6 +248,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -284,6 +292,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.aac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -298,6 +310,10 @@
const OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(const OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -318,6 +334,11 @@
{
const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *aacPresParams =
(const OMX_AUDIO_PARAM_ANDROID_AACPRESENTATIONTYPE *)params;
+
+ if (!isValidOMXParam(aacPresParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
// for the following parameters of the OMX_AUDIO_PARAM_AACPROFILETYPE structure,
// a value of -1 implies the parameter is not set by the application:
// nMaxOutputChannels uses default platform properties, see configureDownmix()
@@ -384,6 +405,10 @@
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
index c6724c2..ab0a228 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder.cpp
@@ -154,6 +154,10 @@
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -174,6 +178,10 @@
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -198,6 +206,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -229,6 +241,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.aac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -243,6 +259,10 @@
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -266,6 +286,10 @@
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -286,6 +310,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
index 96e2f87..63215ec 100644
--- a/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
+++ b/media/libstagefright/codecs/aacenc/SoftAACEncoder2.cpp
@@ -133,6 +133,10 @@
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -153,6 +157,10 @@
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -212,6 +220,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -235,6 +247,10 @@
OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *profileParams =
(OMX_AUDIO_PARAM_ANDROID_PROFILETYPE *)params;
+ if (!isValidOMXParam(profileParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (profileParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -262,6 +278,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.aac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -276,6 +296,10 @@
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -299,6 +323,10 @@
OMX_AUDIO_PARAM_AACPROFILETYPE *aacParams =
(OMX_AUDIO_PARAM_AACPROFILETYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -339,6 +367,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
index a9723ea..edf648d 100644
--- a/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
+++ b/media/libstagefright/codecs/amrnb/dec/SoftAMR.cpp
@@ -148,6 +148,10 @@
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(amrParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (amrParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -174,6 +178,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -207,6 +215,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (mMode == MODE_NARROW) {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.amrnb",
@@ -229,6 +241,10 @@
const OMX_AUDIO_PARAM_AMRTYPE *aacParams =
(const OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(aacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (aacParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -241,6 +257,10 @@
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -303,6 +323,13 @@
return;
}
+ if (inHeader->nFilledLen == 0) {
+ inInfo->mOwnedByUs = false;
+ inQueue.erase(inQueue.begin());
+ notifyEmptyBufferDone(inHeader);
+ continue;
+ }
+
if (inHeader->nOffset == 0) {
mAnchorTimeUs = inHeader->nTimeStamp;
mNumSamplesOutput = 0;
@@ -312,6 +339,26 @@
int32_t numBytesRead;
if (mMode == MODE_NARROW) {
+ if (outHeader->nAllocLen < kNumSamplesPerFrameNB * sizeof(int16_t)) {
+ ALOGE("b/27662364: NB expected output buffer %zu bytes vs %u",
+ kNumSamplesPerFrameNB * sizeof(int16_t), outHeader->nAllocLen);
+ android_errorWriteLog(0x534e4554, "27662364");
+ notify(OMX_EventError, OMX_ErrorOverflow, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
+ int16 mode = ((inputPtr[0] >> 3) & 0x0f);
+ // for WMF since MIME_IETF is used when calling AMRDecode.
+ size_t frameSize = WmfDecBytesPerFrame[mode] + 1;
+
+ if (inHeader->nFilledLen < frameSize) {
+ ALOGE("b/27662364: expected %zu bytes vs %u", frameSize, inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
numBytesRead =
AMRDecode(mState,
(Frame_Type_3GPP)((inputPtr[0] >> 3) & 0x0f),
@@ -339,6 +386,15 @@
return;
}
} else {
+ if (outHeader->nAllocLen < kNumSamplesPerFrameWB * sizeof(int16_t)) {
+ ALOGE("b/27662364: WB expected output buffer %zu bytes vs %u",
+ kNumSamplesPerFrameWB * sizeof(int16_t), outHeader->nAllocLen);
+ android_errorWriteLog(0x534e4554, "27662364");
+ notify(OMX_EventError, OMX_ErrorOverflow, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
+
int16 mode = ((inputPtr[0] >> 3) & 0x0f);
if (mode >= 10 && mode <= 13) {
@@ -352,7 +408,12 @@
}
size_t frameSize = getFrameSize(mode);
- CHECK_GE(inHeader->nFilledLen, frameSize);
+ if (inHeader->nFilledLen < frameSize) {
+ ALOGE("b/27662364: expected %zu bytes vs %u", frameSize, inHeader->nFilledLen);
+ notify(OMX_EventError, OMX_ErrorStreamCorrupt, 0, NULL);
+ mSignalledError = true;
+ return;
+ }
int16_t *outPtr = (int16_t *)outHeader->pBuffer;
diff --git a/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h b/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
index 0988e17f..f224fb6 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
+++ b/media/libstagefright/codecs/amrnb/dec/src/amrdecode.h
@@ -104,7 +104,6 @@
; INCLUDES
----------------------------------------------------------------------------*/
#include "typedef.h"
-#include "mode.h"
#include "frame_type_3gpp.h"
/*--------------------------------------------------------------------------*/
diff --git a/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h b/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
index 8f54ee8..dc64d67 100644
--- a/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
+++ b/media/libstagefright/codecs/amrnb/dec/src/gsmamr_dec.h
@@ -87,6 +87,7 @@
#include "gsm_amr_typedefs.h"
#include "frame_type_3gpp.h"
+#include "amrdecode.h"
/*--------------------------------------------------------------------------*/
#ifdef __cplusplus
@@ -136,19 +137,6 @@
Word8 *id);
/*
- * AMRDecode steps into the part of the library that decodes the raw data
- * speech bits for the decoding process. It returns the address offset of
- * the next frame to be decoded.
- */
- Word16 AMRDecode(
- void *state_data,
- enum Frame_Type_3GPP frame_type,
- UWord8 *speech_bits_ptr,
- Word16 *raw_pcm_buffer,
- Word16 input_format
- );
-
- /*
* This function resets the state memory used by the GSM AMR decoder. This
* function returns zero. It will return negative one if there is an error.
*/
diff --git a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
index 9489457..aaa6731 100644
--- a/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
+++ b/media/libstagefright/codecs/amrnb/enc/SoftAMRNBEncoder.cpp
@@ -120,6 +120,10 @@
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -140,6 +144,10 @@
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(amrParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -158,6 +166,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -188,6 +200,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.amrnb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -202,6 +218,10 @@
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -225,6 +245,10 @@
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(amrParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -252,6 +276,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
index 91a512d..9d50c4e 100644
--- a/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
+++ b/media/libstagefright/codecs/amrwbenc/SoftAMRWBEncoder.cpp
@@ -155,6 +155,10 @@
OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -175,6 +179,10 @@
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(amrParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -196,6 +204,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -226,6 +238,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.amrwb",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -240,6 +256,10 @@
const OMX_AUDIO_PARAM_PORTFORMATTYPE *formatParams =
(const OMX_AUDIO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -263,6 +283,10 @@
OMX_AUDIO_PARAM_AMRTYPE *amrParams =
(OMX_AUDIO_PARAM_AMRTYPE *)params;
+ if (!isValidOMXParam(amrParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (amrParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -299,6 +323,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
index db3e058..8165f69 100644
--- a/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
+++ b/media/libstagefright/codecs/amrwbenc/inc/basic_op.h
@@ -51,11 +51,11 @@
#define vo_shr_r(var1, var2) ((var1+((Word16)(1L<<(var2-1))))>>var2)
#define vo_sub(a,b) (a - b)
#define vo_L_deposit_h(a) ((Word32)((a) << 16))
-#define vo_round(a) ((a + 0x00008000) >> 16)
+#define vo_round(a) ((((a) >> 15) + 1) >> 1)
#define vo_extract_l(a) ((Word16)(a))
#define vo_L_add(a,b) (a + b)
#define vo_L_sub(a,b) (a - b)
-#define vo_mult_r(a,b) ((( a * b ) + 0x4000 ) >> 15 )
+#define vo_mult_r(a,b) (((( a * b ) >> 14) + 1 ) >> 1 )
#define vo_negate(a) (-a)
#define vo_L_shr_r(L_var1, var2) ((L_var1+((Word32)(1L<<(var2-1))))>>var2)
diff --git a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
index 8bf15ea..4d877f1 100644
--- a/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
+++ b/media/libstagefright/codecs/amrwbenc/src/c4t64fx.c
@@ -267,13 +267,13 @@
for (i = 0; i < L_SUBFR/4; i++)
{
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
*p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
*p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
*p2++ = s >> 7;
- s = (k_cn* (*p0++))+(k_dn * (*p1++));
+ s = L_add((k_cn* (*p0++)), (k_dn * (*p1++)));
*p2++ = s >> 7;
}
@@ -342,7 +342,7 @@
{
*h++ = 0;
*h_inv++ = 0;
- L_tmp += (H[i] * H[i]) << 1;
+ L_tmp = L_add(L_tmp, (H[i] * H[i]) << 1);
}
/* scale h[] down (/2) when energy of h[] is high with many pulses used */
val = extract_h(L_tmp);
@@ -386,16 +386,16 @@
cor = 0x00008000L; /* for rounding */
for (i = 0; i < NB_POS; i++)
{
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
ptr_h1++;
*p3-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
ptr_h1++;
*p2-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
ptr_h1++;
*p1-- = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h1));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h1)));
ptr_h1++;
*p0-- = extract_h(cor);
}
@@ -425,19 +425,19 @@
for (i = k + 1; i < NB_POS; i++)
{
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p1 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p0 = extract_h(cor);
@@ -447,15 +447,15 @@
p1 -= (NB_POS + 1);
p0 -= (NB_POS + 1);
}
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p1 = extract_h(cor);
@@ -482,19 +482,19 @@
for (i = k + 1; i < NB_POS; i++)
{
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p3 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p2 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p1 = extract_h(cor);
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p0 = extract_h(cor);
@@ -504,7 +504,7 @@
p1 -= (NB_POS + 1);
p0 -= (NB_POS + 1);
}
- cor += vo_L_mult((*ptr_h1), (*ptr_h2));
+ cor = L_add(cor, vo_L_mult((*ptr_h1), (*ptr_h2)));
ptr_h1++;
ptr_h2++;
*p3 = extract_h(cor);
@@ -698,7 +698,7 @@
}
/* memorise the best codevector */
ps = vo_mult(ps, ps);
- s = vo_L_msu(vo_L_mult(alpk, ps), psk, alp);
+ s = L_sub(vo_L_mult(alpk, ps), vo_L_mult(psk, alp));
if (s > 0)
{
psk = ps;
diff --git a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
index fa6ec40..cce6d15 100644
--- a/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
+++ b/media/libstagefright/codecs/avc/enc/SoftAVCEncoder.cpp
@@ -370,6 +370,10 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (bitRate->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -384,6 +388,10 @@
OMX_VIDEO_PARAM_AVCTYPE *avcParams =
(OMX_VIDEO_PARAM_AVCTYPE *)params;
+ if (!isValidOMXParam(avcParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (avcParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -427,6 +435,10 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (bitRate->nPortIndex != 1 ||
bitRate->eControlRate != OMX_Video_ControlRateVariable) {
return OMX_ErrorUndefined;
@@ -441,6 +453,10 @@
OMX_VIDEO_PARAM_AVCTYPE *avcType =
(OMX_VIDEO_PARAM_AVCTYPE *)params;
+ if (!isValidOMXParam(avcType)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (avcType->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
index afbe230..ae0741d 100644
--- a/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
+++ b/media/libstagefright/codecs/avcdec/SoftAVCDec.cpp
@@ -438,7 +438,7 @@
uint32_t bufferSize = displayStride * displayHeight * 3 / 2;
mFlushOutBuffer = (uint8_t *)memalign(128, bufferSize);
if (NULL == mFlushOutBuffer) {
- ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+ ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
return;
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
index e378a62..6ec8c41 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.cpp
@@ -208,6 +208,7 @@
mEnableAltRef = DEFAULT_ENABLE_ALT_REF;
mEncSpeed = DEFAULT_ENC_SPEED;
mIntra4x4 = DEFAULT_INTRA4x4;
+ mConstrainedIntraFlag = DEFAULT_CONSTRAINED_INTRA;
mAIRMode = DEFAULT_AIR;
mAIRRefreshPeriod = DEFAULT_AIR_REFRESH_PERIOD;
mPSNREnable = DEFAULT_PSNR_ENABLE;
@@ -305,6 +306,7 @@
s_ipe_params_ip.u4_enable_intra_4x4 = mIntra4x4;
s_ipe_params_ip.u4_enc_speed_preset = mEncSpeed;
+ s_ipe_params_ip.u4_constrained_intra_pred = mConstrainedIntraFlag;
s_ipe_params_ip.u4_timestamp_high = -1;
s_ipe_params_ip.u4_timestamp_low = -1;
@@ -927,6 +929,10 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *)params;
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (bitRate->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -940,6 +946,10 @@
{
OMX_VIDEO_PARAM_AVCTYPE *avcParams = (OMX_VIDEO_PARAM_AVCTYPE *)params;
+ if (!isValidOMXParam(avcParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (avcParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -977,14 +987,24 @@
switch (indexFull) {
case OMX_IndexParamVideoBitrate:
{
- return internalSetBitrateParams(
- (const OMX_VIDEO_PARAM_BITRATETYPE *)params);
+ OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
+ (OMX_VIDEO_PARAM_BITRATETYPE *)params;
+
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ return internalSetBitrateParams(bitRate);
}
case OMX_IndexParamVideoAvc:
{
OMX_VIDEO_PARAM_AVCTYPE *avcType = (OMX_VIDEO_PARAM_AVCTYPE *)params;
+ if (!isValidOMXParam(avcType)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (avcType->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -1000,6 +1020,7 @@
}
mIInterval = avcType->nPFrames + avcType->nBFrames;
+ mConstrainedIntraFlag = avcType->bconstIpred;
if (OMX_VIDEO_AVCLoopFilterDisable == avcType->eLoopFilterMode)
mDisableDeblkLevel = 4;
@@ -1009,7 +1030,6 @@
|| avcType->nRefIdx10ActiveMinus1 != 0
|| avcType->nRefIdx11ActiveMinus1 != 0
|| avcType->bWeightedPPrediction != OMX_FALSE
- || avcType->bconstIpred != OMX_FALSE
|| avcType->bDirect8x8Inference != OMX_FALSE
|| avcType->bDirectSpatialTemporal != OMX_FALSE
|| avcType->nCabacInitIdc != 0) {
@@ -1035,11 +1055,17 @@
{
OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *intraRefreshParams =
(OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *)_params;
+
+ if (!isValidOMXParam(intraRefreshParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (intraRefreshParams->nPortIndex != kOutputPortIndex) {
return OMX_ErrorUndefined;
}
- intraRefreshParams->nRefreshPeriod = mAIRRefreshPeriod;
+ intraRefreshParams->nRefreshPeriod =
+ (mAIRMode == IVE_AIR_MODE_NONE) ? 0 : mAIRRefreshPeriod;
return OMX_ErrorNone;
}
@@ -1056,6 +1082,10 @@
OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
(OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
+ if (!isValidOMXParam(params)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -1071,6 +1101,10 @@
OMX_VIDEO_CONFIG_BITRATETYPE *params =
(OMX_VIDEO_CONFIG_BITRATETYPE *)_params;
+ if (!isValidOMXParam(params)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -1086,6 +1120,11 @@
{
const OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *intraRefreshParams =
(const OMX_VIDEO_CONFIG_ANDROID_INTRAREFRESHTYPE *)_params;
+
+ if (!isValidOMXParam(intraRefreshParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (intraRefreshParams->nPortIndex != kOutputPortIndex) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
index 232c6e0..cf6f899 100644
--- a/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
+++ b/media/libstagefright/codecs/avcenc/SoftAVCEnc.h
@@ -95,8 +95,7 @@
#define DEFAULT_SOC SOC_GENERIC
#define DEFAULT_INTRA4x4 0
#define STRLENGTH 500
-
-
+#define DEFAULT_CONSTRAINED_INTRA 0
#define MIN(a, b) ((a) < (b))? (a) : (b)
#define MAX(a, b) ((a) > (b))? (a) : (b)
@@ -182,6 +181,7 @@
bool mReconEnable;
bool mPSNREnable;
bool mEntropyMode;
+ bool mConstrainedIntraFlag;
IVE_SPEED_CONFIG mEncSpeed;
uint8_t *mConversionBuffers[MAX_CONVERSION_BUFFERS];
diff --git a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
index 9edffd2..caceda9 100644
--- a/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
+++ b/media/libstagefright/codecs/flac/enc/SoftFlacEncoder.cpp
@@ -159,6 +159,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -180,6 +184,11 @@
case OMX_IndexParamAudioFlac:
{
OMX_AUDIO_PARAM_FLACTYPE *flacParams = (OMX_AUDIO_PARAM_FLACTYPE *)params;
+
+ if (!isValidOMXParam(flacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
flacParams->nCompressionLevel = mCompressionLevel;
flacParams->nChannels = mNumChannels;
flacParams->nSampleRate = mSampleRate;
@@ -199,6 +208,10 @@
ALOGV("SoftFlacEncoder::internalSetParameter(OMX_IndexParamAudioPcm)");
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams = (OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
ALOGE("SoftFlacEncoder::internalSetParameter() Error #1");
return OMX_ErrorUndefined;
@@ -221,6 +234,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_encoder.flac",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -236,6 +253,11 @@
{
// used only for setting the compression level
OMX_AUDIO_PARAM_FLACTYPE *flacParams = (OMX_AUDIO_PARAM_FLACTYPE *)params;
+
+ if (!isValidOMXParam(flacParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
mCompressionLevel = flacParams->nCompressionLevel; // range clamping done inside encoder
return OMX_ErrorNone;
}
@@ -245,6 +267,10 @@
OMX_PARAM_PORTDEFINITIONTYPE *defParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ if (!isValidOMXParam(defParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (defParams->nPortIndex == 0) {
if (defParams->nBufferSize > kMaxInputBufferSize) {
ALOGE("Input buffer size must be at most %d bytes",
diff --git a/media/libstagefright/codecs/g711/dec/SoftG711.cpp b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
index e342d7c..958e7c4 100644
--- a/media/libstagefright/codecs/g711/dec/SoftG711.cpp
+++ b/media/libstagefright/codecs/g711/dec/SoftG711.cpp
@@ -110,6 +110,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -148,6 +152,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -170,6 +178,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (mIsMLaw) {
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.g711mlaw",
diff --git a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
index bd01a1a..7916c45 100644
--- a/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
+++ b/media/libstagefright/codecs/gsm/dec/SoftGSM.cpp
@@ -110,6 +110,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -141,6 +145,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -161,6 +169,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.gsm",
OMX_MAX_STRINGNAME_SIZE - 1)) {
diff --git a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
index e601125..4aa23c1 100644
--- a/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
+++ b/media/libstagefright/codecs/hevcdec/SoftHEVC.cpp
@@ -399,7 +399,7 @@
uint32_t bufferSize = displayStride * displayHeight * 3 / 2;
mFlushOutBuffer = (uint8_t *)memalign(128, bufferSize);
if (NULL == mFlushOutBuffer) {
- ALOGE("Could not allocate flushOutputBuffer of size %zu", bufferSize);
+ ALOGE("Could not allocate flushOutputBuffer of size %u", bufferSize);
return;
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
index f2a4e65..7638bb7 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/SoftMPEG4Encoder.cpp
@@ -232,6 +232,10 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (bitRate->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -246,6 +250,10 @@
OMX_VIDEO_PARAM_H263TYPE *h263type =
(OMX_VIDEO_PARAM_H263TYPE *)params;
+ if (!isValidOMXParam(h263type)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (h263type->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -267,6 +275,10 @@
OMX_VIDEO_PARAM_MPEG4TYPE *mpeg4type =
(OMX_VIDEO_PARAM_MPEG4TYPE *)params;
+ if (!isValidOMXParam(mpeg4type)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (mpeg4type->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -301,6 +313,10 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
(OMX_VIDEO_PARAM_BITRATETYPE *) params;
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (bitRate->nPortIndex != 1 ||
bitRate->eControlRate != OMX_Video_ControlRateVariable) {
return OMX_ErrorUndefined;
@@ -315,6 +331,10 @@
OMX_VIDEO_PARAM_H263TYPE *h263type =
(OMX_VIDEO_PARAM_H263TYPE *)params;
+ if (!isValidOMXParam(h263type)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (h263type->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -337,6 +357,10 @@
OMX_VIDEO_PARAM_MPEG4TYPE *mpeg4type =
(OMX_VIDEO_PARAM_MPEG4TYPE *)params;
+ if (!isValidOMXParam(mpeg4type)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (mpeg4type->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
index f743b1c..9988015 100644
--- a/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
+++ b/media/libstagefright/codecs/mp3dec/SoftMP3.cpp
@@ -128,6 +128,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -151,6 +155,10 @@
OMX_AUDIO_PARAM_MP3TYPE *mp3Params =
(OMX_AUDIO_PARAM_MP3TYPE *)params;
+ if (!isValidOMXParam(mp3Params)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (mp3Params->nPortIndex > 1) {
return OMX_ErrorUndefined;
}
@@ -176,6 +184,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.mp3",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -190,6 +202,10 @@
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(const OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
index 6106a93..2a56ed5 100644
--- a/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
+++ b/media/libstagefright/codecs/on2/dec/SoftVPX.cpp
@@ -156,15 +156,20 @@
outHeader->nFlags = 0;
outHeader->nFilledLen = (outputBufferWidth() * outputBufferHeight() * 3) / 2;
outHeader->nTimeStamp = *(OMX_TICKS *)mImg->user_priv;
-
- uint8_t *dst = outHeader->pBuffer;
- const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
- const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
- const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
- size_t srcYStride = mImg->stride[VPX_PLANE_Y];
- size_t srcUStride = mImg->stride[VPX_PLANE_U];
- size_t srcVStride = mImg->stride[VPX_PLANE_V];
- copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+ if (outHeader->nAllocLen >= outHeader->nFilledLen) {
+ uint8_t *dst = outHeader->pBuffer;
+ const uint8_t *srcY = (const uint8_t *)mImg->planes[VPX_PLANE_Y];
+ const uint8_t *srcU = (const uint8_t *)mImg->planes[VPX_PLANE_U];
+ const uint8_t *srcV = (const uint8_t *)mImg->planes[VPX_PLANE_V];
+ size_t srcYStride = mImg->stride[VPX_PLANE_Y];
+ size_t srcUStride = mImg->stride[VPX_PLANE_U];
+ size_t srcVStride = mImg->stride[VPX_PLANE_V];
+ copyYV12FrameToOutputBuffer(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride);
+ } else {
+ ALOGE("b/27597103, buffer too small");
+ android_errorWriteLog(0x534e4554, "27597103");
+ outHeader->nFilledLen = 0;
+ }
mImg = NULL;
outInfo->mOwnedByUs = false;
diff --git a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
index 0f28e8d..5edfbb5 100644
--- a/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
+++ b/media/libstagefright/codecs/on2/enc/SoftVPXEncoder.cpp
@@ -373,20 +373,24 @@
OMX_VIDEO_PARAM_BITRATETYPE *bitrate =
(OMX_VIDEO_PARAM_BITRATETYPE *)param;
- if (bitrate->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
+ if (!isValidOMXParam(bitrate)) {
+ return OMX_ErrorBadParameter;
+ }
- bitrate->nTargetBitrate = mBitrate;
+ if (bitrate->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
- if (mBitrateControlMode == VPX_VBR) {
- bitrate->eControlRate = OMX_Video_ControlRateVariable;
- } else if (mBitrateControlMode == VPX_CBR) {
- bitrate->eControlRate = OMX_Video_ControlRateConstant;
- } else {
- return OMX_ErrorUnsupportedSetting;
- }
- return OMX_ErrorNone;
+ bitrate->nTargetBitrate = mBitrate;
+
+ if (mBitrateControlMode == VPX_VBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateVariable;
+ } else if (mBitrateControlMode == VPX_CBR) {
+ bitrate->eControlRate = OMX_Video_ControlRateConstant;
+ } else {
+ return OMX_ErrorUnsupportedSetting;
+ }
+ return OMX_ErrorNone;
}
// VP8 specific parameters that use extension headers
@@ -394,33 +398,41 @@
OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
(OMX_VIDEO_PARAM_VP8TYPE *)param;
- if (vp8Params->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
+ if (!isValidOMXParam(vp8Params)) {
+ return OMX_ErrorBadParameter;
+ }
- vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
- vp8Params->eLevel = mLevel;
- vp8Params->nDCTPartitions = mDCTPartitions;
- vp8Params->bErrorResilientMode = mErrorResilience;
- return OMX_ErrorNone;
+ if (vp8Params->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp8Params->eProfile = OMX_VIDEO_VP8ProfileMain;
+ vp8Params->eLevel = mLevel;
+ vp8Params->nDCTPartitions = mDCTPartitions;
+ vp8Params->bErrorResilientMode = mErrorResilience;
+ return OMX_ErrorNone;
}
case OMX_IndexParamVideoAndroidVp8Encoder: {
OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
(OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param;
- if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
- return OMX_ErrorUnsupportedIndex;
- }
+ if (!isValidOMXParam(vp8AndroidParams)) {
+ return OMX_ErrorBadParameter;
+ }
- vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
- vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
- vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
- vp8AndroidParams->nMinQuantizer = mMinQuantizer;
- vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
- memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
- mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
- return OMX_ErrorNone;
+ if (vp8AndroidParams->nPortIndex != kOutputPortIndex) {
+ return OMX_ErrorUnsupportedIndex;
+ }
+
+ vp8AndroidParams->nKeyFrameInterval = mKeyFrameInterval;
+ vp8AndroidParams->eTemporalPattern = mTemporalPatternType;
+ vp8AndroidParams->nTemporalLayerCount = mTemporalLayers;
+ vp8AndroidParams->nMinQuantizer = mMinQuantizer;
+ vp8AndroidParams->nMaxQuantizer = mMaxQuantizer;
+ memcpy(vp8AndroidParams->nTemporalLayerBitrateRatio,
+ mTemporalLayerBitrateRatio, sizeof(mTemporalLayerBitrateRatio));
+ return OMX_ErrorNone;
}
default:
@@ -435,17 +447,38 @@
const int32_t indexFull = index;
switch (indexFull) {
- case OMX_IndexParamVideoBitrate:
- return internalSetBitrateParams(
- (const OMX_VIDEO_PARAM_BITRATETYPE *)param);
+ case OMX_IndexParamVideoBitrate: {
+ const OMX_VIDEO_PARAM_BITRATETYPE *bitRate =
+ (const OMX_VIDEO_PARAM_BITRATETYPE*) param;
- case OMX_IndexParamVideoVp8:
- return internalSetVp8Params(
- (const OMX_VIDEO_PARAM_VP8TYPE *)param);
+ if (!isValidOMXParam(bitRate)) {
+ return OMX_ErrorBadParameter;
+ }
- case OMX_IndexParamVideoAndroidVp8Encoder:
- return internalSetAndroidVp8Params(
- (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *)param);
+ return internalSetBitrateParams(bitRate);
+ }
+
+ case OMX_IndexParamVideoVp8: {
+ const OMX_VIDEO_PARAM_VP8TYPE *vp8Params =
+ (const OMX_VIDEO_PARAM_VP8TYPE*) param;
+
+ if (!isValidOMXParam(vp8Params)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ return internalSetVp8Params(vp8Params);
+ }
+
+ case OMX_IndexParamVideoAndroidVp8Encoder: {
+ const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE *vp8AndroidParams =
+ (const OMX_VIDEO_PARAM_ANDROID_VP8ENCODERTYPE*) param;
+
+ if (!isValidOMXParam(vp8AndroidParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
+ return internalSetAndroidVp8Params(vp8AndroidParams);
+ }
default:
return SoftVideoEncoderOMXComponent::internalSetParameter(index, param);
@@ -460,6 +493,10 @@
OMX_CONFIG_INTRAREFRESHVOPTYPE *params =
(OMX_CONFIG_INTRAREFRESHVOPTYPE *)_params;
+ if (!isValidOMXParam(params)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -473,6 +510,10 @@
OMX_VIDEO_CONFIG_BITRATETYPE *params =
(OMX_VIDEO_CONFIG_BITRATETYPE *)_params;
+ if (!isValidOMXParam(params)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (params->nPortIndex != kOutputPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -693,9 +734,10 @@
const uint8_t *source =
inputBufferHeader->pBuffer + inputBufferHeader->nOffset;
+ size_t frameSize = mWidth * mHeight * 3 / 2;
if (mInputDataIsMeta) {
source = extractGraphicBuffer(
- mConversionBuffer, mWidth * mHeight * 3 / 2,
+ mConversionBuffer, frameSize,
source, inputBufferHeader->nFilledLen,
mWidth, mHeight);
if (source == NULL) {
@@ -703,11 +745,21 @@
notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
return;
}
- } else if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
- ConvertYUV420SemiPlanarToYUV420Planar(
- source, mConversionBuffer, mWidth, mHeight);
+ } else {
+ if (inputBufferHeader->nFilledLen < frameSize) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+ return;
+ } else if (inputBufferHeader->nFilledLen > frameSize) {
+ ALOGW("Input buffer contains too many pixels");
+ }
- source = mConversionBuffer;
+ if (mColorFormat == OMX_COLOR_FormatYUV420SemiPlanar) {
+ ConvertYUV420SemiPlanarToYUV420Planar(
+ source, mConversionBuffer, mWidth, mHeight);
+
+ source = mConversionBuffer;
+ }
}
vpx_image_t raw_frame;
vpx_img_wrap(&raw_frame, VPX_IMG_FMT_I420, mWidth, mHeight,
@@ -769,9 +821,14 @@
outputBufferHeader->nTimeStamp = encoded_packet->data.frame.pts;
outputBufferHeader->nFlags = 0;
if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY)
- outputBufferHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
+ outputBufferHeader->nFlags |= OMX_BUFFERFLAG_SYNCFRAME;
outputBufferHeader->nOffset = 0;
outputBufferHeader->nFilledLen = encoded_packet->data.frame.sz;
+ if (outputBufferHeader->nFilledLen > outputBufferHeader->nAllocLen) {
+ android_errorWriteLog(0x534e4554, "27569635");
+ notify(OMX_EventError, OMX_ErrorUndefined, 0, 0);
+ return;
+ }
memcpy(outputBufferHeader->pBuffer,
encoded_packet->data.frame.buf,
encoded_packet->data.frame.sz);
diff --git a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
index cb10bce..2afa0ed 100644
--- a/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
+++ b/media/libstagefright/codecs/opus/dec/SoftOpus.cpp
@@ -134,6 +134,10 @@
OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
(OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+ if (!isValidOMXParam(opusParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (opusParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -156,6 +160,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -191,6 +199,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.opus",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -205,6 +217,10 @@
const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *opusParams =
(const OMX_AUDIO_PARAM_ANDROID_OPUSTYPE *)params;
+ if (!isValidOMXParam(opusParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (opusParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 9d3bab8..acb2b37 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -42,7 +42,9 @@
: SimpleSoftOMXComponent(name, callbacks, appData, component),
mSignalledError(false),
mChannelCount(2),
- mSampleRate(44100) {
+ mSampleRate(44100),
+ mNumericalData(OMX_NumericalDataSigned),
+ mBitsPerSample(16) {
initPorts();
CHECK_EQ(initDecoder(), (status_t)OK);
}
@@ -103,14 +105,18 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0 && pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
- pcmParams->eNumData = OMX_NumericalDataSigned;
+ pcmParams->eNumData = (OMX_NUMERICALDATATYPE)mNumericalData;
pcmParams->eEndian = OMX_EndianBig;
pcmParams->bInterleaved = OMX_TRUE;
- pcmParams->nBitPerSample = 16;
+ pcmParams->nBitPerSample = mBitsPerSample;
pcmParams->ePCMMode = OMX_AUDIO_PCMModeLinear;
pcmParams->eChannelMapping[0] = OMX_AUDIO_ChannelLF;
pcmParams->eChannelMapping[1] = OMX_AUDIO_ChannelRF;
@@ -134,6 +140,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.raw",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -148,12 +158,18 @@
const OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
mChannelCount = pcmParams->nChannels;
mSampleRate = pcmParams->nSamplingRate;
+ mNumericalData = pcmParams->eNumData;
+ mBitsPerSample = pcmParams->nBitPerSample;
return OMX_ErrorNone;
}
diff --git a/media/libstagefright/codecs/raw/SoftRaw.h b/media/libstagefright/codecs/raw/SoftRaw.h
index 015c4a3..80906b4 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.h
+++ b/media/libstagefright/codecs/raw/SoftRaw.h
@@ -50,6 +50,8 @@
int32_t mChannelCount;
int32_t mSampleRate;
+ int32_t mNumericalData;
+ int32_t mBitsPerSample;
void initPorts();
status_t initDecoder();
diff --git a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
index 3b1b2dc..6a689c4 100644
--- a/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
+++ b/media/libstagefright/codecs/vorbis/dec/SoftVorbis.cpp
@@ -131,6 +131,10 @@
OMX_AUDIO_PARAM_VORBISTYPE *vorbisParams =
(OMX_AUDIO_PARAM_VORBISTYPE *)params;
+ if (!isValidOMXParam(vorbisParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (vorbisParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -162,6 +166,10 @@
OMX_AUDIO_PARAM_PCMMODETYPE *pcmParams =
(OMX_AUDIO_PARAM_PCMMODETYPE *)params;
+ if (!isValidOMXParam(pcmParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (pcmParams->nPortIndex != 1) {
return OMX_ErrorUndefined;
}
@@ -198,6 +206,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
"audio_decoder.vorbis",
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -212,6 +224,10 @@
const OMX_AUDIO_PARAM_VORBISTYPE *vorbisParams =
(const OMX_AUDIO_PARAM_VORBISTYPE *)params;
+ if (!isValidOMXParam(vorbisParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (vorbisParams->nPortIndex != 0) {
return OMX_ErrorUndefined;
}
@@ -322,7 +338,13 @@
}
if (inHeader->nFilledLen || !mSawInputEos) {
- CHECK_GE(inHeader->nFilledLen, sizeof(numPageSamples));
+ if (inHeader->nFilledLen < sizeof(numPageSamples)) {
+ notify(OMX_EventError, OMX_ErrorBadParameter, 0, NULL);
+ mSignalledError = true;
+ ALOGE("onQueueFilled, input header has nFilledLen %u, expected %zu",
+ inHeader->nFilledLen, sizeof(numPageSamples));
+ return;
+ }
memcpy(&numPageSamples,
inHeader->pBuffer
+ inHeader->nOffset + inHeader->nFilledLen - 4,
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index e92c192..bbc4d26 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -363,6 +363,16 @@
}
}
+ // TODO: propagate color aspects to software renderer to allow better
+ // color conversion to RGB. For now, just mark dataspace for YUV rendering.
+ android_dataspace dataSpace;
+ if (format->findInt32("android._dataspace", (int32_t *)&dataSpace) && dataSpace != mDataSpace) {
+ ALOGD("setting dataspace on output surface to #%x", dataSpace);
+ if ((err = native_window_set_buffers_data_space(mNativeWindow.get(), dataSpace))) {
+ ALOGW("failed to set dataspace on surface (%d)", err);
+ }
+ mDataSpace = dataSpace;
+ }
if ((err = mNativeWindow->queueBuffer(mNativeWindow.get(), buf, -1)) != 0) {
ALOGW("Surface::queueBuffer returned error %d", err);
} else {
diff --git a/media/libstagefright/foundation/AMessage.cpp b/media/libstagefright/foundation/AMessage.cpp
index 725a574..855ac95 100644
--- a/media/libstagefright/foundation/AMessage.cpp
+++ b/media/libstagefright/foundation/AMessage.cpp
@@ -749,6 +749,126 @@
}
}
+// Returns a new message containing the entries of this message that are
+// absent from, or differ in value from, |other|. If |other| is NULL, this
+// message itself is returned (everything counts as changed). If |deep| is
+// true, nested AMessage values are recursively reduced to their changes;
+// otherwise the whole nested message is included whenever it differs.
+sp<AMessage> AMessage::changesFrom(const sp<const AMessage> &other, bool deep) const {
+ if (other == NULL) {
+ return const_cast<AMessage*>(this);
+ }
+
+ sp<AMessage> diff = new AMessage;
+ if (mWhat != other->mWhat) {
+ diff->setWhat(mWhat);
+ }
+ if (mHandler != other->mHandler) {
+ diff->setTarget(mHandler.promote());
+ }
+
+ for (size_t i = 0; i < mNumItems; ++i) {
+ const Item &item = mItems[i];
+ // only an item with the same name AND type counts as a match
+ const Item *oitem = other->findItem(item.mName, item.mType);
+ switch (item.mType) {
+ case kTypeInt32:
+ if (oitem == NULL || item.u.int32Value != oitem->u.int32Value) {
+ diff->setInt32(item.mName, item.u.int32Value);
+ }
+ break;
+
+ case kTypeInt64:
+ if (oitem == NULL || item.u.int64Value != oitem->u.int64Value) {
+ diff->setInt64(item.mName, item.u.int64Value);
+ }
+ break;
+
+ case kTypeSize:
+ if (oitem == NULL || item.u.sizeValue != oitem->u.sizeValue) {
+ diff->setSize(item.mName, item.u.sizeValue);
+ }
+ break;
+
+ case kTypeFloat:
+ if (oitem == NULL || item.u.floatValue != oitem->u.floatValue) {
+ // fixed: previously stored item.u.sizeValue (wrong union member)
+ diff->setFloat(item.mName, item.u.floatValue);
+ }
+ break;
+
+ case kTypeDouble:
+ if (oitem == NULL || item.u.doubleValue != oitem->u.doubleValue) {
+ // fixed: previously stored item.u.sizeValue (wrong union member)
+ diff->setDouble(item.mName, item.u.doubleValue);
+ }
+ break;
+
+ case kTypeString:
+ if (oitem == NULL || *item.u.stringValue != *oitem->u.stringValue) {
+ diff->setString(item.mName, *item.u.stringValue);
+ }
+ break;
+
+ case kTypeRect:
+ if (oitem == NULL || memcmp(&item.u.rectValue, &oitem->u.rectValue, sizeof(Rect))) {
+ diff->setRect(
+ item.mName, item.u.rectValue.mLeft, item.u.rectValue.mTop,
+ item.u.rectValue.mRight, item.u.rectValue.mBottom);
+ }
+ break;
+
+ case kTypePointer:
+ if (oitem == NULL || item.u.ptrValue != oitem->u.ptrValue) {
+ diff->setPointer(item.mName, item.u.ptrValue);
+ }
+ break;
+
+ case kTypeBuffer:
+ {
+ sp<ABuffer> myBuf = static_cast<ABuffer *>(item.u.refValue);
+ if (myBuf == NULL) {
+ if (oitem == NULL || oitem->u.refValue != NULL) {
+ diff->setBuffer(item.mName, NULL);
+ }
+ break;
+ }
+ sp<ABuffer> oBuf = oitem == NULL ? NULL : static_cast<ABuffer *>(oitem->u.refValue);
+ // buffers differ if sizes differ, exactly one has data, or bytes differ
+ if (oBuf == NULL
+ || myBuf->size() != oBuf->size()
+ || (!myBuf->data() ^ !oBuf->data()) // data nullness differs
+ || (myBuf->data() && memcmp(myBuf->data(), oBuf->data(), myBuf->size()))) {
+ diff->setBuffer(item.mName, myBuf);
+ }
+ break;
+ }
+
+ case kTypeMessage:
+ {
+ sp<AMessage> myMsg = static_cast<AMessage *>(item.u.refValue);
+ if (myMsg == NULL) {
+ if (oitem == NULL || oitem->u.refValue != NULL) {
+ diff->setMessage(item.mName, NULL);
+ }
+ break;
+ }
+ sp<AMessage> oMsg =
+ oitem == NULL ? NULL : static_cast<AMessage *>(oitem->u.refValue);
+ sp<AMessage> changes = myMsg->changesFrom(oMsg, deep);
+ if (changes->countEntries()) {
+ diff->setMessage(item.mName, deep ? changes : myMsg);
+ }
+ break;
+ }
+
+ case kTypeObject:
+ // objects compare by identity (pointer), not contents
+ if (oitem == NULL || item.u.refValue != oitem->u.refValue) {
+ diff->setObject(item.mName, item.u.refValue);
+ }
+ break;
+
+ default:
+ {
+ ALOGE("Unknown type %d", item.mType);
+ TRESPASS();
+ }
+ }
+ }
+ return diff;
+}
+
size_t AMessage::countEntries() const {
return mNumItems;
}
diff --git a/media/libstagefright/foundation/Android.mk b/media/libstagefright/foundation/Android.mk
index 711601f..3c3ed59 100644
--- a/media/libstagefright/foundation/Android.mk
+++ b/media/libstagefright/foundation/Android.mk
@@ -15,6 +15,7 @@
AString.cpp \
AStringUtils.cpp \
AWakeLock.cpp \
+ ColorUtils.cpp \
MediaBuffer.cpp \
MediaBufferGroup.cpp \
MetaData.cpp \
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
new file mode 100644
index 0000000..99031ca
--- /dev/null
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -0,0 +1,582 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "ColorUtils"
+
+#include <inttypes.h>
+
+#include <media/stagefright/foundation/ADebug.h>
+#include <media/stagefright/foundation/ALookup.h>
+#include <media/stagefright/foundation/ColorUtils.h>
+
+namespace android {
+
+// shortcut names for brevity in the following tables
+typedef ColorAspects CA;
+typedef ColorUtils CU;
+
+const static
+ALookup<CU::ColorRange, CA::Range> sRanges{
+ {
+ { CU::kColorRangeLimited, CA::RangeLimited },
+ { CU::kColorRangeFull, CA::RangeFull },
+ { CU::kColorRangeUnspecified, CA::RangeUnspecified },
+ }
+};
+
+const static
+ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandards {
+ {
+ { CU::kColorStandardUnspecified, { CA::PrimariesUnspecified, CA::MatrixUnspecified } },
+ { CU::kColorStandardBT709, { CA::PrimariesBT709_5, CA::MatrixBT709_5 } },
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT601_6_625, CA::MatrixBT601_6 } },
+ { CU::kColorStandardBT601_625_Unadjusted,
+ // this is a really close match
+ { CA::PrimariesBT601_6_625, CA::MatrixBT709_5 } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT601_6 } },
+ { CU::kColorStandardBT601_525_Unadjusted,
+ { CA::PrimariesBT601_6_525, CA::MatrixSMPTE240M } },
+ { CU::kColorStandardBT2020, { CA::PrimariesBT2020, CA::MatrixBT2020 } },
+ { CU::kColorStandardBT2020Constant, { CA::PrimariesBT2020, CA::MatrixBT2020Constant } },
+ { CU::kColorStandardBT470M, { CA::PrimariesBT470_6M, CA::MatrixBT470_6M } },
+ // NOTE: there is no close match to the matrix used by standard film, chose closest
+ { CU::kColorStandardFilm, { CA::PrimariesGenericFilm, CA::MatrixBT2020 } },
+ }
+};
+
+const static
+ALookup<CU::ColorTransfer, CA::Transfer> sTransfers{
+ {
+ { CU::kColorTransferUnspecified, CA::TransferUnspecified },
+ { CU::kColorTransferLinear, CA::TransferLinear },
+ { CU::kColorTransferSRGB, CA::TransferSRGB },
+ { CU::kColorTransferSMPTE_170M, CA::TransferSMPTE170M },
+ { CU::kColorTransferGamma22, CA::TransferGamma22 },
+ { CU::kColorTransferGamma28, CA::TransferGamma28 },
+ { CU::kColorTransferST2084, CA::TransferST2084 },
+ { CU::kColorTransferHLG, CA::TransferHLG },
+ }
+};
+
+// File-local range checks for primaries / matrix-coeffs enum values.
+// "valid"   = representable at all (up to and including the *Other sentinel);
+// "defined" = a concrete standard value defined by the ColorAspects enum.
+static bool isValid(ColorAspects::Primaries p) {
+ return p <= ColorAspects::PrimariesOther;
+}
+
+static bool isDefined(ColorAspects::Primaries p) {
+ return p <= ColorAspects::PrimariesBT2020;
+}
+
+static bool isValid(ColorAspects::MatrixCoeffs c) {
+ return c <= ColorAspects::MatrixOther;
+}
+
+static bool isDefined(ColorAspects::MatrixCoeffs c) {
+ return c <= ColorAspects::MatrixBT2020Constant;
+}
+
+//static
+// Packs a (primaries, matrix coeffs) pair into one platform ColorStandard
+// value: known platform combinations map through sStandards; invalid inputs
+// collapse to kColorStandardUnspecified; other defined pairs are encoded
+// arithmetically into the extended range, vendor values into the vendor range.
+int32_t ColorUtils::wrapColorAspectsIntoColorStandard(
+ ColorAspects::Primaries primaries, ColorAspects::MatrixCoeffs coeffs) {
+ ColorStandard res;
+ if (sStandards.map(std::make_pair(primaries, coeffs), &res)) {
+ return res;
+ } else if (!isValid(primaries) || !isValid(coeffs)) {
+ return kColorStandardUnspecified;
+ }
+
+ // check platform media limits
+ // encode as primaries + coeffs * rowWidth; the decode in
+ // unwrapColorAspectsFromColorStandard below must mirror this exactly
+ uint32_t numPrimaries = ColorAspects::PrimariesBT2020 + 1;
+ if (isDefined(primaries) && isDefined(coeffs)) {
+ return kColorStandardExtendedStart + primaries + coeffs * numPrimaries;
+ } else {
+ // vendor range uses a fixed 0x100 stride (PrimariesOther + 1)
+ return kColorStandardVendorStart + primaries + coeffs * 0x100;
+ }
+}
+
+//static
+// Inverse of wrapColorAspectsIntoColorStandard. Returns OK and fills the
+// outputs on success; on failure sets both outputs to the *Other sentinel
+// and returns BAD_VALUE.
+status_t ColorUtils::unwrapColorAspectsFromColorStandard(
+ int32_t standard,
+ ColorAspects::Primaries *primaries, ColorAspects::MatrixCoeffs *coeffs) {
+ std::pair<ColorAspects::Primaries, ColorAspects::MatrixCoeffs> res;
+ if (sStandards.map((ColorStandard)standard, &res)) {
+ *primaries = res.first;
+ *coeffs = res.second;
+ return OK;
+ }
+
+ // decode the arithmetic encoding: extended range uses the defined enum
+ // counts as the row width, vendor range uses 0x100-wide rows
+ int32_t start = kColorStandardExtendedStart;
+ int32_t numPrimaries = ColorAspects::PrimariesBT2020 + 1;
+ int32_t numCoeffs = ColorAspects::MatrixBT2020Constant + 1;
+ if (standard >= (int32_t)kColorStandardVendorStart) {
+ start = kColorStandardVendorStart;
+ numPrimaries = ColorAspects::PrimariesOther + 1; // 0x100
+ numCoeffs = ColorAspects::MatrixOther + 1; // 0x100;
+ }
+ if (standard >= start && standard < start + numPrimaries * numCoeffs) {
+ int32_t product = standard - start;
+ *primaries = (ColorAspects::Primaries)(product % numPrimaries);
+ *coeffs = (ColorAspects::MatrixCoeffs)(product / numPrimaries);
+ return OK;
+ }
+ *primaries = ColorAspects::PrimariesOther;
+ *coeffs = ColorAspects::MatrixOther;
+ return BAD_VALUE;
+}
+
+// "valid" / "defined" range checks, same convention as the primaries/matrix
+// helpers above.
+static bool isValid(ColorAspects::Range r) {
+ return r <= ColorAspects::RangeOther;
+}
+
+static bool isDefined(ColorAspects::Range r) {
+ return r <= ColorAspects::RangeLimited;
+}
+
+// static
+// Converts a codec-level Range aspect into a platform ColorRange value.
+// Any valid value not covered by sRanges must be a vendor extension and is
+// offset into the vendor range.
+int32_t ColorUtils::wrapColorAspectsIntoColorRange(ColorAspects::Range range) {
+ ColorRange res;
+ if (sRanges.map(range, &res)) {
+ return res;
+ } else if (!isValid(range)) {
+ return kColorRangeUnspecified;
+ } else {
+ CHECK(!isDefined(range));
+ // all platform values are in sRanges
+ return kColorRangeVendorStart + range;
+ }
+}
+
+//static
+// Inverse of wrapColorAspectsIntoColorRange. On failure sets *aspect to
+// RangeOther and returns BAD_VALUE.
+status_t ColorUtils::unwrapColorAspectsFromColorRange(
+ int32_t range, ColorAspects::Range *aspect) {
+ if (sRanges.map((ColorRange)range, aspect)) {
+ return OK;
+ }
+
+ int32_t start = kColorRangeVendorStart;
+ int32_t numRanges = ColorAspects::RangeOther + 1; // 0x100
+ if (range >= start && range < start + numRanges) {
+ *aspect = (ColorAspects::Range)(range - start);
+ return OK;
+ }
+ *aspect = ColorAspects::RangeOther;
+ return BAD_VALUE;
+}
+
+// "valid" / "defined" transfer checks. Note the defined values come in two
+// disjoint runs of the enum (up to HLG, then SMPTE240M..ST428).
+static bool isValid(ColorAspects::Transfer t) {
+ return t <= ColorAspects::TransferOther;
+}
+
+static bool isDefined(ColorAspects::Transfer t) {
+ return t <= ColorAspects::TransferHLG
+ || (t >= ColorAspects::TransferSMPTE240M && t <= ColorAspects::TransferST428);
+}
+
+// static
+// Converts a codec-level Transfer aspect into a platform ColorTransfer value:
+// table lookup first, then the extended range for other defined values, and
+// the vendor range for the rest.
+int32_t ColorUtils::wrapColorAspectsIntoColorTransfer(
+ ColorAspects::Transfer transfer) {
+ ColorTransfer res;
+ if (sTransfers.map(transfer, &res)) {
+ return res;
+ } else if (!isValid(transfer)) {
+ return kColorTransferUnspecified;
+ } else if (isDefined(transfer)) {
+ return kColorTransferExtendedStart + transfer;
+ } else {
+ // all platform values are in sTransfers
+ return kColorTransferVendorStart + transfer;
+ }
+}
+
+//static
+// Inverse of wrapColorAspectsIntoColorTransfer. On failure sets *aspect to
+// TransferOther and returns BAD_VALUE.
+status_t ColorUtils::unwrapColorAspectsFromColorTransfer(
+ int32_t transfer, ColorAspects::Transfer *aspect) {
+ if (sTransfers.map((ColorTransfer)transfer, aspect)) {
+ return OK;
+ }
+
+ int32_t start = kColorTransferExtendedStart;
+ int32_t numTransfers = ColorAspects::TransferST428 + 1;
+ if (transfer >= (int32_t)kColorTransferVendorStart) {
+ start = kColorTransferVendorStart;
+ numTransfers = ColorAspects::TransferOther + 1; // 0x100
+ }
+ if (transfer >= start && transfer < start + numTransfers) {
+ *aspect = (ColorAspects::Transfer)(transfer - start);
+ return OK;
+ }
+ *aspect = ColorAspects::TransferOther;
+ return BAD_VALUE;
+}
+
+// static
+// Unwraps all three platform color values into |aspects|. Failing components
+// are set to their *Other sentinel by the unwrap helpers; the first failing
+// status is returned (OK only if all three succeed).
+status_t ColorUtils::convertPlatformColorAspectsToCodecAspects(
+ int32_t range, int32_t standard, int32_t transfer, ColorAspects &aspects) {
+ status_t res1 = unwrapColorAspectsFromColorRange(range, &aspects.mRange);
+ status_t res2 = unwrapColorAspectsFromColorStandard(
+ standard, &aspects.mPrimaries, &aspects.mMatrixCoeffs);
+ status_t res3 = unwrapColorAspectsFromColorTransfer(transfer, &aspects.mTransfer);
+ return res1 != OK ? res1 : (res2 != OK ? res2 : res3);
+}
+
+// static
+// Wraps |aspects| into the three platform color values. All outputs are
+// always written; BAD_VALUE is returned if any input aspect was outside the
+// representable (valid) range.
+status_t ColorUtils::convertCodecColorAspectsToPlatformAspects(
+ const ColorAspects &aspects, int32_t *range, int32_t *standard, int32_t *transfer) {
+ *range = wrapColorAspectsIntoColorRange(aspects.mRange);
+ *standard = wrapColorAspectsIntoColorStandard(aspects.mPrimaries, aspects.mMatrixCoeffs);
+ *transfer = wrapColorAspectsIntoColorTransfer(aspects.mTransfer);
+ if (isValid(aspects.mRange) && isValid(aspects.mPrimaries)
+ && isValid(aspects.mMatrixCoeffs) && isValid(aspects.mTransfer)) {
+ return OK;
+ } else {
+ return BAD_VALUE;
+ }
+}
+
+const static
+ALookup<int32_t, ColorAspects::Primaries> sIsoPrimaries {
+ {
+ { 1, ColorAspects::PrimariesBT709_5 },
+ { 2, ColorAspects::PrimariesUnspecified },
+ { 4, ColorAspects::PrimariesBT470_6M },
+ { 5, ColorAspects::PrimariesBT601_6_625 },
+ { 6, ColorAspects::PrimariesBT601_6_525 /* main */},
+ { 7, ColorAspects::PrimariesBT601_6_525 },
+ // -- ITU T.832 201201 ends here
+ { 8, ColorAspects::PrimariesGenericFilm },
+ { 9, ColorAspects::PrimariesBT2020 },
+ { 10, ColorAspects::PrimariesOther /* XYZ */ },
+ }
+};
+
+const static
+ALookup<int32_t, ColorAspects::Transfer> sIsoTransfers {
+ {
+ { 1, ColorAspects::TransferSMPTE170M /* main */},
+ { 2, ColorAspects::TransferUnspecified },
+ { 4, ColorAspects::TransferGamma22 },
+ { 5, ColorAspects::TransferGamma28 },
+ { 6, ColorAspects::TransferSMPTE170M },
+ { 7, ColorAspects::TransferSMPTE240M },
+ { 8, ColorAspects::TransferLinear },
+ { 9, ColorAspects::TransferOther /* log 100:1 */ },
+ { 10, ColorAspects::TransferOther /* log 316:1 */ },
+ { 11, ColorAspects::TransferXvYCC },
+ { 12, ColorAspects::TransferBT1361 },
+ { 13, ColorAspects::TransferSRGB },
+ // -- ITU T.832 201201 ends here
+ { 14, ColorAspects::TransferSMPTE170M },
+ { 15, ColorAspects::TransferSMPTE170M },
+ { 16, ColorAspects::TransferST2084 },
+ { 17, ColorAspects::TransferST428 },
+ }
+};
+
+const static
+ALookup<int32_t, ColorAspects::MatrixCoeffs> sIsoMatrixCoeffs {
+ {
+ { 0, ColorAspects::MatrixOther },
+ { 1, ColorAspects::MatrixBT709_5 },
+ { 2, ColorAspects::MatrixUnspecified },
+ { 4, ColorAspects::MatrixBT470_6M },
+ { 6, ColorAspects::MatrixBT601_6 /* main */ },
+ { 5, ColorAspects::MatrixBT601_6 },
+ { 7, ColorAspects::MatrixSMPTE240M },
+ { 8, ColorAspects::MatrixOther /* YCgCo */ },
+ // -- ITU T.832 201201 ends here
+ { 9, ColorAspects::MatrixBT2020 },
+ { 10, ColorAspects::MatrixBT2020Constant },
+ }
+};
+
+// static
+// Maps codec color aspects to ISO code points (per the sIso* tables above).
+// Aspects with no ISO equivalent — or explicitly marked *Other — fall back
+// to the "unspecified" code (2), which the CHECKs assert is always mappable.
+void ColorUtils::convertCodecColorAspectsToIsoAspects(
+ const ColorAspects &aspects,
+ int32_t *primaries, int32_t *transfer, int32_t *coeffs, bool *fullRange) {
+ if (aspects.mPrimaries == ColorAspects::PrimariesOther ||
+ !sIsoPrimaries.map(aspects.mPrimaries, primaries)) {
+ CHECK(sIsoPrimaries.map(ColorAspects::PrimariesUnspecified, primaries));
+ }
+ if (aspects.mTransfer == ColorAspects::TransferOther ||
+ !sIsoTransfers.map(aspects.mTransfer, transfer)) {
+ CHECK(sIsoTransfers.map(ColorAspects::TransferUnspecified, transfer));
+ }
+ if (aspects.mMatrixCoeffs == ColorAspects::MatrixOther ||
+ !sIsoMatrixCoeffs.map(aspects.mMatrixCoeffs, coeffs)) {
+ CHECK(sIsoMatrixCoeffs.map(ColorAspects::MatrixUnspecified, coeffs));
+ }
+ // ISO signals range with a single full-range flag
+ *fullRange = aspects.mRange == ColorAspects::RangeFull;
+}
+
+// static
+// Inverse mapping: ISO code points to codec aspects. Unknown codes become
+// Unspecified; range is only ever full or limited on this path.
+void ColorUtils::convertIsoColorAspectsToCodecAspects(
+ int32_t primaries, int32_t transfer, int32_t coeffs, bool fullRange,
+ ColorAspects &aspects) {
+ if (!sIsoPrimaries.map(primaries, &aspects.mPrimaries)) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+ }
+ if (!sIsoTransfers.map(transfer, &aspects.mTransfer)) {
+ aspects.mTransfer = ColorAspects::TransferUnspecified;
+ }
+ if (!sIsoMatrixCoeffs.map(coeffs, &aspects.mMatrixCoeffs)) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ }
+ aspects.mRange = fullRange ? ColorAspects::RangeFull : ColorAspects::RangeLimited;
+}
+
+// static
+// Fills in any Unspecified aspects with defaults chosen from the video size:
+// BT.2020 for ~4K and larger, BT.601 (625- or 525-line) for PAL/NTSC-sized
+// content, BT.709 otherwise. Range defaults to limited, transfer to
+// SMPTE 170M. Already-specified aspects are left untouched.
+void ColorUtils::setDefaultCodecColorAspectsIfNeeded(
+ ColorAspects &aspects, int32_t width, int32_t height) {
+ ColorAspects::MatrixCoeffs coeffs;
+ ColorAspects::Primaries primaries;
+
+ // Default to BT2020, BT709 or BT601 based on size. Allow 2.35:1 aspect ratio. Limit BT601
+ // to PAL or smaller, BT2020 to 4K or larger, leaving BT709 for all resolutions in between.
+ // (3840 * 1634 is the pixel count of a 3840-wide frame at ~2.35:1; the
+ // int64_t cast avoids 32-bit overflow of width * height.)
+ if (width >= 3840 || height >= 3840 || width * (int64_t)height >= 3840 * 1634) {
+ primaries = ColorAspects::PrimariesBT2020;
+ coeffs = ColorAspects::MatrixBT2020;
+ } else if ((width <= 720 && height > 480 && height <= 576)
+ || (height <= 720 && width > 480 && width <= 576)) {
+ primaries = ColorAspects::PrimariesBT601_6_625;
+ coeffs = ColorAspects::MatrixBT601_6;
+ } else if ((width <= 720 && height <= 480) || (height <= 720 && width <= 480)) {
+ primaries = ColorAspects::PrimariesBT601_6_525;
+ coeffs = ColorAspects::MatrixBT601_6;
+ } else {
+ primaries = ColorAspects::PrimariesBT709_5;
+ coeffs = ColorAspects::MatrixBT709_5;
+ }
+
+ if (aspects.mRange == ColorAspects::RangeUnspecified) {
+ aspects.mRange = ColorAspects::RangeLimited;
+ }
+
+ if (aspects.mPrimaries == ColorAspects::PrimariesUnspecified) {
+ aspects.mPrimaries = primaries;
+ }
+ if (aspects.mMatrixCoeffs == ColorAspects::MatrixUnspecified) {
+ aspects.mMatrixCoeffs = coeffs;
+ }
+ if (aspects.mTransfer == ColorAspects::TransferUnspecified) {
+ aspects.mTransfer = ColorAspects::TransferSMPTE170M;
+ }
+}
+
+// TODO: move this into a Video HAL
+ALookup<CU::ColorStandard, std::pair<CA::Primaries, CA::MatrixCoeffs>> sStandardFallbacks {
+ {
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT709_5, CA::MatrixBT601_6 } },
+ { CU::kColorStandardBT709, { CA::PrimariesBT709_5, CA::MatrixSMPTE240M } },
+ { CU::kColorStandardBT709, { CA::PrimariesBT709_5, CA::MatrixBT2020 } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT709_5, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT2020Constant,
+ { CA::PrimariesBT470_6M, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT601_625, { CA::PrimariesBT601_6_625, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_625, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT470_6M } },
+ { CU::kColorStandardBT601_525, { CA::PrimariesBT601_6_525, CA::MatrixBT2020Constant } },
+
+ { CU::kColorStandardBT2020Constant,
+ { CA::PrimariesGenericFilm, CA::MatrixBT2020Constant } },
+ }
+};
+
+ALookup<CU::ColorStandard, CA::Primaries> sStandardPrimariesFallbacks {
+ {
+ { CU::kColorStandardFilm, CA::PrimariesGenericFilm },
+ { CU::kColorStandardBT470M, CA::PrimariesBT470_6M },
+ { CU::kColorStandardBT2020, CA::PrimariesBT2020 },
+ { CU::kColorStandardBT601_525_Unadjusted, CA::PrimariesBT601_6_525 },
+ { CU::kColorStandardBT601_625_Unadjusted, CA::PrimariesBT601_6_625 },
+ }
+};
+
+static ALookup<android_dataspace, android_dataspace> sLegacyDataSpaceToV0 {
+ {
+ { HAL_DATASPACE_SRGB, HAL_DATASPACE_V0_SRGB },
+ { HAL_DATASPACE_BT709, HAL_DATASPACE_V0_BT709 },
+ { HAL_DATASPACE_SRGB_LINEAR, HAL_DATASPACE_V0_SRGB_LINEAR },
+ { HAL_DATASPACE_BT601_525, HAL_DATASPACE_V0_BT601_525 },
+ { HAL_DATASPACE_BT601_625, HAL_DATASPACE_V0_BT601_625 },
+ { HAL_DATASPACE_JFIF, HAL_DATASPACE_V0_JFIF },
+ }
+};
+
+// Remaps deprecated legacy dataspace constants to their V0 equivalents
+// (in place, via the table above) and reports whether the result is a pure
+// V0 dataspace. NOTE(review): 0xC000FFFF presumably masks the non-bit-field
+// legacy bits of android_dataspace — confirm against the dataspace header.
+bool ColorUtils::convertDataSpaceToV0(android_dataspace &dataSpace) {
+ (void)sLegacyDataSpaceToV0.lookup(dataSpace, &dataSpace);
+ return (dataSpace & 0xC000FFFF) == 0;
+}
+
+// Compares |aspects| against |orig| and resets each component that changed
+// (and was specified) back to Unspecified. When |usePlatformAspects| is set,
+// primaries and matrix coeffs are treated as a unit — the platform packs
+// them into a single "standard" value — so changing one unspecifies both.
+// Returns true if anything was cleared.
+bool ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ ColorAspects &aspects, const ColorAspects &orig, bool usePlatformAspects) {
+ // remove changed aspects (change them to Unspecified)
+ bool changed = false;
+ if (aspects.mRange && aspects.mRange != orig.mRange) {
+ aspects.mRange = ColorAspects::RangeUnspecified;
+ changed = true;
+ }
+ if (aspects.mPrimaries && aspects.mPrimaries != orig.mPrimaries) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+ if (usePlatformAspects) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ }
+ changed = true;
+ }
+ if (aspects.mMatrixCoeffs && aspects.mMatrixCoeffs != orig.mMatrixCoeffs) {
+ aspects.mMatrixCoeffs = ColorAspects::MatrixUnspecified;
+ if (usePlatformAspects) {
+ aspects.mPrimaries = ColorAspects::PrimariesUnspecified;
+ }
+ changed = true;
+ }
+ if (aspects.mTransfer && aspects.mTransfer != orig.mTransfer) {
+ aspects.mTransfer = ColorAspects::TransferUnspecified;
+ changed = true;
+ }
+ return changed;
+}
+
+// static
+// Builds an android_dataspace from |aspects|, falling back to BT.709 /
+// limited range / SMPTE 170M for anything the tables cannot represent, and
+// (since this implementation never expands) writes the actually-used values
+// back into |aspects| so the caller sees what was applied.
+android_dataspace ColorUtils::getDataSpaceForColorAspects(ColorAspects &aspects, bool mayExpand) {
+ // This platform implementation never expands color space (e.g. returns an expanded
+ // dataspace to use where the codec does in-the-background color space conversion)
+ mayExpand = false;
+
+ if (aspects.mRange == ColorAspects::RangeUnspecified
+ || aspects.mPrimaries == ColorAspects::PrimariesUnspecified
+ || aspects.mMatrixCoeffs == ColorAspects::MatrixUnspecified
+ || aspects.mTransfer == ColorAspects::TransferUnspecified) {
+ ALOGW("expected specified color aspects (%u:%u:%u:%u)",
+ aspects.mRange, aspects.mPrimaries, aspects.mMatrixCoeffs, aspects.mTransfer);
+ }
+
+ // default to video range and transfer
+ ColorRange range = kColorRangeLimited;
+ ColorTransfer transfer = kColorTransferSMPTE_170M;
+ (void)sRanges.map(aspects.mRange, &range);
+ (void)sTransfers.map(aspects.mTransfer, &transfer);
+
+ // resolve the standard: exact table match, then the pairwise fallback
+ // table, then a primaries-only fallback (order matters here)
+ ColorStandard standard = kColorStandardBT709;
+ auto pair = std::make_pair(aspects.mPrimaries, aspects.mMatrixCoeffs);
+ if (!sStandards.map(pair, &standard)) {
+ if (!sStandardFallbacks.map(pair, &standard)) {
+ (void)sStandardPrimariesFallbacks.map(aspects.mPrimaries, &standard);
+
+ if (aspects.mMatrixCoeffs == ColorAspects::MatrixBT2020Constant) {
+ range = kColorRangeFull;
+ }
+ }
+ }
+
+ // assemble the V0 bit-field dataspace, then map to a legacy constant if
+ // an exact legacy equivalent exists (reverse lookup)
+ android_dataspace dataSpace = (android_dataspace)(
+ (range << HAL_DATASPACE_RANGE_SHIFT) | (standard << HAL_DATASPACE_STANDARD_SHIFT) |
+ (transfer << HAL_DATASPACE_TRANSFER_SHIFT));
+ (void)sLegacyDataSpaceToV0.rlookup(dataSpace, &dataSpace);
+
+ if (!mayExpand) {
+ // update codec aspects based on dataspace
+ convertPlatformColorAspectsToCodecAspects(range, standard, transfer, aspects);
+ }
+ return dataSpace;
+}
+
+// static
+// Reads the three color-config keys from |format|, substituting the
+// Unspecified value for any missing entry.
+void ColorUtils::getColorConfigFromFormat(
+ const sp<AMessage> &format, int32_t *range, int32_t *standard, int32_t *transfer) {
+ if (!format->findInt32("color-range", range)) {
+ *range = kColorRangeUnspecified;
+ }
+ if (!format->findInt32("color-standard", standard)) {
+ *standard = kColorStandardUnspecified;
+ }
+ if (!format->findInt32("color-transfer", transfer)) {
+ *transfer = kColorTransferUnspecified;
+ }
+}
+
+// static
+// Copies only the color-config entries present in |source| into |target|;
+// keys absent from |source| are left untouched in |target|.
+void ColorUtils::copyColorConfig(const sp<AMessage> &source, sp<AMessage> &target) {
+ // 0 values are unspecified
+ int32_t value;
+ if (source->findInt32("color-range", &value)) {
+ target->setInt32("color-range", value);
+ }
+ if (source->findInt32("color-standard", &value)) {
+ target->setInt32("color-standard", value);
+ }
+ if (source->findInt32("color-transfer", &value)) {
+ target->setInt32("color-transfer", value);
+ }
+}
+
+// static
+// Extracts codec color aspects from a format message. Illegal platform
+// values are logged and become Unspecified (see the unwrap helpers); the
+// remaining aspects are still filled in.
+void ColorUtils::getColorAspectsFromFormat(const sp<AMessage> &format, ColorAspects &aspects) {
+ int32_t range, standard, transfer;
+ getColorConfigFromFormat(format, &range, &standard, &transfer);
+
+ if (convertPlatformColorAspectsToCodecAspects(
+ range, standard, transfer, aspects) != OK) {
+ ALOGW("Ignoring illegal color aspects(R:%d(%s), S:%d(%s), T:%d(%s))",
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+ // Invalid values were converted to unspecified !params!, but otherwise were not changed
+ // For encoders, we leave these as is. For decoders, we will use default values.
+ }
+ ALOGV("Got color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "from format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
+// static
+// Stores codec color aspects into a format message. Unspecified (0) values
+// are skipped unless |force| is set, so existing entries are not clobbered
+// with "unspecified" by default.
+void ColorUtils::setColorAspectsIntoFormat(
+ const ColorAspects &aspects, sp<AMessage> &format, bool force) {
+ int32_t range = 0, standard = 0, transfer = 0;
+ convertCodecColorAspectsToPlatformAspects(aspects, &range, &standard, &transfer);
+ // save set values to base output format
+ // (encoder input format will read back actually supported values by the codec)
+ if (range != 0 || force) {
+ format->setInt32("color-range", range);
+ }
+ if (standard != 0 || force) {
+ format->setInt32("color-standard", standard);
+ }
+ if (transfer != 0 || force) {
+ format->setInt32("color-transfer", transfer);
+ }
+ ALOGV("Setting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) "
+ "into format (out:R:%d(%s), S:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ range, asString((ColorRange)range),
+ standard, asString((ColorStandard)standard),
+ transfer, asString((ColorTransfer)transfer));
+}
+
+} // namespace android
+
diff --git a/media/libstagefright/include/AACExtractor.h b/media/libstagefright/include/AACExtractor.h
index e231e62..bd4c41c 100644
--- a/media/libstagefright/include/AACExtractor.h
+++ b/media/libstagefright/include/AACExtractor.h
@@ -36,6 +36,7 @@
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "AACExtractor"; }
protected:
virtual ~AACExtractor();
diff --git a/media/libstagefright/include/AMRExtractor.h b/media/libstagefright/include/AMRExtractor.h
index 0770397..ba2b674 100644
--- a/media/libstagefright/include/AMRExtractor.h
+++ b/media/libstagefright/include/AMRExtractor.h
@@ -36,6 +36,7 @@
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "AMRExtractor"; }
protected:
virtual ~AMRExtractor();
diff --git a/media/libstagefright/include/AVIExtractor.h b/media/libstagefright/include/AVIExtractor.h
index ff5dcb5..3be505c 100644
--- a/media/libstagefright/include/AVIExtractor.h
+++ b/media/libstagefright/include/AVIExtractor.h
@@ -36,6 +36,7 @@
size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "AVIExtractor"; }
protected:
virtual ~AVIExtractor();
diff --git a/media/libstagefright/include/DRMExtractor.h b/media/libstagefright/include/DRMExtractor.h
index a035d8c..3dc7df8 100644
--- a/media/libstagefright/include/DRMExtractor.h
+++ b/media/libstagefright/include/DRMExtractor.h
@@ -38,6 +38,7 @@
virtual sp<IMediaSource> getTrack(size_t index);
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "DRMExtractor"; }
protected:
virtual ~DRMExtractor();
diff --git a/media/libstagefright/include/DataConverter.h b/media/libstagefright/include/DataConverter.h
new file mode 100644
index 0000000..8d67921
--- /dev/null
+++ b/media/libstagefright/include/DataConverter.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef STAGEFRIGHT_DATACONVERTER_H_
+#define STAGEFRIGHT_DATACONVERTER_H_
+
+#include <utils/Errors.h>
+#include <utils/RefBase.h>
+
+#include <media/stagefright/MediaDefs.h>
+
+namespace android {
+
+struct ABuffer;
+
+// DataConverter base class, defaults to memcpy
+struct DataConverter : public RefBase {
+ virtual size_t sourceSize(size_t targetSize); // will clamp to SIZE_MAX
+ virtual size_t targetSize(size_t sourceSize); // will clamp to SIZE_MAX
+
+ // Converts |source| into |target|; the size relationship between the two
+ // is expressed by sourceSize()/targetSize() above.
+ status_t convert(const sp<ABuffer> &source, sp<ABuffer> &target);
+ virtual ~DataConverter();
+
+protected:
+ // Conversion implementation; overridden by subclasses (base is a memcpy).
+ virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target);
+};
+
+// SampleConverterBase uses a ratio to calculate the source and target sizes
+// based on source and target sample sizes.
+struct SampleConverterBase : public DataConverter {
+ virtual size_t sourceSize(size_t targetSize);
+ virtual size_t targetSize(size_t sourceSize);
+
+protected:
+ virtual status_t safeConvert(const sp<ABuffer> &source, sp<ABuffer> &target) = 0;
+
+ // sourceSize = sourceSampleSize / targetSampleSize * targetSize
+ SampleConverterBase(uint32_t sourceSampleSize, uint32_t targetSampleSize)
+ : mSourceSampleSize(sourceSampleSize),
+ mTargetSampleSize(targetSampleSize) { }
+ size_t mSourceSampleSize; // bytes per source sample
+ size_t mTargetSampleSize; // bytes per target sample
+};
+
+// AudioConverter converts between audio PCM formats
+struct AudioConverter : public SampleConverterBase {
+ // return nullptr if conversion is not needed or not supported
+ static AudioConverter *Create(AudioEncoding source, AudioEncoding target);
+
+protected:
+ virtual status_t safeConvert(const sp<ABuffer> &src, sp<ABuffer> &tgt);
+
+private:
+ // Constructed only via Create() above.
+ AudioConverter(
+ AudioEncoding source, size_t sourceSample,
+ AudioEncoding target, size_t targetSample)
+ : SampleConverterBase(sourceSample, targetSample),
+ mFrom(source),
+ mTo(target) { }
+ AudioEncoding mFrom;
+ AudioEncoding mTo;
+};
+
+} // namespace android
+
+#endif
diff --git a/media/libstagefright/include/FLACExtractor.h b/media/libstagefright/include/FLACExtractor.h
index a6e6c1d..5d030b1 100644
--- a/media/libstagefright/include/FLACExtractor.h
+++ b/media/libstagefright/include/FLACExtractor.h
@@ -36,6 +36,7 @@
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "FLACExtractor"; }
protected:
virtual ~FLACExtractor();
diff --git a/media/libstagefright/include/MPEG2PSExtractor.h b/media/libstagefright/include/MPEG2PSExtractor.h
index e815f0e..c8abfb6 100644
--- a/media/libstagefright/include/MPEG2PSExtractor.h
+++ b/media/libstagefright/include/MPEG2PSExtractor.h
@@ -40,6 +40,7 @@
virtual sp<MetaData> getMetaData();
virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG2PSExtractor"; }
protected:
virtual ~MPEG2PSExtractor();
diff --git a/media/libstagefright/include/MPEG2TSExtractor.h b/media/libstagefright/include/MPEG2TSExtractor.h
index 9907572..34b9606 100644
--- a/media/libstagefright/include/MPEG2TSExtractor.h
+++ b/media/libstagefright/include/MPEG2TSExtractor.h
@@ -25,6 +25,8 @@
#include <utils/KeyedVector.h>
#include <utils/Vector.h>
+#include "mpeg2ts/ATSParser.h"
+
namespace android {
struct AMessage;
@@ -44,6 +46,7 @@
virtual sp<MetaData> getMetaData();
virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG2TSExtractor"; }
private:
friend struct MPEG2TSSource;
@@ -54,6 +57,10 @@
sp<ATSParser> mParser;
+ // Used to remember SyncEvent occurred in feedMore() when called from init(),
+ // because init() needs to update |mSourceImpls| before adding SyncPoint.
+ ATSParser::SyncEvent mLastSyncEvent;
+
Vector<sp<AnotherPacketSource> > mSourceImpls;
Vector<KeyedVector<int64_t, off64_t> > mSyncPoints;
@@ -64,7 +71,14 @@
off64_t mOffset;
void init();
- status_t feedMore();
+ // Try to feed more data from source to parser.
+ // |isInit| means this function is called inside init(). This is a signal to
+ // save SyncEvent so that init() can add SyncPoint after it updates |mSourceImpls|.
+ // This function returns OK if expected amount of data is fed from DataSource to
+ // parser and is successfully parsed. Otherwise, various error codes could be
+ // returned, e.g., ERROR_END_OF_STREAM, or no data available from DataSource, or
+ // the data has syntax error during parsing, etc.
+ status_t feedMore(bool isInit = false);
status_t seek(int64_t seekTimeUs,
const MediaSource::ReadOptions::SeekMode& seekMode);
status_t queueDiscontinuityForSeek(int64_t actualSeekTimeUs);
@@ -72,6 +86,9 @@
status_t feedUntilBufferAvailable(const sp<AnotherPacketSource> &impl);
+ // Add a SyncPoint derived from |event|.
+ void addSyncPoint_l(const ATSParser::SyncEvent &event);
+
DISALLOW_EVIL_CONSTRUCTORS(MPEG2TSExtractor);
};
diff --git a/media/libstagefright/include/MPEG4Extractor.h b/media/libstagefright/include/MPEG4Extractor.h
index cff976d..18b14e1 100644
--- a/media/libstagefright/include/MPEG4Extractor.h
+++ b/media/libstagefright/include/MPEG4Extractor.h
@@ -58,6 +58,7 @@
virtual sp<MetaData> getMetaData();
virtual uint32_t flags() const;
+ virtual const char * name() { return "MPEG4Extractor"; }
// for DRM
virtual char* getDrmTrackInfo(size_t trackID, int *len);
@@ -109,6 +110,7 @@
status_t readMetaData();
status_t parseChunk(off64_t *offset, int depth);
status_t parseITunesMetaData(off64_t offset, size_t size);
+ status_t parseColorInfo(off64_t offset, size_t size);
status_t parse3GPPMetaData(off64_t offset, size_t size, int depth);
void parseID3v2MetaData(off64_t offset);
status_t parseQTMetaKey(off64_t data_offset, size_t data_size);
diff --git a/media/libstagefright/include/MidiExtractor.h b/media/libstagefright/include/MidiExtractor.h
index 333277b..5a7d90e 100644
--- a/media/libstagefright/include/MidiExtractor.h
+++ b/media/libstagefright/include/MidiExtractor.h
@@ -60,6 +60,7 @@
virtual sp<MetaData> getTrackMetaData(size_t index, uint32_t flags);
virtual sp<MetaData> getMetaData();
+ virtual const char * name() { return "MidiExtractor"; }
protected:
virtual ~MidiExtractor();
diff --git a/media/libstagefright/include/OMX.h b/media/libstagefright/include/OMX.h
index c715939..9726741 100644
--- a/media/libstagefright/include/OMX.h
+++ b/media/libstagefright/include/OMX.h
@@ -94,7 +94,7 @@
const sp<GraphicBuffer> &graphicBuffer, buffer_id buffer);
virtual status_t createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type);
diff --git a/media/libstagefright/include/OMXNodeInstance.h b/media/libstagefright/include/OMXNodeInstance.h
index 732894c..25c3773 100644
--- a/media/libstagefright/include/OMXNodeInstance.h
+++ b/media/libstagefright/include/OMXNodeInstance.h
@@ -29,8 +29,6 @@
struct OMXMaster;
class GraphicBufferSource;
-status_t StatusFromOMXError(OMX_ERRORTYPE err);
-
struct OMXNodeInstance {
OMXNodeInstance(
OMX *owner, const sp<IOMXObserver> &observer, const char *name);
@@ -82,7 +80,8 @@
OMX::buffer_id buffer);
status_t createInputSurface(
- OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer,
+ OMX_U32 portIndex, android_dataspace dataSpace,
+ sp<IGraphicBufferProducer> *bufferProducer,
MetadataBufferType *type);
static status_t createPersistentInputSurface(
@@ -95,6 +94,8 @@
status_t signalEndOfInputStream();
+ void signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2);
+
status_t allocateSecureBuffer(
OMX_U32 portIndex, size_t size, OMX::buffer_id *buffer,
void **buffer_data, native_handle_t **native_handle);
diff --git a/media/libstagefright/include/SoftOMXComponent.h b/media/libstagefright/include/SoftOMXComponent.h
index a808611..3ab6f88 100644
--- a/media/libstagefright/include/SoftOMXComponent.h
+++ b/media/libstagefright/include/SoftOMXComponent.h
@@ -168,6 +168,22 @@
DISALLOW_EVIL_CONSTRUCTORS(SoftOMXComponent);
};
+template<typename T>
+bool isValidOMXParam(T *a) {
+ static_assert(offsetof(typeof(*a), nSize) == 0, "nSize not at offset 0");
+ static_assert(std::is_same< decltype(a->nSize), OMX_U32>::value, "nSize has wrong type");
+ static_assert(offsetof(typeof(*a), nVersion) == 4, "nVersion not at offset 4");
+ static_assert(std::is_same< decltype(a->nVersion), OMX_VERSIONTYPE>::value,
+ "nVersion has wrong type");
+
+ if (a->nSize < sizeof(*a)) {
+ ALOGE("b/27207275: need %zu, got %u", sizeof(*a), a->nSize);
+ android_errorWriteLog(0x534e4554, "27207275");
+ return false;
+ }
+ return true;
+}
+
} // namespace android
#endif // SOFT_OMX_COMPONENT_H_
diff --git a/media/libstagefright/include/SoftwareRenderer.h b/media/libstagefright/include/SoftwareRenderer.h
index 757b308..258511a 100644
--- a/media/libstagefright/include/SoftwareRenderer.h
+++ b/media/libstagefright/include/SoftwareRenderer.h
@@ -54,6 +54,7 @@
int32_t mCropLeft, mCropTop, mCropRight, mCropBottom;
int32_t mCropWidth, mCropHeight;
int32_t mRotationDegrees;
+ android_dataspace mDataSpace;
FrameRenderTracker mRenderTracker;
SoftwareRenderer(const SoftwareRenderer &);
diff --git a/media/libstagefright/matroska/MatroskaExtractor.h b/media/libstagefright/matroska/MatroskaExtractor.h
index a1d6b00..592e7cf 100644
--- a/media/libstagefright/matroska/MatroskaExtractor.h
+++ b/media/libstagefright/matroska/MatroskaExtractor.h
@@ -46,6 +46,8 @@
virtual uint32_t flags() const;
+ virtual const char * name() { return "MatroskaExtractor"; }
+
protected:
virtual ~MatroskaExtractor();
diff --git a/media/libstagefright/mpeg2ts/ATSParser.cpp b/media/libstagefright/mpeg2ts/ATSParser.cpp
index 2790a0e..fb43a38 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.cpp
+++ b/media/libstagefright/mpeg2ts/ATSParser.cpp
@@ -122,7 +122,7 @@
void setPID(unsigned pid) { mElementaryPID = pid; }
// Parse the payload and set event when PES with a sync frame is detected.
- // This method knows when a PES starts; so record mPesStartOffset in that
+ // This method knows when a PES starts; so record mPesStartOffsets in that
// case.
status_t parse(
unsigned continuity_counter,
@@ -157,7 +157,7 @@
bool mEOSReached;
uint64_t mPrevPTS;
- off64_t mPesStartOffset;
+ List<off64_t> mPesStartOffsets;
ElementaryStreamQueue *mQueue;
@@ -205,16 +205,19 @@
};
ATSParser::SyncEvent::SyncEvent(off64_t offset)
- : mInit(false), mOffset(offset), mTimeUs(0) {}
+ : mHasReturnedData(false), mOffset(offset), mTimeUs(0) {}
void ATSParser::SyncEvent::init(off64_t offset, const sp<MediaSource> &source,
int64_t timeUs) {
- mInit = true;
+ mHasReturnedData = true;
mOffset = offset;
mMediaSource = source;
mTimeUs = timeUs;
}
+void ATSParser::SyncEvent::reset() {
+ mHasReturnedData = false;
+}
////////////////////////////////////////////////////////////////////////////////
ATSParser::Program::Program(
@@ -661,6 +664,7 @@
ALOGI("discontinuity on stream pid 0x%04x", mElementaryPID);
mPayloadStarted = false;
+ mPesStartOffsets.clear();
mBuffer->setRange(0, 0);
mExpectedContinuityCounter = -1;
@@ -697,7 +701,7 @@
}
mPayloadStarted = true;
- mPesStartOffset = offset;
+ mPesStartOffsets.push_back(offset);
}
if (!mPayloadStarted) {
@@ -772,6 +776,7 @@
}
mPayloadStarted = false;
+ mPesStartOffsets.clear();
mEOSReached = false;
mBuffer->setRange(0, 0);
@@ -1105,7 +1110,9 @@
int64_t timeUs;
if (accessUnit->meta()->findInt64("timeUs", &timeUs)) {
found = true;
- event->init(mPesStartOffset, mSource, timeUs);
+ off64_t pesStartOffset = *mPesStartOffsets.begin();
+ event->init(pesStartOffset, mSource, timeUs);
+ mPesStartOffsets.erase(mPesStartOffsets.begin());
}
}
}
diff --git a/media/libstagefright/mpeg2ts/ATSParser.h b/media/libstagefright/mpeg2ts/ATSParser.h
index 430a8d5..fb03cd6 100644
--- a/media/libstagefright/mpeg2ts/ATSParser.h
+++ b/media/libstagefright/mpeg2ts/ATSParser.h
@@ -69,16 +69,18 @@
void init(off64_t offset, const sp<MediaSource> &source,
int64_t timeUs);
- bool isInit() { return mInit; }
- off64_t getOffset() { return mOffset; }
- const sp<MediaSource> &getMediaSource() { return mMediaSource; }
- int64_t getTimeUs() { return mTimeUs; }
+ bool hasReturnedData() const { return mHasReturnedData; }
+ void reset();
+ off64_t getOffset() const { return mOffset; }
+ const sp<MediaSource> &getMediaSource() const { return mMediaSource; }
+ int64_t getTimeUs() const { return mTimeUs; }
private:
- bool mInit;
+ bool mHasReturnedData;
/*
- * mInit == false: the current offset
- * mInit == true: the start offset of sync payload
+ * mHasReturnedData == false: the current offset (or undefined if the returned data
+ has been invalidated via reset())
+ * mHasReturnedData == true: the start offset of sync payload
*/
off64_t mOffset;
/* The media source object for this event. */
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index daf6b3d..96ca405 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -592,6 +592,7 @@
mFormat->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
mFormat->setInt32(kKeyChannelCount, 2);
mFormat->setInt32(kKeySampleRate, 48000);
+ mFormat->setInt32(kKeyPcmEncoding, kAudioEncodingPcm16bit);
}
static const size_t kFramesPerAU = 80;
diff --git a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
index 0b456c3..fb5e079 100644
--- a/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
+++ b/media/libstagefright/mpeg2ts/MPEG2TSExtractor.cpp
@@ -112,6 +112,7 @@
MPEG2TSExtractor::MPEG2TSExtractor(const sp<DataSource> &source)
: mDataSource(source),
mParser(new ATSParser),
+ mLastSyncEvent(0),
mOffset(0) {
init();
}
@@ -149,8 +150,10 @@
bool haveVideo = false;
int64_t startTime = ALooper::GetNowUs();
- while (feedMore() == OK) {
+ while (feedMore(true /* isInit */) == OK) {
if (haveAudio && haveVideo) {
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
break;
}
if (!haveVideo) {
@@ -181,6 +184,9 @@
}
}
+ addSyncPoint_l(mLastSyncEvent);
+ mLastSyncEvent.reset();
+
// Wait only for 2 seconds to detect audio/video streams.
if (ALooper::GetNowUs() - startTime > 2000000ll) {
break;
@@ -245,7 +251,7 @@
haveAudio, haveVideo, ALooper::GetNowUs() - startTime);
}
-status_t MPEG2TSExtractor::feedMore() {
+status_t MPEG2TSExtractor::feedMore(bool isInit) {
Mutex::Autolock autoLock(mLock);
uint8_t packet[kTSPacketSize];
@@ -261,29 +267,41 @@
ATSParser::SyncEvent event(mOffset);
mOffset += n;
status_t err = mParser->feedTSPacket(packet, kTSPacketSize, &event);
- if (event.isInit()) {
- for (size_t i = 0; i < mSourceImpls.size(); ++i) {
- if (mSourceImpls[i].get() == event.getMediaSource().get()) {
- KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
- syncPoints->add(event.getTimeUs(), event.getOffset());
- // We're keeping the size of the sync points at most 5mb per a track.
- size_t size = syncPoints->size();
- if (size >= 327680) {
- int64_t firstTimeUs = syncPoints->keyAt(0);
- int64_t lastTimeUs = syncPoints->keyAt(size - 1);
- if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
- syncPoints->removeItemsAt(0, 4096);
- } else {
- syncPoints->removeItemsAt(size - 4096, 4096);
- }
- }
- break;
- }
+ if (event.hasReturnedData()) {
+ if (isInit) {
+ mLastSyncEvent = event;
+ } else {
+ addSyncPoint_l(event);
}
}
return err;
}
+void MPEG2TSExtractor::addSyncPoint_l(const ATSParser::SyncEvent &event) {
+ if (!event.hasReturnedData()) {
+ return;
+ }
+
+ for (size_t i = 0; i < mSourceImpls.size(); ++i) {
+ if (mSourceImpls[i].get() == event.getMediaSource().get()) {
+ KeyedVector<int64_t, off64_t> *syncPoints = &mSyncPoints.editItemAt(i);
+ syncPoints->add(event.getTimeUs(), event.getOffset());
+ // We're keeping the size of the sync points at most 5MB per track.
+ size_t size = syncPoints->size();
+ if (size >= 327680) {
+ int64_t firstTimeUs = syncPoints->keyAt(0);
+ int64_t lastTimeUs = syncPoints->keyAt(size - 1);
+ if (event.getTimeUs() - firstTimeUs > lastTimeUs - event.getTimeUs()) {
+ syncPoints->removeItemsAt(0, 4096);
+ } else {
+ syncPoints->removeItemsAt(size - 4096, 4096);
+ }
+ }
+ break;
+ }
+ }
+}
+
uint32_t MPEG2TSExtractor::flags() const {
return CAN_PAUSE | CAN_SEEK_BACKWARD | CAN_SEEK_FORWARD;
}
diff --git a/media/libstagefright/omx/Android.mk b/media/libstagefright/omx/Android.mk
index 804afe9..e4fbd81 100644
--- a/media/libstagefright/omx/Android.mk
+++ b/media/libstagefright/omx/Android.mk
@@ -7,6 +7,7 @@
OMX.cpp \
OMXMaster.cpp \
OMXNodeInstance.cpp \
+ OMXUtils.cpp \
SimpleSoftOMXComponent.cpp \
SoftOMXComponent.cpp \
SoftOMXPlugin.cpp \
diff --git a/media/libstagefright/omx/GraphicBufferSource.cpp b/media/libstagefright/omx/GraphicBufferSource.cpp
index acdc4b0..995e50e 100644
--- a/media/libstagefright/omx/GraphicBufferSource.cpp
+++ b/media/libstagefright/omx/GraphicBufferSource.cpp
@@ -20,12 +20,16 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#define STRINGIFY_ENUMS // for asString in HardwareAPI.h/VideoAPI.h
+
#include "GraphicBufferSource.h"
+#include "OMXUtils.h"
#include <OMX_Core.h>
#include <OMX_IndexExt.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ColorUtils.h>
#include <media/hardware/MetadataBufferType.h>
#include <ui/GraphicBuffer.h>
@@ -39,6 +43,8 @@
static const bool EXTRA_CHECK = true;
+static const OMX_U32 kPortIndexInput = 0;
+
GraphicBufferSource::PersistentProxyListener::PersistentProxyListener(
const wp<IGraphicBufferConsumer> &consumer,
const wp<ConsumerListener>& consumerListener) :
@@ -117,6 +123,7 @@
mNodeInstance(nodeInstance),
mExecuting(false),
mSuspended(false),
+ mLastDataSpace(HAL_DATASPACE_UNKNOWN),
mIsPersistent(false),
mConsumer(consumer),
mNumFramesAvailable(0),
@@ -189,6 +196,8 @@
return;
}
+ memset(&mColorAspects, 0, sizeof(mColorAspects));
+
CHECK(mInitCheck == NO_ERROR);
}
@@ -215,6 +224,8 @@
mNumFramesAvailable, mCodecBuffers.size());
CHECK(!mExecuting);
mExecuting = true;
+ mLastDataSpace = HAL_DATASPACE_UNKNOWN;
+ ALOGV("clearing last dataSpace");
// Start by loading up as many buffers as possible. We want to do this,
// rather than just submit the first buffer, to avoid a degenerate case:
@@ -495,6 +506,76 @@
}
}
+void GraphicBufferSource::onDataSpaceChanged_l(
+ android_dataspace dataSpace, android_pixel_format pixelFormat) {
+ ALOGD("got buffer with new dataSpace #%x", dataSpace);
+ mLastDataSpace = dataSpace;
+
+ if (ColorUtils::convertDataSpaceToV0(dataSpace)) {
+ ColorAspects aspects = mColorAspects; // initially requested aspects
+
+ // request color aspects to encode
+ OMX_INDEXTYPE index;
+ status_t err = mNodeInstance->getExtensionIndex(
+ "OMX.google.android.index.describeColorAspects", &index);
+ if (err == OK) {
+ // V0 dataspace
+ DescribeColorAspectsParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = kPortIndexInput;
+ params.nDataSpace = mLastDataSpace;
+ params.nPixelFormat = pixelFormat;
+ params.bDataSpaceChanged = OMX_TRUE;
+ params.sAspects = mColorAspects;
+
+ err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ aspects = params.sAspects;
+ ALOGD("Codec resolved it to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ params.sAspects.mRange, asString(params.sAspects.mRange),
+ params.sAspects.mPrimaries, asString(params.sAspects.mPrimaries),
+ params.sAspects.mMatrixCoeffs, asString(params.sAspects.mMatrixCoeffs),
+ params.sAspects.mTransfer, asString(params.sAspects.mTransfer),
+ err, asString(err));
+ } else {
+ params.sAspects = aspects;
+ err = OK;
+ }
+ params.bDataSpaceChanged = OMX_FALSE;
+ for (int triesLeft = 2; --triesLeft >= 0; ) {
+ status_t err = mNodeInstance->setConfig(index, ¶ms, sizeof(params));
+ if (err == OK) {
+ err = mNodeInstance->getConfig(index, ¶ms, sizeof(params));
+ }
+ if (err != OK || !ColorUtils::checkIfAspectsChangedAndUnspecifyThem(
+ params.sAspects, aspects)) {
+ // if we can't set or get color aspects, still communicate dataspace to client
+ break;
+ }
+
+ ALOGW_IF(triesLeft == 0, "Codec repeatedly changed requested ColorAspects.");
+ }
+ }
+
+ ALOGV("Set color aspects to (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s)) err=%d(%s)",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer),
+ err, asString(err));
+
+ // signal client that the dataspace has changed; this will update the output format
+ // TODO: we should tie this to an output buffer somehow, and signal the change
+ // just before the output buffer is returned to the client, but there are many
+ // ways this could fail (e.g. flushing), and we are not yet supporting this scenario.
+
+ mNodeInstance->signalEvent(
+ OMX_EventDataSpaceChanged, dataSpace,
+ (aspects.mRange << 24) | (aspects.mPrimaries << 16)
+ | (aspects.mMatrixCoeffs << 8) | aspects.mTransfer);
+ }
+}
+
bool GraphicBufferSource::fillCodecBuffer_l() {
CHECK(mExecuting && mNumFramesAvailable > 0);
@@ -534,6 +615,12 @@
mBufferSlot[item.mSlot] = item.mGraphicBuffer;
}
+ if (item.mDataSpace != mLastDataSpace) {
+ onDataSpaceChanged_l(
+ item.mDataSpace, (android_pixel_format)mBufferSlot[item.mSlot]->getPixelFormat());
+ }
+
+
err = UNKNOWN_ERROR;
// only submit sample if start time is unspecified, or sample
@@ -925,6 +1012,13 @@
ALOG_ASSERT(false, "GraphicBufferSource can't consume sideband streams");
}
+void GraphicBufferSource::setDefaultDataSpace(android_dataspace dataSpace) {
+ // no need for mutex as we are not yet running
+ ALOGD("setting dataspace: %#x", dataSpace);
+ mConsumer->setDefaultBufferDataSpace(dataSpace);
+ mLastDataSpace = dataSpace;
+}
+
status_t GraphicBufferSource::setRepeatPreviousFrameDelayUs(
int64_t repeatAfterUs) {
Mutex::Autolock autoLock(mMutex);
@@ -974,19 +1068,29 @@
(skipFramesBeforeUs > 0) ? (skipFramesBeforeUs * 1000) : -1ll;
}
-status_t GraphicBufferSource::setTimeLapseUs(int64_t* data) {
+status_t GraphicBufferSource::setTimeLapseConfig(const TimeLapseConfig &config) {
Mutex::Autolock autoLock(mMutex);
- if (mExecuting || data[0] <= 0ll || data[1] <= 0ll) {
+ if (mExecuting || config.mTimePerFrameUs <= 0ll || config.mTimePerCaptureUs <= 0ll) {
return INVALID_OPERATION;
}
- mTimePerFrameUs = data[0];
- mTimePerCaptureUs = data[1];
+ mTimePerFrameUs = config.mTimePerFrameUs;
+ mTimePerCaptureUs = config.mTimePerCaptureUs;
return OK;
}
+void GraphicBufferSource::setColorAspects(const ColorAspects &aspects) {
+ Mutex::Autolock autoLock(mMutex);
+ mColorAspects = aspects;
+ ALOGD("requesting color aspects (R:%d(%s), P:%d(%s), M:%d(%s), T:%d(%s))",
+ aspects.mRange, asString(aspects.mRange),
+ aspects.mPrimaries, asString(aspects.mPrimaries),
+ aspects.mMatrixCoeffs, asString(aspects.mMatrixCoeffs),
+ aspects.mTransfer, asString(aspects.mTransfer));
+}
+
void GraphicBufferSource::onMessageReceived(const sp<AMessage> &msg) {
switch (msg->what()) {
case kWhatRepeatLastFrame:
diff --git a/media/libstagefright/omx/GraphicBufferSource.h b/media/libstagefright/omx/GraphicBufferSource.h
index 7150684..c8b0e62 100644
--- a/media/libstagefright/omx/GraphicBufferSource.h
+++ b/media/libstagefright/omx/GraphicBufferSource.h
@@ -23,6 +23,7 @@
#include <utils/RefBase.h>
#include <OMX_Core.h>
+#include <VideoAPI.h>
#include "../include/OMXNodeInstance.h"
#include <media/stagefright/foundation/ABase.h>
#include <media/stagefright/foundation/AHandlerReflector.h>
@@ -73,6 +74,9 @@
return mProducer;
}
+ // Sets the default buffer data space
+ void setDefaultDataSpace(android_dataspace dataSpace);
+
// This is called when OMX transitions to OMX_StateExecuting, which means
// we can start handing it buffers. If we already have buffers of data
// sitting in the BufferQueue, this will send them to the codec.
@@ -130,17 +134,23 @@
// When set, the max frame rate fed to the encoder will be capped at maxFps.
status_t setMaxFps(float maxFps);
+ struct TimeLapseConfig {
+ int64_t mTimePerFrameUs; // the time (us) between two frames for playback
+ int64_t mTimePerCaptureUs; // the time (us) between two frames for capture
+ };
+
// Sets the time lapse (or slow motion) parameters.
- // data[0] is the time (us) between two frames for playback
- // data[1] is the time (us) between two frames for capture
// When set, the sample's timestamp will be modified to playback framerate,
// and capture timestamp will be modified to capture rate.
- status_t setTimeLapseUs(int64_t* data);
+ status_t setTimeLapseConfig(const TimeLapseConfig &config);
// Sets the start time us (in system time), samples before which should
// be dropped and not submitted to encoder
void setSkipFramesBeforeUs(int64_t startTimeUs);
+ // Sets the desired color aspects, e.g. to be used when producer does not specify a dataspace.
+ void setColorAspects(const ColorAspects &aspects);
+
protected:
// BufferQueue::ConsumerListener interface, called when a new frame of
// data is available. If we're executing and a codec buffer is
@@ -238,6 +248,9 @@
bool repeatLatestBuffer_l();
int64_t getTimestamp(const BufferItem &item);
+ // called when the data space of the input buffer changes
+ void onDataSpaceChanged_l(android_dataspace dataSpace, android_pixel_format pixelFormat);
+
// Lock, covers all member variables.
mutable Mutex mMutex;
@@ -252,6 +265,9 @@
bool mSuspended;
+ // Last dataspace seen
+ android_dataspace mLastDataSpace;
+
// Our BufferQueue interfaces. mProducer is passed to the producer through
// getIGraphicBufferProducer, and mConsumer is used internally to retrieve
// the buffers queued by the producer.
@@ -321,6 +337,7 @@
int64_t mPrevFrameUs;
MetadataBufferType mMetadataBufferType;
+ ColorAspects mColorAspects;
void onMessageReceived(const sp<AMessage> &msg);
diff --git a/media/libstagefright/omx/OMX.cpp b/media/libstagefright/omx/OMX.cpp
index 6be289b..4d89ba1 100644
--- a/media/libstagefright/omx/OMX.cpp
+++ b/media/libstagefright/omx/OMX.cpp
@@ -31,6 +31,7 @@
#include <utils/threads.h>
#include "OMXMaster.h"
+#include "OMXUtils.h"
#include <OMX_AsString.h>
#include <OMX_Component.h>
@@ -182,7 +183,12 @@
Mutex::Autolock autoLock(mLock);
ssize_t index = mLiveNodes.indexOfKey(the_late_who);
- CHECK(index >= 0);
+
+ if (index < 0) {
+ ALOGE("b/27597103, nonexistent observer on binderDied");
+ android_errorWriteLog(0x534e4554, "27597103");
+ return;
+ }
instance = mLiveNodes.editValueAt(index);
mLiveNodes.removeItemsAt(index);
@@ -483,7 +489,7 @@
}
status_t OMX::createInputSurface(
- node_id node, OMX_U32 port_index,
+ node_id node, OMX_U32 port_index, android_dataspace dataSpace,
sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
OMXNodeInstance *instance = findInstance(node);
@@ -492,7 +498,7 @@
}
return instance->createInputSurface(
- port_index, bufferProducer, type);
+ port_index, dataSpace, bufferProducer, type);
}
status_t OMX::createPersistentInputSurface(
diff --git a/media/libstagefright/omx/OMXNodeInstance.cpp b/media/libstagefright/omx/OMXNodeInstance.cpp
index 7380ab2..6b7a871 100644
--- a/media/libstagefright/omx/OMXNodeInstance.cpp
+++ b/media/libstagefright/omx/OMXNodeInstance.cpp
@@ -22,6 +22,7 @@
#include "../include/OMXNodeInstance.h"
#include "OMXMaster.h"
+#include "OMXUtils.h"
#include "GraphicBufferSource.h"
#include <OMX_Component.h>
@@ -89,16 +90,6 @@
// TRICKY: this is needed so formatting macros expand before substitution
#define WITH_STATS(fmt, ...) WITH_STATS_WRAPPER(fmt, ##__VA_ARGS__)
-template<class T>
-static void InitOMXParams(T *params) {
- memset(params, 0, sizeof(T));
- params->nSize = sizeof(T);
- params->nVersion.s.nVersionMajor = 1;
- params->nVersion.s.nVersionMinor = 0;
- params->nVersion.s.nRevision = 0;
- params->nVersion.s.nStep = 0;
-}
-
namespace android {
struct BufferMeta {
@@ -245,20 +236,6 @@
return mNodeID;
}
-status_t StatusFromOMXError(OMX_ERRORTYPE err) {
- switch (err) {
- case OMX_ErrorNone:
- return OK;
- case OMX_ErrorUnsupportedSetting:
- case OMX_ErrorUnsupportedIndex:
- return ERROR_UNSUPPORTED;
- case OMX_ErrorInsufficientResources:
- return NO_MEMORY;
- default:
- return UNKNOWN_ERROR;
- }
-}
-
status_t OMXNodeInstance::freeNode(OMXMaster *master) {
CLOG_LIFE(freeNode, "handle=%p", mHandle);
static int32_t kMaxNumIterations = 10;
@@ -468,38 +445,40 @@
OMX_INDEXTYPE index;
OMX_ERRORTYPE err = OMX_GetExtensionIndex(mHandle, name, &index);
- if (err != OMX_ErrorNone) {
+ if (err == OMX_ErrorNone) {
+ EnableAndroidNativeBuffersParams params;
+ InitOMXParams(¶ms);
+ params.nPortIndex = portIndex;
+ params.enable = enable;
+
+ err = OMX_SetParameter(mHandle, index, ¶ms);
+ CLOG_IF_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d", name, index,
+ portString(portIndex), portIndex, enable);
+ if (!graphic) {
+ if (err == OMX_ErrorNone) {
+ mSecureBufferType[portIndex] =
+ enable ? kSecureBufferTypeNativeHandle : kSecureBufferTypeOpaque;
+ } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
+ mSecureBufferType[portIndex] = kSecureBufferTypeOpaque;
+ }
+ }
+ } else {
CLOG_ERROR_IF(enable, getExtensionIndex, err, "%s", name);
- return StatusFromOMXError(err);
- }
-
- EnableAndroidNativeBuffersParams params;
- InitOMXParams(¶ms);
- params.nPortIndex = portIndex;
- params.enable = enable;
-
- err = OMX_SetParameter(mHandle, index, ¶ms);
- CLOG_IF_ERROR(setParameter, err, "%s(%#x): %s:%u en=%d", name, index,
- portString(portIndex), portIndex, enable);
- if (!graphic) {
- if (err == OK) {
- mSecureBufferType[portIndex] =
- enable ? kSecureBufferTypeNativeHandle : kSecureBufferTypeOpaque;
- } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
-
- // BEGIN ALTERNATE SIGNALING FOR USING NATIVE HANDLES
+ if (!graphic) {
+ // Extension not supported, check for manual override with system property
+ // This is a temporary workaround until partners support the OMX extension
char value[PROPERTY_VALUE_MAX];
if (property_get("media.mediadrmservice.enable", value, NULL)
- && (!strcmp("1", value) || !strcasecmp("true", value))) {
+ && (!strcmp("1", value) || !strcasecmp("true", value))) {
CLOG_CONFIG(enableNativeBuffers, "system property override: using native-handles");
mSecureBufferType[portIndex] = kSecureBufferTypeNativeHandle;
- return OK;
+ } else if (mSecureBufferType[portIndex] == kSecureBufferTypeUnknown) {
+ mSecureBufferType[portIndex] = kSecureBufferTypeOpaque;
}
- // END ALTERNATE SIGNALING FOR USING NATIVE HANDLES
-
- mSecureBufferType[portIndex] = kSecureBufferTypeOpaque;
+ err = OMX_ErrorNone;
}
}
+
return StatusFromOMXError(err);
}
@@ -954,7 +933,8 @@
}
status_t OMXNodeInstance::createInputSurface(
- OMX_U32 portIndex, sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
+ OMX_U32 portIndex, android_dataspace dataSpace,
+ sp<IGraphicBufferProducer> *bufferProducer, MetadataBufferType *type) {
if (bufferProducer == NULL) {
ALOGE("b/25884056");
return BAD_VALUE;
@@ -967,6 +947,8 @@
return err;
}
+ mGraphicBufferSource->setDefaultDataSpace(dataSpace);
+
*bufferProducer = mGraphicBufferSource->getIGraphicBufferProducer();
return OK;
}
@@ -1009,6 +991,10 @@
return createGraphicBufferSource(portIndex, bufferConsumer, type);
}
+void OMXNodeInstance::signalEvent(OMX_EVENTTYPE event, OMX_U32 arg1, OMX_U32 arg2) {
+ mOwner->OnEvent(mNodeID, event, arg1, arg2, NULL);
+}
+
status_t OMXNodeInstance::signalEndOfInputStream() {
// For non-Surface input, the MediaCodec should convert the call to a
// pair of requests (dequeue input buffer, queue input buffer with EOS
@@ -1380,6 +1366,16 @@
}
}
+template<typename T>
+static bool getInternalOption(
+ const void *data, size_t size, T *out) {
+ if (size != sizeof(T)) {
+ return false;
+ }
+ *out = *(T*)data;
+ return true;
+}
+
status_t OMXNodeInstance::setInternalOption(
OMX_U32 portIndex,
IOMX::InternalOptionType type,
@@ -1394,6 +1390,7 @@
case IOMX::INTERNAL_OPTION_MAX_FPS:
case IOMX::INTERNAL_OPTION_START_TIME:
case IOMX::INTERNAL_OPTION_TIME_LAPSE:
+ case IOMX::INTERNAL_OPTION_COLOR_ASPECTS:
{
const sp<GraphicBufferSource> &bufferSource =
getGraphicBufferSource();
@@ -1404,58 +1401,63 @@
}
if (type == IOMX::INTERNAL_OPTION_SUSPEND) {
- if (size != sizeof(bool)) {
+ bool suspend;
+ if (!getInternalOption(data, size, &suspend)) {
return INVALID_OPERATION;
}
- bool suspend = *(bool *)data;
CLOG_CONFIG(setInternalOption, "suspend=%d", suspend);
bufferSource->suspend(suspend);
- } else if (type ==
- IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY){
- if (size != sizeof(int64_t)) {
+ } else if (type == IOMX::INTERNAL_OPTION_REPEAT_PREVIOUS_FRAME_DELAY) {
+ int64_t delayUs;
+ if (!getInternalOption(data, size, &delayUs)) {
return INVALID_OPERATION;
}
- int64_t delayUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "delayUs=%lld", (long long)delayUs);
return bufferSource->setRepeatPreviousFrameDelayUs(delayUs);
- } else if (type ==
- IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP){
- if (size != sizeof(int64_t)) {
+ } else if (type == IOMX::INTERNAL_OPTION_MAX_TIMESTAMP_GAP) {
+ int64_t maxGapUs;
+ if (!getInternalOption(data, size, &maxGapUs)) {
return INVALID_OPERATION;
}
- int64_t maxGapUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "gapUs=%lld", (long long)maxGapUs);
return bufferSource->setMaxTimestampGapUs(maxGapUs);
} else if (type == IOMX::INTERNAL_OPTION_MAX_FPS) {
- if (size != sizeof(float)) {
+ float maxFps;
+ if (!getInternalOption(data, size, &maxFps)) {
return INVALID_OPERATION;
}
- float maxFps = *(float *)data;
CLOG_CONFIG(setInternalOption, "maxFps=%f", maxFps);
return bufferSource->setMaxFps(maxFps);
} else if (type == IOMX::INTERNAL_OPTION_START_TIME) {
- if (size != sizeof(int64_t)) {
+ int64_t skipFramesBeforeUs;
+ if (!getInternalOption(data, size, &skipFramesBeforeUs)) {
return INVALID_OPERATION;
}
- int64_t skipFramesBeforeUs = *(int64_t *)data;
CLOG_CONFIG(setInternalOption, "beforeUs=%lld", (long long)skipFramesBeforeUs);
bufferSource->setSkipFramesBeforeUs(skipFramesBeforeUs);
- } else { // IOMX::INTERNAL_OPTION_TIME_LAPSE
- if (size != sizeof(int64_t) * 2) {
+ } else if (type == IOMX::INTERNAL_OPTION_TIME_LAPSE) {
+ GraphicBufferSource::TimeLapseConfig config;
+ if (!getInternalOption(data, size, &config)) {
return INVALID_OPERATION;
}
- int64_t timePerFrameUs = ((int64_t *)data)[0];
- int64_t timePerCaptureUs = ((int64_t *)data)[1];
CLOG_CONFIG(setInternalOption, "perFrameUs=%lld perCaptureUs=%lld",
- (long long)timePerFrameUs, (long long)timePerCaptureUs);
+ (long long)config.mTimePerFrameUs, (long long)config.mTimePerCaptureUs);
- bufferSource->setTimeLapseUs((int64_t *)data);
+ return bufferSource->setTimeLapseConfig(config);
+ } else if (type == IOMX::INTERNAL_OPTION_COLOR_ASPECTS) {
+ ColorAspects aspects;
+ if (!getInternalOption(data, size, &aspects)) {
+ return INVALID_OPERATION;
+ }
+
+ CLOG_CONFIG(setInternalOption, "setting color aspects");
+ bufferSource->setColorAspects(aspects);
}
return OK;
diff --git a/media/libstagefright/omx/OMXUtils.cpp b/media/libstagefright/omx/OMXUtils.cpp
new file mode 100644
index 0000000..799696c
--- /dev/null
+++ b/media/libstagefright/omx/OMXUtils.cpp
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "OMXUtils"
+
+#include <string.h>
+
+#include <media/hardware/HardwareAPI.h>
+#include <media/stagefright/MediaErrors.h>
+#include "OMXUtils.h"
+
+namespace android {
+
+status_t StatusFromOMXError(OMX_ERRORTYPE err) {
+ switch (err) {
+ case OMX_ErrorNone:
+ return OK;
+ case OMX_ErrorUnsupportedSetting:
+ case OMX_ErrorUnsupportedIndex:
+ return ERROR_UNSUPPORTED; // this is a media specific error
+ case OMX_ErrorInsufficientResources:
+ return NO_MEMORY;
+ case OMX_ErrorInvalidComponentName:
+ case OMX_ErrorComponentNotFound:
+ return NAME_NOT_FOUND;
+ default:
+ return UNKNOWN_ERROR;
+ }
+}
+
+/**************************************************************************************************/
+
+DescribeColorFormatParams::DescribeColorFormatParams(const DescribeColorFormat2Params ¶ms) {
+ InitOMXParams(this);
+
+ eColorFormat = params.eColorFormat;
+ nFrameWidth = params.nFrameWidth;
+ nFrameHeight = params.nFrameHeight;
+ nStride = params.nStride;
+ nSliceHeight = params.nSliceHeight;
+ bUsingNativeBuffers = params.bUsingNativeBuffers;
+ // we don't copy media images as this conversion is only used pre-query
+};
+
+void DescribeColorFormat2Params::initFromV1(const DescribeColorFormatParams ¶ms) {
+ InitOMXParams(this);
+
+ eColorFormat = params.eColorFormat;
+ nFrameWidth = params.nFrameWidth;
+ nFrameHeight = params.nFrameHeight;
+ nStride = params.nStride;
+ nSliceHeight = params.nSliceHeight;
+ bUsingNativeBuffers = params.bUsingNativeBuffers;
+ sMediaImage.initFromV1(params.sMediaImage);
+};
+
+void MediaImage2::initFromV1(const MediaImage &image) {
+ memset(this, 0, sizeof(*this));
+
+ if (image.mType != MediaImage::MEDIA_IMAGE_TYPE_YUV) {
+ mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ return;
+ }
+
+ for (size_t ix = 0; ix < image.mNumPlanes; ++ix) {
+ if (image.mPlane[ix].mHorizSubsampling > INT32_MAX
+ || image.mPlane[ix].mVertSubsampling > INT32_MAX) {
+ mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
+ return;
+ }
+ }
+
+ mType = (MediaImage2::Type)image.mType;
+ mNumPlanes = image.mNumPlanes;
+ mWidth = image.mWidth;
+ mHeight = image.mHeight;
+ mBitDepth = image.mBitDepth;
+ mBitDepthAllocated = 8;
+ for (size_t ix = 0; ix < image.mNumPlanes; ++ix) {
+ mPlane[ix].mOffset = image.mPlane[ix].mOffset;
+ mPlane[ix].mColInc = image.mPlane[ix].mColInc;
+ mPlane[ix].mRowInc = image.mPlane[ix].mRowInc;
+ mPlane[ix].mHorizSubsampling = (int32_t)image.mPlane[ix].mHorizSubsampling;
+ mPlane[ix].mVertSubsampling = (int32_t)image.mPlane[ix].mVertSubsampling;
+ }
+}
+
+/**************************************************************************************************/
+
+} // namespace android
+
diff --git a/media/libstagefright/omx/OMXUtils.h b/media/libstagefright/omx/OMXUtils.h
new file mode 100644
index 0000000..0c5e537
--- /dev/null
+++ b/media/libstagefright/omx/OMXUtils.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef OMX_UTILS_H_
+#define OMX_UTILS_H_
+
+/***** DO NOT USE THIS INCLUDE!!! INTERNAL ONLY!!! UNLESS YOU RESIDE IN media/libstagefright *****/
+
+// OMXUtils contains omx-specific utility functions for stagefright/omx library
+// TODO: move ACodec and OMXClient into this library
+
+namespace android {
+
+template<class T>
+static void InitOMXParams(T *params) {
+ memset(params, 0, sizeof(T));
+ params->nSize = sizeof(T);
+ params->nVersion.s.nVersionMajor = 1;
+ params->nVersion.s.nVersionMinor = 0;
+ params->nVersion.s.nRevision = 0;
+ params->nVersion.s.nStep = 0;
+}
+
+status_t StatusFromOMXError(OMX_ERRORTYPE err);
+
+} // namespace android
+
+#endif
diff --git a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
index e6a0c49..60c1e2e 100644
--- a/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
+++ b/media/libstagefright/omx/SimpleSoftOMXComponent.cpp
@@ -125,6 +125,10 @@
OMX_PARAM_PORTDEFINITIONTYPE *defParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ if (!isValidOMXParam(defParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (defParams->nPortIndex >= mPorts.size()
|| defParams->nSize
!= sizeof(OMX_PARAM_PORTDEFINITIONTYPE)) {
@@ -152,6 +156,10 @@
OMX_PARAM_PORTDEFINITIONTYPE *defParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
+ if (!isValidOMXParam(defParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (defParams->nPortIndex >= mPorts.size()) {
return OMX_ErrorBadPortIndex;
}
diff --git a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
index e1f4125..d3553bd 100644
--- a/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
+++ b/media/libstagefright/omx/SoftVideoDecoderOMXComponent.cpp
@@ -264,6 +264,10 @@
OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > kMaxPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -292,6 +296,10 @@
OMX_VIDEO_PARAM_PROFILELEVELTYPE *profileLevel =
(OMX_VIDEO_PARAM_PROFILELEVELTYPE *) params;
+ if (!isValidOMXParam(profileLevel)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (profileLevel->nPortIndex != kInputPortIndex) {
ALOGE("Invalid port index: %" PRIu32, profileLevel->nPortIndex);
return OMX_ErrorUnsupportedIndex;
@@ -322,6 +330,10 @@
const OMX_PARAM_COMPONENTROLETYPE *roleParams =
(const OMX_PARAM_COMPONENTROLETYPE *)params;
+ if (!isValidOMXParam(roleParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (strncmp((const char *)roleParams->cRole,
mComponentRole,
OMX_MAX_STRINGNAME_SIZE - 1)) {
@@ -336,6 +348,10 @@
OMX_VIDEO_PARAM_PORTFORMATTYPE *formatParams =
(OMX_VIDEO_PARAM_PORTFORMATTYPE *)params;
+ if (!isValidOMXParam(formatParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (formatParams->nPortIndex > kMaxPortIndex) {
return OMX_ErrorBadPortIndex;
}
@@ -363,6 +379,11 @@
{
const PrepareForAdaptivePlaybackParams* adaptivePlaybackParams =
(const PrepareForAdaptivePlaybackParams *)params;
+
+ if (!isValidOMXParam(adaptivePlaybackParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
mIsAdaptive = adaptivePlaybackParams->bEnable;
if (mIsAdaptive) {
mAdaptiveMaxWidth = adaptivePlaybackParams->nMaxFrameWidth;
@@ -381,6 +402,11 @@
{
OMX_PARAM_PORTDEFINITIONTYPE *newParams =
(OMX_PARAM_PORTDEFINITIONTYPE *)params;
+
+ if (!isValidOMXParam(newParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
OMX_VIDEO_PORTDEFINITIONTYPE *video_def = &newParams->format.video;
OMX_PARAM_PORTDEFINITIONTYPE *def = &editPortInfo(newParams->nPortIndex)->mDef;
@@ -429,6 +455,10 @@
{
OMX_CONFIG_RECTTYPE *rectParams = (OMX_CONFIG_RECTTYPE *)params;
+ if (!isValidOMXParam(rectParams)) {
+ return OMX_ErrorBadParameter;
+ }
+
if (rectParams->nPortIndex != kOutputPortIndex) {
return OMX_ErrorUndefined;
}
diff --git a/media/libstagefright/rtsp/MyHandler.h b/media/libstagefright/rtsp/MyHandler.h
index eedbb42..6fa83fa 100644
--- a/media/libstagefright/rtsp/MyHandler.h
+++ b/media/libstagefright/rtsp/MyHandler.h
@@ -235,7 +235,7 @@
sp<AMessage> msg = new AMessage('paus', this);
mPauseGeneration++;
msg->setInt32("pausecheck", mPauseGeneration);
- msg->post(kPauseDelayUs);
+ msg->post();
}
void resume() {
@@ -979,6 +979,11 @@
case 'accu':
{
+ if (mSeekPending) {
+ ALOGV("Stale access unit.");
+ break;
+ }
+
int32_t timeUpdate;
if (msg->findInt32("time-update", &timeUpdate) && timeUpdate) {
size_t trackIndex;
@@ -1070,6 +1075,12 @@
ALOGW("This is a live stream, ignoring pause request.");
break;
}
+
+ if (mPausing) {
+ ALOGV("This stream is already paused.");
+ break;
+ }
+
mCheckPending = true;
++mCheckGeneration;
mPausing = true;
diff --git a/media/libstagefright/wifi-display/source/PlaybackSession.cpp b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
index ed5a404..3ecb52b 100644
--- a/media/libstagefright/wifi-display/source/PlaybackSession.cpp
+++ b/media/libstagefright/wifi-display/source/PlaybackSession.cpp
@@ -957,10 +957,12 @@
format->setInt32("level-idc", levelIdc);
format->setInt32("constraint-set", constraintSet);
} else {
- format->setString(
- "mime",
- usePCMAudio
- ? MEDIA_MIMETYPE_AUDIO_RAW : MEDIA_MIMETYPE_AUDIO_AAC);
+ if (usePCMAudio) {
+ format->setInt32("pcm-encoding", kAudioEncodingPcm16bit);
+ format->setString("mime", MEDIA_MIMETYPE_AUDIO_RAW);
+ } else {
+ format->setString("mime", MEDIA_MIMETYPE_AUDIO_AAC);
+ }
}
notify = new AMessage(kWhatConverterNotify, this);
diff --git a/media/mediaserver/Android.mk b/media/mediaserver/Android.mk
index 107d2b6..2cec5d2 100644
--- a/media/mediaserver/Android.mk
+++ b/media/mediaserver/Android.mk
@@ -14,7 +14,8 @@
main_mediaserver.cpp
LOCAL_SHARED_LIBRARIES := \
- libcamera_metadata\
+ libcamera_metadata \
+ libcamera_client \
libcameraservice \
libresourcemanagerservice \
libcutils \
diff --git a/media/mediaserver/main_mediaserver.cpp b/media/mediaserver/main_mediaserver.cpp
index e9dede9..ecddc48 100644
--- a/media/mediaserver/main_mediaserver.cpp
+++ b/media/mediaserver/main_mediaserver.cpp
@@ -25,7 +25,6 @@
#include "RegisterExtensions.h"
// from LOCAL_C_INCLUDES
-#include "CameraService.h"
#include "IcuUtils.h"
#include "MediaPlayerService.h"
#include "ResourceManagerService.h"
diff --git a/media/mtp/Android.mk b/media/mtp/Android.mk
index 3af0956..cb7e4aa 100644
--- a/media/mtp/Android.mk
+++ b/media/mtp/Android.mk
@@ -37,7 +37,7 @@
LOCAL_MODULE:= libmtp
-LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST
+LOCAL_CFLAGS := -DMTP_DEVICE -DMTP_HOST -Wall -Wextra -Werror
LOCAL_SHARED_LIBRARIES := libutils libcutils liblog libusbhost libbinder
diff --git a/media/mtp/MtpDataPacket.cpp b/media/mtp/MtpDataPacket.cpp
index d0ec2a6..0381edf 100644
--- a/media/mtp/MtpDataPacket.cpp
+++ b/media/mtp/MtpDataPacket.cpp
@@ -456,7 +456,7 @@
// look at the length field to see if the data spans multiple packets
uint32_t totalLength = MtpPacket::getUInt32(MTP_CONTAINER_LENGTH_OFFSET);
allocate(totalLength);
- while (totalLength > length) {
+ while (totalLength > static_cast<uint32_t>(length)) {
request->buffer = mBuffer + length;
request->buffer_length = totalLength - length;
int ret = transfer(request);
diff --git a/media/mtp/MtpDevice.cpp b/media/mtp/MtpDevice.cpp
index 7d7ea13..bd89a51 100644
--- a/media/mtp/MtpDevice.cpp
+++ b/media/mtp/MtpDevice.cpp
@@ -505,6 +505,7 @@
int remaining = size;
mRequest.reset();
mRequest.setParameter(1, handle);
+ bool error = false;
if (sendRequest(MTP_OPERATION_SEND_OBJECT)) {
// send data header
writeDataHeader(MTP_OPERATION_SEND_OBJECT, remaining);
@@ -514,7 +515,9 @@
while (remaining > 0) {
int count = read(srcFD, buffer, sizeof(buffer));
if (count > 0) {
- int written = mData.write(mRequestOut, buffer, count);
+ if (mData.write(mRequestOut, buffer, count) < 0) {
+ error = true;
+ }
// FIXME check error
remaining -= count;
} else {
@@ -523,7 +526,7 @@
}
}
MtpResponseCode ret = readResponse();
- return (remaining == 0 && ret == MTP_RESPONSE_OK);
+ return (remaining == 0 && ret == MTP_RESPONSE_OK && !error);
}
bool MtpDevice::deleteObject(MtpObjectHandle handle) {
@@ -608,7 +611,7 @@
return NULL;
if (!readData())
return NULL;
- MtpResponseCode ret = readResponse();
+ const MtpResponseCode ret = readResponse();
if (ret == MTP_RESPONSE_OK) {
MtpProperty* property = new MtpProperty;
if (property->read(mData))
@@ -619,6 +622,25 @@
return NULL;
}
+bool MtpDevice::getObjectPropValue(MtpObjectHandle handle, MtpProperty* property) {
+ if (property == nullptr)
+ return false;
+
+ Mutex::Autolock autoLock(mMutex);
+
+ mRequest.reset();
+ mRequest.setParameter(1, handle);
+ mRequest.setParameter(2, property->getPropertyCode());
+ if (!sendRequest(MTP_OPERATION_GET_OBJECT_PROP_VALUE))
+ return false;
+ if (!readData())
+ return false;
+ if (readResponse() != MTP_RESPONSE_OK)
+ return false;
+ property->setCurrentValue(mData);
+ return true;
+}
+
bool MtpDevice::readObject(MtpObjectHandle handle,
ReadObjectCallback callback,
uint32_t expectedLength,
@@ -676,11 +698,6 @@
return false;
}
- if (mData.getContainerType() == MTP_CONTAINER_TYPE_RESPONSE) {
- mResponse.copyFrom(mData);
- return mResponse.getResponseCode() == MTP_RESPONSE_OK ? 0 : -1;
- }
-
// If object size 0 byte, the remote device can reply response packet
// without sending any data packets.
if (mData.getContainerType() == MTP_CONTAINER_TYPE_RESPONSE) {
diff --git a/media/mtp/MtpDevice.h b/media/mtp/MtpDevice.h
index ce60811..4be44cf 100644
--- a/media/mtp/MtpDevice.h
+++ b/media/mtp/MtpDevice.h
@@ -107,6 +107,9 @@
MtpProperty* getDevicePropDesc(MtpDeviceProperty code);
MtpProperty* getObjectPropDesc(MtpObjectProperty code, MtpObjectFormat format);
+ // Reads value of |property| for |handle|. Returns true on success.
+ bool getObjectPropValue(MtpObjectHandle handle, MtpProperty* property);
+
bool readObject(MtpObjectHandle handle, ReadObjectCallback callback,
uint32_t objectSize, void* clientData);
bool readObject(MtpObjectHandle handle, const char* destPath, int group,
diff --git a/media/mtp/MtpPacket.cpp b/media/mtp/MtpPacket.cpp
index bab1335..35ecb4f 100644
--- a/media/mtp/MtpPacket.cpp
+++ b/media/mtp/MtpPacket.cpp
@@ -69,7 +69,7 @@
char buffer[500];
char* bufptr = buffer;
- for (int i = 0; i < mPacketSize; i++) {
+ for (size_t i = 0; i < mPacketSize; i++) {
sprintf(bufptr, "%02X ", mBuffer[i]);
bufptr += strlen(bufptr);
if (i % DUMP_BYTES_PER_ROW == (DUMP_BYTES_PER_ROW - 1)) {
diff --git a/media/mtp/MtpProperty.cpp b/media/mtp/MtpProperty.cpp
index d58e2a4..039e4f5 100644
--- a/media/mtp/MtpProperty.cpp
+++ b/media/mtp/MtpProperty.cpp
@@ -236,6 +236,12 @@
mCurrentValue.str = NULL;
}
+void MtpProperty::setCurrentValue(MtpDataPacket& packet) {
+ free(mCurrentValue.str);
+ mCurrentValue.str = NULL;
+ readValue(packet, mCurrentValue);
+}
+
void MtpProperty::setFormRange(int min, int max, int step) {
mFormFlag = kFormRange;
switch (mType) {
@@ -544,7 +550,7 @@
MtpPropertyValue* result = new MtpPropertyValue[length];
for (uint32_t i = 0; i < length; i++)
if (!readValue(packet, result[i])) {
- delete result;
+ delete [] result;
return NULL;
}
return result;
diff --git a/media/mtp/MtpProperty.h b/media/mtp/MtpProperty.h
index 2e2ead1..03c08e1 100644
--- a/media/mtp/MtpProperty.h
+++ b/media/mtp/MtpProperty.h
@@ -81,13 +81,16 @@
int defaultValue = 0);
virtual ~MtpProperty();
- inline MtpPropertyCode getPropertyCode() const { return mCode; }
+ MtpPropertyCode getPropertyCode() const { return mCode; }
+ MtpDataType getDataType() const { return mType; }
bool read(MtpDataPacket& packet);
void write(MtpDataPacket& packet);
void setDefaultValue(const uint16_t* string);
void setCurrentValue(const uint16_t* string);
+ void setCurrentValue(MtpDataPacket& packet);
+ const MtpPropertyValue& getCurrentValue() const { return mCurrentValue; }
void setFormRange(int min, int max, int step);
void setFormEnum(const int* values, int count);
diff --git a/media/mtp/MtpRequestPacket.cpp b/media/mtp/MtpRequestPacket.cpp
index 40b11b0..471967f 100644
--- a/media/mtp/MtpRequestPacket.cpp
+++ b/media/mtp/MtpRequestPacket.cpp
@@ -44,11 +44,12 @@
}
// request packet should have 12 byte header followed by 0 to 5 32-bit arguments
- if (ret >= MTP_CONTAINER_HEADER_SIZE
- && ret <= MTP_CONTAINER_HEADER_SIZE + 5 * sizeof(uint32_t)
- && ((ret - MTP_CONTAINER_HEADER_SIZE) & 3) == 0) {
- mPacketSize = ret;
- mParameterCount = (ret - MTP_CONTAINER_HEADER_SIZE) / sizeof(uint32_t);
+ const size_t read_size = static_cast<size_t>(ret);
+ if (read_size >= MTP_CONTAINER_HEADER_SIZE
+ && read_size <= MTP_CONTAINER_HEADER_SIZE + 5 * sizeof(uint32_t)
+ && ((read_size - MTP_CONTAINER_HEADER_SIZE) & 3) == 0) {
+ mPacketSize = read_size;
+ mParameterCount = (read_size - MTP_CONTAINER_HEADER_SIZE) / sizeof(uint32_t);
} else {
ALOGE("Malformed MTP request packet");
ret = -1;
diff --git a/media/mtp/MtpServer.cpp b/media/mtp/MtpServer.cpp
index 3e0f239..90f1a77 100644
--- a/media/mtp/MtpServer.cpp
+++ b/media/mtp/MtpServer.cpp
@@ -923,9 +923,7 @@
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER; // image bit depth
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER; // parent
if (!mData.getUInt16(temp16)) return MTP_RESPONSE_INVALID_PARAMETER;
- uint16_t associationType = temp16;
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER;
- uint32_t associationDesc = temp32; // association desc
if (!mData.getUInt32(temp32)) return MTP_RESPONSE_INVALID_PARAMETER; // sequence number
MtpStringBuffer name, created, modified;
if (!mData.getString(name)) return MTP_RESPONSE_INVALID_PARAMETER; // file name
@@ -1102,7 +1100,6 @@
}
strcpy(fileSpot, name);
- int type = entry->d_type;
if (entry->d_type == DT_DIR) {
deleteRecursive(pathbuf);
rmdir(pathbuf);
diff --git a/media/ndk/Android.mk b/media/ndk/Android.mk
index 8dbb291..f287761 100644
--- a/media/ndk/Android.mk
+++ b/media/ndk/Android.mk
@@ -40,6 +40,8 @@
LOCAL_CFLAGS += -fvisibility=hidden -D EXPORT='__attribute__ ((visibility ("default")))'
+LOCAL_CFLAGS += -Werror
+
LOCAL_SHARED_LIBRARIES := \
libbinder \
libmedia \
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index d57a86e..387a302 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -203,10 +203,14 @@
mCbLooper = new ALooper;
mCbLooper->setName(consumerName.string());
- status_t ret = mCbLooper->start(
+ res = mCbLooper->start(
/*runOnCallingThread*/false,
/*canCallJava*/ true,
PRIORITY_DEFAULT);
+ if (res != OK) {
+ ALOGE("Failed to start the looper");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
mHandler = new CallbackHandler(this);
mCbLooper->registerHandler(mHandler);
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 5bb2dcd..2cb7cc7 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -145,10 +145,14 @@
AMediaCodec *mData = new AMediaCodec();
mData->mLooper = new ALooper;
mData->mLooper->setName("NDK MediaCodec_looper");
- status_t ret = mData->mLooper->start(
+ status_t res = mData->mLooper->start(
false, // runOnCallingThread
true, // canCallJava XXX
PRIORITY_FOREGROUND);
+ if (res != OK) {
+ ALOGE("Failed to start the looper");
+ return NULL;
+ }
if (name_is_type) {
mData->mCodec = android::MediaCodec::CreateByType(mData->mLooper, name, encoder);
} else {
diff --git a/media/ndk/NdkMediaCrypto.cpp b/media/ndk/NdkMediaCrypto.cpp
index af8ffea..67d12a4 100644
--- a/media/ndk/NdkMediaCrypto.cpp
+++ b/media/ndk/NdkMediaCrypto.cpp
@@ -37,15 +37,6 @@
using namespace android;
-static media_status_t translate_error(status_t err) {
- if (err == OK) {
- return AMEDIA_OK;
- }
- ALOGE("sf error code: %d", err);
- return AMEDIA_ERROR_UNKNOWN;
-}
-
-
static sp<ICrypto> makeCrypto() {
sp<IServiceManager> sm = defaultServiceManager();
sp<ICrypto> crypto;
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index ea47d57..e98b124 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -629,9 +629,9 @@
Vector<uint8_t> outputVec;
if (encrypt) {
- status_t status = mObj->mDrm->encrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
+ status = mObj->mDrm->encrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
} else {
- status_t status = mObj->mDrm->decrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
+ status = mObj->mDrm->decrypt(*iter, keyIdVec, inputVec, ivVec, outputVec);
}
if (status == OK) {
memcpy(output, outputVec.array(), outputVec.size());
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index b869c54..1118959 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -23,6 +23,7 @@
#include "NdkMediaFormatPriv.h"
+#include <inttypes.h>
#include <utils/Log.h>
#include <utils/StrongPointer.h>
#include <media/hardware/CryptoAPI.h>
@@ -72,7 +73,7 @@
EXPORT
media_status_t AMediaExtractor_setDataSourceFd(AMediaExtractor *mData, int fd, off64_t offset,
off64_t length) {
- ALOGV("setDataSource(%d, %lld, %lld)", fd, offset, length);
+ ALOGV("setDataSource(%d, %" PRId64 ", %" PRId64 ")", fd, offset, length);
return translate_error(mData->mImpl->setDataSource(fd, offset, length));
}
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 302e4dc..6700f6e 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -60,8 +60,9 @@
libcpustats \
libmedia_helper
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudioflinger
-LOCAL_32_BIT_ONLY := true
LOCAL_SRC_FILES += \
AudioWatchdog.cpp \
@@ -79,6 +80,8 @@
LOCAL_CFLAGS += -fvisibility=hidden
+LOCAL_CFLAGS += -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
#
@@ -107,6 +110,8 @@
LOCAL_MODULE_TAGS := optional
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
include $(CLEAR_VARS)
@@ -127,6 +132,8 @@
LOCAL_MODULE := libaudioresampler
+LOCAL_CFLAGS := -Werror -Wall
+
# uncomment to disable NEON on architectures that actually do support NEON, for benchmarking
#LOCAL_CFLAGS += -DUSE_NEON=false
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 0a3a832..016c25e 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -175,7 +175,7 @@
mHardwareStatus(AUDIO_HW_IDLE),
mMasterVolume(1.0f),
mMasterMute(false),
- mNextUniqueId(1),
+ mNextUniqueId(AUDIO_UNIQUE_ID_USE_MAX), // zero has a special meaning, so unavailable
mMode(AUDIO_MODE_INVALID),
mBtNrecIsOff(false),
mIsLowRamDevice(true),
@@ -219,8 +219,6 @@
void AudioFlinger::onFirstRef()
{
- int rc = 0;
-
Mutex::Autolock _l(mLock);
/* TODO: move all this work into an Init() function */
@@ -552,7 +550,7 @@
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
- int *sessionId,
+ audio_session_t *sessionId,
int clientUid,
status_t *status)
{
@@ -560,7 +558,7 @@
sp<TrackHandle> trackHandle;
sp<Client> client;
status_t lStatus;
- int lSessionId;
+ audio_session_t lSessionId;
// client AudioTrack::set already implements AUDIO_STREAM_DEFAULT => AUDIO_STREAM_MUSIC,
// but if someone uses binder directly they could bypass that and cause us to crash
@@ -611,6 +609,11 @@
PlaybackThread *effectThread = NULL;
if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+ if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ ALOGE("createTrack() invalid session ID %d", *sessionId);
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
lSessionId = *sessionId;
// check if an effect chain with the same session ID is present on another
// output thread and move it here.
@@ -626,7 +629,7 @@
}
} else {
// if no audio session id is provided, create one here
- lSessionId = nextUniqueId();
+ lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
if (sessionId != NULL) {
*sessionId = lSessionId;
}
@@ -662,7 +665,7 @@
}
}
- setAudioHwSyncForSession_l(thread, (audio_session_t)lSessionId);
+ setAudioHwSyncForSession_l(thread, lSessionId);
}
if (lStatus != NO_ERROR) {
@@ -686,12 +689,12 @@
return trackHandle;
}
-uint32_t AudioFlinger::sampleRate(audio_io_handle_t output) const
+uint32_t AudioFlinger::sampleRate(audio_io_handle_t ioHandle) const
{
Mutex::Autolock _l(mLock);
- PlaybackThread *thread = checkPlaybackThread_l(output);
+ ThreadBase *thread = checkThread_l(ioHandle);
if (thread == NULL) {
- ALOGW("sampleRate() unknown thread %d", output);
+ ALOGW("sampleRate() unknown thread %d", ioHandle);
return 0;
}
return thread->sampleRate();
@@ -708,12 +711,12 @@
return thread->format();
}
-size_t AudioFlinger::frameCount(audio_io_handle_t output) const
+size_t AudioFlinger::frameCount(audio_io_handle_t ioHandle) const
{
Mutex::Autolock _l(mLock);
- PlaybackThread *thread = checkPlaybackThread_l(output);
+ ThreadBase *thread = checkThread_l(ioHandle);
if (thread == NULL) {
- ALOGW("frameCount() unknown thread %d", output);
+ ALOGW("frameCount() unknown thread %d", ioHandle);
return 0;
}
// FIXME currently returns the normal mixer's frame count to avoid confusing legacy callers;
@@ -1070,10 +1073,10 @@
audio_devices_t device = thread->inDevice();
bool suspend = audio_is_bluetooth_sco_device(device) && btNrecIsOff;
// collect all of the thread's session IDs
- KeyedVector<int, bool> ids = thread->sessionIds();
+ KeyedVector<audio_session_t, bool> ids = thread->sessionIds();
// suspend effects associated with those session IDs
for (size_t j = 0; j < ids.size(); ++j) {
- int sessionId = ids.keyAt(j);
+ audio_session_t sessionId = ids.keyAt(j);
thread->setEffectSuspended(FX_IID_AEC,
suspend,
sessionId);
@@ -1241,8 +1244,6 @@
status_t AudioFlinger::getRenderPosition(uint32_t *halFrames, uint32_t *dspFrames,
audio_io_handle_t output) const
{
- status_t status;
-
Mutex::Autolock _l(mLock);
PlaybackThread *playbackThread = checkPlaybackThread_l(output);
@@ -1301,7 +1302,7 @@
bool removed = false;
for (size_t i = 0; i< num; ) {
AudioSessionRef *ref = mAudioSessionRefs.itemAt(i);
- ALOGV(" pid %d @ %d", ref->mPid, i);
+ ALOGV(" pid %d @ %zu", ref->mPid, i);
if (ref->mPid == pid) {
ALOGV(" removing entry for pid %d session %d", pid, ref->mSessionid);
mAudioSessionRefs.removeAt(i);
@@ -1339,7 +1340,8 @@
}
// getEffectThread_l() must be called with AudioFlinger::mLock held
-sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(int sessionId, int EffectId)
+sp<AudioFlinger::PlaybackThread> AudioFlinger::getEffectThread_l(audio_session_t sessionId,
+ int EffectId)
{
sp<PlaybackThread> thread;
@@ -1404,10 +1406,6 @@
// ----------------------------------------------------------------------------
-static bool deviceRequiresCaptureAudioOutputPermission(audio_devices_t inDevice) {
- return audio_is_remote_submix_device(inDevice);
-}
-
sp<IAudioRecord> AudioFlinger::openRecord(
audio_io_handle_t input,
uint32_t sampleRate,
@@ -1418,7 +1416,7 @@
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int clientUid,
- int *sessionId,
+ audio_session_t *sessionId,
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
@@ -1428,7 +1426,7 @@
sp<RecordHandle> recordHandle;
sp<Client> client;
status_t lStatus;
- int lSessionId;
+ audio_session_t lSessionId;
cblk.clear();
buffers.clear();
@@ -1481,10 +1479,14 @@
client = registerPid(pid);
if (sessionId != NULL && *sessionId != AUDIO_SESSION_ALLOCATE) {
+ if (audio_unique_id_get_use(*sessionId) != AUDIO_UNIQUE_ID_USE_SESSION) {
+ lStatus = BAD_VALUE;
+ goto Exit;
+ }
lSessionId = *sessionId;
} else {
// if no audio session id is provided, create one here
- lSessionId = nextUniqueId();
+ lSessionId = (audio_session_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
if (sessionId != NULL) {
*sessionId = lSessionId;
}
@@ -1499,7 +1501,7 @@
if (lStatus == NO_ERROR) {
// Check if one effect chain was awaiting for an AudioRecord to be created on this
// session and move it to this thread.
- sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)lSessionId);
+ sp<EffectChain> chain = getOrphanEffectChain_l(lSessionId);
if (chain != 0) {
Mutex::Autolock _l(thread->mLock);
thread->addEffectChain_l(chain);
@@ -1561,7 +1563,7 @@
int rc = load_audio_interface(name, &dev);
if (rc) {
- ALOGI("loadHwModule() error %d loading module %s ", rc, name);
+ ALOGE("loadHwModule() error %d loading module %s", rc, name);
return 0;
}
@@ -1569,7 +1571,7 @@
rc = dev->init_check(dev);
mHardwareStatus = AUDIO_HW_IDLE;
if (rc) {
- ALOGI("loadHwModule() init check error %d for module %s ", rc, name);
+ ALOGE("loadHwModule() init check error %d for module %s", rc, name);
return 0;
}
@@ -1617,7 +1619,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
- audio_module_handle_t handle = nextUniqueId();
+ audio_module_handle_t handle = nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
ALOGI("loadHwModule() Loaded %s audio interface from %s (%s) handle %d",
@@ -1761,9 +1763,13 @@
return 0;
}
- audio_hw_device_t *hwDevHal = outHwDev->hwDevice();
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = nextUniqueId();
+ *output = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
+ } else {
+ // Audio Policy does not currently request a specific output handle.
+ // If this is ever needed, see openInput_l() for example code.
+ ALOGE("openOutput_l requested output handle %d is not AUDIO_IO_HANDLE_NONE", *output);
+ return 0;
}
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
@@ -1804,7 +1810,8 @@
PlaybackThread *thread;
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
- thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady);
+ thread = new OffloadThread(this, outputStream, *output, devices, mSystemReady,
+ config->offload_info.bit_rate);
ALOGV("openOutput_l() created offload output: ID %d thread %p", *output, thread);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
|| !isValidPcmSinkFormat(config->format)
@@ -1880,7 +1887,7 @@
return AUDIO_IO_HANDLE_NONE;
}
- audio_io_handle_t id = nextUniqueId();
+ audio_io_handle_t id = nextUniqueId(AUDIO_UNIQUE_ID_USE_OUTPUT);
DuplicatingThread *thread = new DuplicatingThread(this, thread1, id, mSystemReady);
thread->addOutputTrack(thread2);
mPlaybackThreads.add(id, thread);
@@ -2034,8 +2041,18 @@
return 0;
}
+ // Audio Policy can request a specific handle for hardware hotword.
+ // The goal here is not to re-open an already opened input.
+ // It is to use a pre-assigned I/O handle.
if (*input == AUDIO_IO_HANDLE_NONE) {
- *input = nextUniqueId();
+ *input = nextUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
+ } else if (audio_unique_id_get_use(*input) != AUDIO_UNIQUE_ID_USE_INPUT) {
+ ALOGE("openInput_l() requested input handle %d is invalid", *input);
+ return 0;
+ } else if (mRecordThreads.indexOfKey(*input) >= 0) {
+ // This should not happen in a transient state with current design.
+ ALOGE("openInput_l() requested input handle %d is already assigned", *input);
+ return 0;
}
audio_config_t halconfig = *config;
@@ -2239,12 +2256,12 @@
}
-audio_unique_id_t AudioFlinger::newAudioUniqueId()
+audio_unique_id_t AudioFlinger::newAudioUniqueId(audio_unique_id_use_t use)
{
- return nextUniqueId();
+ return nextUniqueId(use);
}
-void AudioFlinger::acquireAudioSessionId(int audioSession, pid_t pid)
+void AudioFlinger::acquireAudioSessionId(audio_session_t audioSession, pid_t pid)
{
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
@@ -2278,7 +2295,7 @@
ALOGV(" added new entry for %d", audioSession);
}
-void AudioFlinger::releaseAudioSessionId(int audioSession, pid_t pid)
+void AudioFlinger::releaseAudioSessionId(audio_session_t audioSession, pid_t pid)
{
Mutex::Autolock _l(mLock);
pid_t caller = IPCThreadState::self()->getCallingPid();
@@ -2363,6 +2380,23 @@
return;
}
+// checkThread_l() must be called with AudioFlinger::mLock held
+AudioFlinger::ThreadBase *AudioFlinger::checkThread_l(audio_io_handle_t ioHandle) const
+{
+ ThreadBase *thread = NULL;
+ switch (audio_unique_id_get_use(ioHandle)) {
+ case AUDIO_UNIQUE_ID_USE_OUTPUT:
+ thread = checkPlaybackThread_l(ioHandle);
+ break;
+ case AUDIO_UNIQUE_ID_USE_INPUT:
+ thread = checkRecordThread_l(ioHandle);
+ break;
+ default:
+ break;
+ }
+ return thread;
+}
+
// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(audio_io_handle_t output) const
{
@@ -2382,9 +2416,14 @@
return mRecordThreads.valueFor(input).get();
}
-uint32_t AudioFlinger::nextUniqueId()
+audio_unique_id_t AudioFlinger::nextUniqueId(audio_unique_id_use_t use)
{
- return (uint32_t) android_atomic_inc(&mNextUniqueId);
+ int32_t base = android_atomic_add(AUDIO_UNIQUE_ID_USE_MAX, &mNextUniqueId);
+ // We have no way of recovering from wraparound
+ LOG_ALWAYS_FATAL_IF(base == 0, "unique ID overflow");
+ LOG_ALWAYS_FATAL_IF((unsigned) use >= (unsigned) AUDIO_UNIQUE_ID_USE_MAX);
+ ALOG_ASSERT(audio_unique_id_get_use(base) == AUDIO_UNIQUE_ID_USE_UNSPECIFIED);
+ return (audio_unique_id_t) (base | use);
}
AudioFlinger::PlaybackThread *AudioFlinger::primaryPlaybackThread_l() const
@@ -2414,8 +2453,8 @@
}
sp<AudioFlinger::SyncEvent> AudioFlinger::createSyncEvent(AudioSystem::sync_event_t type,
- int triggerSession,
- int listenerSession,
+ audio_session_t triggerSession,
+ audio_session_t listenerSession,
sync_event_callback_t callBack,
wp<RefBase> cookie)
{
@@ -2475,7 +2514,7 @@
const sp<IEffectClient>& effectClient,
int32_t priority,
audio_io_handle_t io,
- int sessionId,
+ audio_session_t sessionId,
const String16& opPackageName,
status_t *status,
int *id,
@@ -2636,7 +2675,7 @@
} else {
// Check if one effect chain was awaiting for an effect to be created on this
// session and used it instead of creating a new one.
- sp<EffectChain> chain = getOrphanEffectChain_l((audio_session_t)sessionId);
+ sp<EffectChain> chain = getOrphanEffectChain_l(sessionId);
if (chain != 0) {
Mutex::Autolock _l(thread->mLock);
thread->addEffectChain_l(chain);
@@ -2663,7 +2702,7 @@
return handle;
}
-status_t AudioFlinger::moveEffects(int sessionId, audio_io_handle_t srcOutput,
+status_t AudioFlinger::moveEffects(audio_session_t sessionId, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput)
{
ALOGV("moveEffects() session %d, srcOutput %d, dstOutput %d",
@@ -2690,7 +2729,7 @@
}
// moveEffectChain_l must be called with both srcThread and dstThread mLocks held
-status_t AudioFlinger::moveEffectChain_l(int sessionId,
+status_t AudioFlinger::moveEffectChain_l(audio_session_t sessionId,
AudioFlinger::PlaybackThread *srcThread,
AudioFlinger::PlaybackThread *dstThread,
bool reRegister)
@@ -2816,9 +2855,9 @@
status_t AudioFlinger::putOrphanEffectChain_l(const sp<AudioFlinger::EffectChain>& chain)
{
- audio_session_t session = (audio_session_t)chain->sessionId();
+ audio_session_t session = chain->sessionId();
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("putOrphanEffectChain_l session %d index %d", session, index);
+ ALOGV("putOrphanEffectChain_l session %d index %zd", session, index);
if (index >= 0) {
ALOGW("putOrphanEffectChain_l chain for session %d already present", session);
return ALREADY_EXISTS;
@@ -2831,7 +2870,7 @@
{
sp<EffectChain> chain;
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("getOrphanEffectChain_l session %d index %d", session, index);
+ ALOGV("getOrphanEffectChain_l session %d index %zd", session, index);
if (index >= 0) {
chain = mOrphanEffectChains.valueAt(index);
mOrphanEffectChains.removeItemsAt(index);
@@ -2842,13 +2881,13 @@
bool AudioFlinger::updateOrphanEffectChains(const sp<AudioFlinger::EffectModule>& effect)
{
Mutex::Autolock _l(mLock);
- audio_session_t session = (audio_session_t)effect->sessionId();
+ audio_session_t session = effect->sessionId();
ssize_t index = mOrphanEffectChains.indexOfKey(session);
- ALOGV("updateOrphanEffectChains session %d index %d", session, index);
+ ALOGV("updateOrphanEffectChains session %d index %zd", session, index);
if (index >= 0) {
sp<EffectChain> chain = mOrphanEffectChains.valueAt(index);
if (chain->removeEffect_l(effect) == 0) {
- ALOGV("updateOrphanEffectChains removing effect chain at index %d", index);
+ ALOGV("updateOrphanEffectChains removing effect chain at index %zd", index);
mOrphanEffectChains.removeItemsAt(index);
}
return true;
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index f2f11e3..f11fd1c 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -108,7 +108,7 @@
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
pid_t tid,
- int *sessionId,
+ audio_session_t *sessionId,
int clientUid,
status_t *status /*non-NULL*/);
@@ -122,15 +122,15 @@
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int clientUid,
- int *sessionId,
+ audio_session_t *sessionId,
size_t *notificationFrames,
sp<IMemory>& cblk,
sp<IMemory>& buffers,
status_t *status /*non-NULL*/);
- virtual uint32_t sampleRate(audio_io_handle_t output) const;
+ virtual uint32_t sampleRate(audio_io_handle_t ioHandle) const;
virtual audio_format_t format(audio_io_handle_t output) const;
- virtual size_t frameCount(audio_io_handle_t output) const;
+ virtual size_t frameCount(audio_io_handle_t ioHandle) const;
virtual uint32_t latency(audio_io_handle_t output) const;
virtual status_t setMasterVolume(float value);
@@ -196,11 +196,11 @@
virtual uint32_t getInputFramesLost(audio_io_handle_t ioHandle) const;
- virtual audio_unique_id_t newAudioUniqueId();
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
- virtual void acquireAudioSessionId(int audioSession, pid_t pid);
+ virtual void acquireAudioSessionId(audio_session_t audioSession, pid_t pid);
- virtual void releaseAudioSessionId(int audioSession, pid_t pid);
+ virtual void releaseAudioSessionId(audio_session_t audioSession, pid_t pid);
virtual status_t queryNumberEffects(uint32_t *numEffects) const;
@@ -214,13 +214,13 @@
const sp<IEffectClient>& effectClient,
int32_t priority,
audio_io_handle_t io,
- int sessionId,
+ audio_session_t sessionId,
const String16& opPackageName,
status_t *status /*non-NULL*/,
int *id,
int *enabled);
- virtual status_t moveEffects(int sessionId, audio_io_handle_t srcOutput,
+ virtual status_t moveEffects(audio_session_t sessionId, audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput);
virtual audio_module_handle_t loadHwModule(const char *name);
@@ -283,8 +283,8 @@
class SyncEvent : public RefBase {
public:
SyncEvent(AudioSystem::sync_event_t type,
- int triggerSession,
- int listenerSession,
+ audio_session_t triggerSession,
+ audio_session_t listenerSession,
sync_event_callback_t callBack,
wp<RefBase> cookie)
: mType(type), mTriggerSession(triggerSession), mListenerSession(listenerSession),
@@ -297,22 +297,22 @@
bool isCancelled() const { Mutex::Autolock _l(mLock); return (mCallback == NULL); }
void cancel() { Mutex::Autolock _l(mLock); mCallback = NULL; }
AudioSystem::sync_event_t type() const { return mType; }
- int triggerSession() const { return mTriggerSession; }
- int listenerSession() const { return mListenerSession; }
+ audio_session_t triggerSession() const { return mTriggerSession; }
+ audio_session_t listenerSession() const { return mListenerSession; }
wp<RefBase> cookie() const { return mCookie; }
private:
const AudioSystem::sync_event_t mType;
- const int mTriggerSession;
- const int mListenerSession;
+ const audio_session_t mTriggerSession;
+ const audio_session_t mListenerSession;
sync_event_callback_t mCallback;
const wp<RefBase> mCookie;
mutable Mutex mLock;
};
sp<SyncEvent> createSyncEvent(AudioSystem::sync_event_t type,
- int triggerSession,
- int listenerSession,
+ audio_session_t triggerSession,
+ audio_session_t listenerSession,
sync_event_callback_t callBack,
wp<RefBase> cookie);
@@ -505,7 +505,8 @@
public:
RecordHandle(const sp<RecordThread::RecordTrack>& recordTrack);
virtual ~RecordHandle();
- virtual status_t start(int /*AudioSystem::sync_event_t*/ event, int triggerSession);
+ virtual status_t start(int /*AudioSystem::sync_event_t*/ event,
+ audio_session_t triggerSession);
virtual void stop();
virtual status_t onTransact(
uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags);
@@ -517,6 +518,7 @@
};
+ ThreadBase *checkThread_l(audio_io_handle_t ioHandle) const;
PlaybackThread *checkPlaybackThread_l(audio_io_handle_t output) const;
MixerThread *checkMixerThread_l(audio_io_handle_t output) const;
RecordThread *checkRecordThread_l(audio_io_handle_t input) const;
@@ -547,25 +549,28 @@
const sp<AudioIoDescriptor>& ioDesc,
pid_t pid = 0);
- // Allocate an audio_io_handle_t, session ID, effect ID, or audio_module_handle_t.
+ // Allocate an audio_unique_id_t.
+ // Specific types are audio_io_handle_t, audio_session_t, effect ID (int),
+ // audio_module_handle_t, and audio_patch_handle_t.
// They all share the same ID space, but the namespaces are actually independent
// because there are separate KeyedVectors for each kind of ID.
- // The return value is uint32_t, but is cast to signed for some IDs.
+ // The return value is cast to the specific type depending on how the ID will be used.
// FIXME This API does not handle rollover to zero (for unsigned IDs),
// or from positive to negative (for signed IDs).
// Thus it may fail by returning an ID of the wrong sign,
// or by returning a non-unique ID.
- uint32_t nextUniqueId();
+ audio_unique_id_t nextUniqueId(audio_unique_id_use_t use);
- status_t moveEffectChain_l(int sessionId,
+ status_t moveEffectChain_l(audio_session_t sessionId,
PlaybackThread *srcThread,
PlaybackThread *dstThread,
bool reRegister);
+
// return thread associated with primary hardware device, or NULL
PlaybackThread *primaryPlaybackThread_l() const;
audio_devices_t primaryOutputDevice_l() const;
- sp<PlaybackThread> getEffectThread_l(int sessionId, int EffectId);
+ sp<PlaybackThread> getEffectThread_l(audio_session_t sessionId, int EffectId);
void removeClient_l(pid_t pid);
@@ -608,9 +613,9 @@
// for mAudioSessionRefs only
struct AudioSessionRef {
- AudioSessionRef(int sessionid, pid_t pid) :
+ AudioSessionRef(audio_session_t sessionid, pid_t pid) :
mSessionid(sessionid), mPid(pid), mCnt(1) {}
- const int mSessionid;
+ const audio_session_t mSessionid;
const pid_t mPid;
int mCnt;
};
diff --git a/services/audioflinger/AudioResamplerCubic.cpp b/services/audioflinger/AudioResamplerCubic.cpp
index 6a324ad..9c3c7cb 100644
--- a/services/audioflinger/AudioResamplerCubic.cpp
+++ b/services/audioflinger/AudioResamplerCubic.cpp
@@ -75,7 +75,6 @@
int16_t *in = mBuffer.i16;
while (outputIndex < outputSampleCount) {
- int32_t sample;
int32_t x;
// calculate output sample
diff --git a/services/audioflinger/AudioResamplerDyn.cpp b/services/audioflinger/AudioResamplerDyn.cpp
index 618b56c..e615700 100644
--- a/services/audioflinger/AudioResamplerDyn.cpp
+++ b/services/audioflinger/AudioResamplerDyn.cpp
@@ -282,7 +282,6 @@
return;
}
int32_t oldSampleRate = mInSampleRate;
- int32_t oldHalfNumCoefs = mConstants.mHalfNumCoefs;
uint32_t oldPhaseWrapLimit = mConstants.mL << mConstants.mShift;
bool useS32 = false;
diff --git a/services/audioflinger/AudioResamplerSinc.cpp b/services/audioflinger/AudioResamplerSinc.cpp
index f600d6c..320b8cf 100644
--- a/services/audioflinger/AudioResamplerSinc.cpp
+++ b/services/audioflinger/AudioResamplerSinc.cpp
@@ -141,6 +141,8 @@
// ----------------------------------------------------------------------------
+#if !USE_NEON
+
static inline
int32_t mulRL(int left, int32_t in, uint32_t vRL)
{
@@ -202,6 +204,8 @@
#endif
}
+#endif // !USE_NEON
+
// ----------------------------------------------------------------------------
AudioResamplerSinc::AudioResamplerSinc(
diff --git a/services/audioflinger/AutoPark.h b/services/audioflinger/AutoPark.h
new file mode 100644
index 0000000..e539e47
--- /dev/null
+++ b/services/audioflinger/AutoPark.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+namespace android {
+
+// T is FastMixer or FastCapture
+template<typename T> class AutoPark {
+public:
+
+ // Park the specific FastThread, which can be nullptr, in hot idle if not currently idling
+ AutoPark(const sp<T>& fastThread) : mFastThread(fastThread)
+ {
+ mPreviousCommand = FastThreadState::HOT_IDLE;
+ if (fastThread != nullptr) {
+ auto sq = mFastThread->sq();
+ FastThreadState *state = sq->begin();
+ if (!(state->mCommand & FastThreadState::IDLE)) {
+ mPreviousCommand = state->mCommand;
+ state->mCommand = FastThreadState::HOT_IDLE;
+ sq->end();
+ sq->push(sq->BLOCK_UNTIL_ACKED);
+ } else {
+ sq->end(false /*didModify*/);
+ }
+ }
+ }
+
+ // Remove the FastThread from hot idle if necessary
+ ~AutoPark()
+ {
+ if (!(mPreviousCommand & FastThreadState::IDLE)) {
+ ALOG_ASSERT(mFastThread != nullptr);
+ auto sq = mFastThread->sq();
+ FastThreadState *state = sq->begin();
+ ALOG_ASSERT(state->mCommand == FastThreadState::HOT_IDLE);
+ state->mCommand = mPreviousCommand;
+ sq->end();
+ sq->push(sq->BLOCK_UNTIL_PUSHED);
+ }
+ }
+
+private:
+ const sp<T> mFastThread;
+ // if !(mPreviousCommand & IDLE), holds the FastThread state to restore after new parameters are processed
+ FastThreadState::Command mPreviousCommand;
+}; // class AutoPark
+
+} // namespace android
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 949c91d..00304b2 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -59,7 +59,7 @@
const wp<AudioFlinger::EffectChain>& chain,
effect_descriptor_t *desc,
int id,
- int sessionId)
+ audio_session_t sessionId)
: mPinned(sessionId > AUDIO_SESSION_OUTPUT_MIX),
mThread(thread), mChain(chain), mId(id), mSessionId(sessionId),
mDescriptor(*desc),
@@ -138,7 +138,7 @@
} else {
status = ALREADY_EXISTS;
}
- ALOGV("addHandle() %p added handle %p in position %d", this, handle, i);
+ ALOGV("addHandle() %p added handle %p in position %zu", this, handle, i);
mHandles.insertAt(handle, i);
return status;
}
@@ -156,7 +156,7 @@
if (i == size) {
return size;
}
- ALOGV("removeHandle() %p removed handle %p in position %d", this, handle, i);
+ ALOGV("removeHandle() %p removed handle %p in position %zu", this, handle, i);
mHandles.removeAt(i);
// if removed from first place, move effect control from this handle to next in line
@@ -380,7 +380,7 @@
mConfig.inputCfg.buffer.frameCount = thread->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
- ALOGV("configure() %p thread %p buffer %p framecount %d",
+ ALOGV("configure() %p thread %p buffer %p framecount %zu",
this, thread.get(), mConfig.inputCfg.buffer.raw, mConfig.inputCfg.buffer.frameCount);
status_t cmdStatus;
@@ -677,7 +677,6 @@
if (isProcessEnabled() &&
((mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_CTRL ||
(mDescriptor.flags & EFFECT_FLAG_VOLUME_MASK) == EFFECT_FLAG_VOLUME_IND)) {
- status_t cmdStatus;
uint32_t volume[2];
uint32_t *pVolume = NULL;
uint32_t size = sizeof(volume);
@@ -934,7 +933,7 @@
int len = s.length();
if (s.length() > 2) {
- char *str = s.lockBuffer(len);
+ (void) s.lockBuffer(len);
s.unlockBuffer(len - 2);
}
return s;
@@ -1051,7 +1050,7 @@
mCblkMemory = client->heap()->allocate(EFFECT_PARAM_BUFFER_SIZE + bufOffset);
if (mCblkMemory == 0 ||
(mCblk = static_cast<effect_param_cblk_t *>(mCblkMemory->pointer())) == NULL) {
- ALOGE("not enough memory for Effect size=%u", EFFECT_PARAM_BUFFER_SIZE +
+ ALOGE("not enough memory for Effect size=%zu", EFFECT_PARAM_BUFFER_SIZE +
sizeof(effect_param_cblk_t));
mCblkMemory.clear();
return;
@@ -1341,7 +1340,7 @@
#define LOG_TAG "AudioFlinger::EffectChain"
AudioFlinger::EffectChain::EffectChain(ThreadBase *thread,
- int sessionId)
+ audio_session_t sessionId)
: mThread(thread), mSessionId(sessionId), mActiveTrackCnt(0), mTrackCnt(0), mTailBufferCount(0),
mOwnInBuffer(false), mVolumeCtrlIdx(-1), mLeftVolume(UINT_MAX), mRightVolume(UINT_MAX),
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX), mForceVolume(false)
@@ -1580,7 +1579,7 @@
}
mEffects.insertAt(effect, idx_insert);
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %d", effect.get(), this,
+ ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
idx_insert);
}
effect->configure();
@@ -1612,7 +1611,7 @@
}
}
mEffects.removeAt(i);
- ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %d", effect.get(),
+ ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
break;
}
@@ -1727,7 +1726,7 @@
String8 result;
size_t numEffects = mEffects.size();
- snprintf(buffer, SIZE, " %d effects for session %d\n", numEffects, mSessionId);
+ snprintf(buffer, SIZE, " %zu effects for session %d\n", numEffects, mSessionId);
result.append(buffer);
if (numEffects) {
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 6f93f81..bc9bc94 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -45,7 +45,7 @@
const wp<AudioFlinger::EffectChain>& chain,
effect_descriptor_t *desc,
int id,
- int sessionId);
+ audio_session_t sessionId);
virtual ~EffectModule();
enum effect_state {
@@ -76,7 +76,7 @@
uint32_t status() {
return mStatus;
}
- int sessionId() const {
+ audio_session_t sessionId() const {
return mSessionId;
}
status_t setEnabled(bool enabled);
@@ -141,7 +141,7 @@
wp<ThreadBase> mThread; // parent thread
wp<EffectChain> mChain; // parent effect chain
const int mId; // this instance unique ID
- const int mSessionId; // audio session ID
+ const audio_session_t mSessionId; // audio session ID
const effect_descriptor_t mDescriptor;// effect descriptor received from effect engine
effect_config_t mConfig; // input and output audio configuration
effect_handle_t mEffectInterface; // Effect module C API
@@ -235,15 +235,17 @@
// the EffectChain class represents a group of effects associated to one audio session.
// There can be any number of EffectChain objects per output mixer thread (PlaybackThread).
-// The EffecChain with session ID 0 contains global effects applied to the output mix.
+// The EffectChain with session ID AUDIO_SESSION_OUTPUT_MIX contains global effects applied
+// to the output mix.
// Effects in this chain can be insert or auxiliary. Effects in other chains (attached to
// tracks) are insert only. The EffectChain maintains an ordered list of effect module, the
-// order corresponding in the effect process order. When attached to a track (session ID != 0),
+// order corresponding in the effect process order. When attached to a track (session ID !=
+// AUDIO_SESSION_OUTPUT_MIX),
// it also provide it's own input buffer used by the track as accumulation buffer.
class EffectChain : public RefBase {
public:
- EffectChain(const wp<ThreadBase>& wThread, int sessionId);
- EffectChain(ThreadBase *thread, int sessionId);
+ EffectChain(const wp<ThreadBase>& wThread, audio_session_t sessionId);
+ EffectChain(ThreadBase *thread, audio_session_t sessionId);
virtual ~EffectChain();
// special key used for an entry in mSuspendedEffects keyed vector
@@ -266,8 +268,8 @@
status_t addEffect_l(const sp<EffectModule>& handle);
size_t removeEffect_l(const sp<EffectModule>& handle);
- int sessionId() const { return mSessionId; }
- void setSessionId(int sessionId) { mSessionId = sessionId; }
+ audio_session_t sessionId() const { return mSessionId; }
+ void setSessionId(audio_session_t sessionId) { mSessionId = sessionId; }
sp<EffectModule> getEffectFromDesc_l(effect_descriptor_t *descriptor);
sp<EffectModule> getEffectFromId_l(int id);
@@ -362,7 +364,7 @@
wp<ThreadBase> mThread; // parent mixer thread
Mutex mLock; // mutex protecting effect list
Vector< sp<EffectModule> > mEffects; // list of effect modules
- int mSessionId; // audio session ID
+ audio_session_t mSessionId; // audio session ID
int16_t *mInBuffer; // chain input buffer
int16_t *mOutBuffer; // chain output buffer
diff --git a/services/audioflinger/FastCapture.cpp b/services/audioflinger/FastCapture.cpp
index bb83858..d202169 100644
--- a/services/audioflinger/FastCapture.cpp
+++ b/services/audioflinger/FastCapture.cpp
@@ -104,8 +104,10 @@
} else {
mFormat = mInputSource->format();
mSampleRate = Format_sampleRate(mFormat);
+#if !LOG_NDEBUG
unsigned channelCount = Format_channelCount(mFormat);
ALOG_ASSERT(channelCount >= 1 && channelCount <= FCC_8);
+#endif
}
dumpState->mSampleRate = mSampleRate;
eitherChanged = true;
@@ -186,7 +188,6 @@
ALOG_ASSERT(mPipeSink != NULL);
ALOG_ASSERT(mReadBuffer != NULL);
if (mReadBufferState < 0) {
- unsigned channelCount = Format_channelCount(mFormat);
memset(mReadBuffer, 0, frameCount * Format_frameSize(mFormat));
mReadBufferState = frameCount;
}
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index d31b8d3..26cd1f9 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -47,7 +47,6 @@
/*static*/ const FastMixerState FastMixer::sInitial;
FastMixer::FastMixer() : FastThread(),
- mSlopNs(0),
// mFastTrackNames
// mGenerations
mOutputSink(NULL),
@@ -338,6 +337,11 @@
if ((command & FastMixerState::MIX) && (mMixer != NULL) && mIsWarm) {
ALOG_ASSERT(mMixerBuffer != NULL);
+
+ // AudioMixer::mState.enabledTracks is undefined if mState.hook == process__validate,
+ // so we keep a side copy of enabledTracks
+ bool anyEnabledTracks = false;
+
// for each track, update volume and check for underrun
unsigned currentTrackMask = current->mTrackMask;
while (currentTrackMask != 0) {
@@ -398,19 +402,26 @@
underruns.mBitFields.mPartial++;
underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
} else {
underruns.mBitFields.mFull++;
underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
mMixer->enable(name);
+ anyEnabledTracks = true;
}
ftDump->mUnderruns = underruns;
ftDump->mFramesReady = framesReady;
}
- // process() is CPU-bound
- mMixer->process();
- mMixerBufferState = MIXED;
+ if (anyEnabledTracks) {
+ // process() is CPU-bound
+ mMixer->process();
+ mMixerBufferState = MIXED;
+ } else if (mMixerBufferState != ZEROED) {
+ mMixerBufferState = UNDEFINED;
+ }
+
} else if (mMixerBufferState == MIXED) {
mMixerBufferState = UNDEFINED;
}
@@ -422,7 +433,8 @@
}
if (mMasterMono.load()) { // memory_order_seq_cst
- mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount, true /*limit*/);
+ mono_blend(mMixerBuffer, mMixerBufferFormat, Format_channelCount(mFormat), frameCount,
+ true /*limit*/);
}
// prepare the buffer used to write to sink
void *buffer = mSinkBuffer != NULL ? mSinkBuffer : mMixerBuffer;
diff --git a/services/audioflinger/FastMixer.h b/services/audioflinger/FastMixer.h
index 3cc7c9f..bdfd8a0 100644
--- a/services/audioflinger/FastMixer.h
+++ b/services/audioflinger/FastMixer.h
@@ -57,7 +57,6 @@
static const FastMixerState sInitial;
FastMixerState mPreIdle; // copy of state before we went into idle
- long mSlopNs; // accumulated time we've woken up too early (> 0) or too late (< 0)
int mFastTrackNames[FastMixerState::kMaxFastTracks];
// handles used by mixer to identify tracks
int mGenerations[FastMixerState::kMaxFastTracks];
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index a6cb9c0..bf6763f 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -139,18 +139,18 @@
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
- patch->num_sources, patch->num_sinks, *handle);
status_t status = NO_ERROR;
audio_patch_handle_t halHandle = AUDIO_PATCH_HANDLE_NONE;
sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
+ if (handle == NULL || patch == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("createAudioPatch() num_sources %d num_sinks %d handle %d",
+ patch->num_sources, patch->num_sinks, *handle);
if (audioflinger == 0) {
return NO_INIT;
}
- if (handle == NULL || patch == NULL) {
- return BAD_VALUE;
- }
if (patch->num_sources == 0 || patch->num_sources > AUDIO_PATCH_PORTS_MAX ||
(patch->num_sinks == 0 && patch->num_sources != 2) ||
patch->num_sinks > AUDIO_PATCH_PORTS_MAX) {
@@ -349,7 +349,7 @@
exit:
ALOGV("createAudioPatch() status %d", status);
if (status == NO_ERROR) {
- *handle = audioflinger->nextUniqueId();
+ *handle = audioflinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_PATCH);
newPatch->mHandle = *handle;
newPatch->mHalHandle = halHandle;
mPatches.add(newPatch);
@@ -401,7 +401,7 @@
shift = playbackShift;
}
size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
- ALOGV("createPatchConnections() playframeCount %d recordFramecount %d frameCount %d ",
+ ALOGV("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
playbackFrameCount, recordFramecount, frameCount);
// create a special record track to capture from record thread
@@ -453,7 +453,7 @@
patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
// start capture and playback
- patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, 0);
+ patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
patch->mPatchTrack->start();
return status;
@@ -614,7 +614,6 @@
status_t AudioFlinger::PatchPanel::setAudioPortConfig(const struct audio_port_config *config)
{
ALOGV("setAudioPortConfig");
- status_t status = NO_ERROR;
sp<AudioFlinger> audioflinger = mAudioFlinger.promote();
if (audioflinger == 0) {
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index fa61af2..270e27f 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -31,7 +31,7 @@
size_t frameCount,
void *buffer,
const sp<IMemory>& sharedBuffer,
- int sessionId,
+ audio_session_t sessionId,
int uid,
IAudioFlinger::track_flags_t flags,
track_type type);
@@ -42,7 +42,7 @@
void dump(char* buffer, size_t size, bool active);
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
- int triggerSession = 0);
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
void pause();
@@ -110,10 +110,13 @@
// audioHalFrames is derived from output latency
// FIXME parameters not needed, could get them from the thread
bool presentationComplete(int64_t framesWritten, size_t audioHalFrames);
+ void signalClientFlag(int32_t flag);
public:
void triggerEvents(AudioSystem::sync_event_t type);
void invalidate();
+ void disable();
+
bool isInvalid() const { return mIsInvalid; }
int fastIndex() const { return mFastIndex; }
@@ -187,7 +190,7 @@
virtual status_t start(AudioSystem::sync_event_t event =
AudioSystem::SYNC_EVENT_NONE,
- int triggerSession = 0);
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
virtual void stop();
bool write(void* data, uint32_t frames);
bool bufferQueueEmpty() const { return mBufferQueue.size() == 0; }
@@ -200,6 +203,8 @@
uint32_t waitTimeMs);
void clearBufferQueue();
+ void restartIfDisabled();
+
// Maximum number of pending buffers allocated by OutputTrack::write()
static const uint8_t kMaxOverFlowBuffers = 10;
@@ -224,6 +229,10 @@
IAudioFlinger::track_flags_t flags);
virtual ~PatchTrack();
+ virtual status_t start(AudioSystem::sync_event_t event =
+ AudioSystem::SYNC_EVENT_NONE,
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
+
// AudioBufferProvider interface
virtual status_t getNextBuffer(AudioBufferProvider::Buffer* buffer);
virtual void releaseBuffer(AudioBufferProvider::Buffer* buffer);
@@ -236,6 +245,8 @@
void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
private:
+ void restartIfDisabled();
+
sp<ClientProxy> mProxy;
PatchProxyBufferProvider* mPeerProxy;
struct timespec mPeerTimeout;
diff --git a/services/audioflinger/RecordTracks.h b/services/audioflinger/RecordTracks.h
index 6f84af1..13396a6 100644
--- a/services/audioflinger/RecordTracks.h
+++ b/services/audioflinger/RecordTracks.h
@@ -29,14 +29,14 @@
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
- int sessionId,
+ audio_session_t sessionId,
int uid,
IAudioFlinger::track_flags_t flags,
track_type type);
virtual ~RecordTrack();
virtual status_t initCheck() const;
- virtual status_t start(AudioSystem::sync_event_t event, int triggerSession);
+ virtual status_t start(AudioSystem::sync_event_t event, audio_session_t triggerSession);
virtual void stop();
void destroy();
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index e056ef2..b322a45 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -71,6 +71,8 @@
#include <cpustats/ThreadCpuUsage.h>
#endif
+#include "AutoPark.h"
+
// ----------------------------------------------------------------------------
// Note: the following macro is used for extremely verbose logging message. In
@@ -108,6 +110,13 @@
// direct outputs can be a scarce resource in audio hardware and should
// be released as quickly as possible.
static const int8_t kMaxTrackRetriesDirect = 2;
+// retry count before removing active track in case of underrun on offloaded thread:
+// we need to make sure that AudioTrack client has enough time to send large buffers
+// FIXME may be more appropriate if expressed in time units. Need to revise how underrun is handled
+// for offloaded tracks
+static const int8_t kMaxTrackRetriesOffload = 10;
+static const int8_t kMaxTrackStartupRetriesOffload = 100;
+
// don't warn about blocked writes or record buffer overflows more often than this
static const nsecs_t kWarningThrottleNs = seconds(5);
@@ -136,6 +145,14 @@
// Offloaded output thread standby delay: allows track transition without going to standby
static const nsecs_t kOffloadStandbyDelayNs = seconds(1);
+// Direct output thread minimum sleep time in idle or active(underrun) state
+static const nsecs_t kDirectMinSleepTimeUs = 10000;
+
+// Offloaded output bit rate in bits per second when unknown.
+// Used for sleep time calculation, so use a high default bitrate to be conservative on sleep time.
+static const uint32_t kOffloadDefaultBitRateBps = 1500000;
+
+
// Whether to use fast mixer
static const enum {
FastMixer_Never, // never initialize or use: for debugging only
@@ -468,6 +485,7 @@
{AUDIO_DEVICE_OUT_AUX_LINE, "AUX_LINE"},
{AUDIO_DEVICE_OUT_SPEAKER_SAFE, "SPEAKER_SAFE"},
{AUDIO_DEVICE_OUT_IP, "IP"},
+ {AUDIO_DEVICE_OUT_BUS, "BUS"},
{AUDIO_DEVICE_NONE, "NONE"}, // must be last
}, mappingsIn[] = {
{AUDIO_DEVICE_IN_COMMUNICATION, "COMMUNICATION"},
@@ -491,6 +509,7 @@
{AUDIO_DEVICE_IN_BLUETOOTH_A2DP, "BLUETOOTH_A2DP"},
{AUDIO_DEVICE_IN_LOOPBACK, "LOOPBACK"},
{AUDIO_DEVICE_IN_IP, "IP"},
+ {AUDIO_DEVICE_IN_BUS, "BUS"},
{AUDIO_DEVICE_NONE, "NONE"}, // must be last
};
String8 result;
@@ -690,8 +709,6 @@
status_t AudioFlinger::ThreadBase::setParameters(const String8& keyValuePairs)
{
- status_t status;
-
ALOGV("ThreadBase::setParameters() %s", keyValuePairs.string());
Mutex::Autolock _l(mLock);
@@ -710,7 +727,7 @@
return status;
}
mConfigEvents.add(event);
- ALOGV("sendConfigEvent_l() num events %d event %d", mConfigEvents.size(), event->mType);
+ ALOGV("sendConfigEvent_l() num events %zu event %d", mConfigEvents.size(), event->mType);
mWaitWorkCV.signal();
mLock.unlock();
{
@@ -802,7 +819,7 @@
bool configChanged = false;
while (!mConfigEvents.isEmpty()) {
- ALOGV("processConfigEvents_l() remaining events %d", mConfigEvents.size());
+ ALOGV("processConfigEvents_l() remaining events %zu", mConfigEvents.size());
sp<ConfigEvent> event = mConfigEvents[0];
mConfigEvents.removeAt(0);
switch (event->mType) {
@@ -901,7 +918,7 @@
}
const int len = s.length();
if (len > 2) {
- char *str = s.lockBuffer(len); // needed?
+ (void) s.lockBuffer(len); // needed?
s.unlockBuffer(len - 2); // remove trailing ", "
}
return s;
@@ -934,7 +951,7 @@
dprintf(fd, " Sample rate: %u Hz\n", mSampleRate);
dprintf(fd, " HAL frame count: %zu\n", mFrameCount);
dprintf(fd, " HAL format: 0x%x (%s)\n", mHALFormat, formatToString(mHALFormat));
- dprintf(fd, " HAL buffer size: %u bytes\n", mBufferSize);
+ dprintf(fd, " HAL buffer size: %zu bytes\n", mBufferSize);
dprintf(fd, " Channel count: %u\n", mChannelCount);
dprintf(fd, " Channel mask: 0x%08x (%s)\n", mChannelMask,
channelMaskToString(mChannelMask, mType != RECORD).string());
@@ -1096,7 +1113,7 @@
status_t status;
status = mPowerManager->updateWakeLockUids(mWakeLockToken, uids.size(), uids.array(),
true /* FIXME force oneway contrary to .aidl */);
- ALOGV("acquireWakeLock_l() %s status %d", mThreadName, status);
+ ALOGV("updateWakeLockUids_l() %s status %d", mThreadName, status);
}
}
@@ -1117,14 +1134,14 @@
}
void AudioFlinger::ThreadBase::setEffectSuspended(
- const effect_uuid_t *type, bool suspend, int sessionId)
+ const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
setEffectSuspended_l(type, suspend, sessionId);
}
void AudioFlinger::ThreadBase::setEffectSuspended_l(
- const effect_uuid_t *type, bool suspend, int sessionId)
+ const effect_uuid_t *type, bool suspend, audio_session_t sessionId)
{
sp<EffectChain> chain = getEffectChain_l(sessionId);
if (chain != 0) {
@@ -1164,7 +1181,7 @@
void AudioFlinger::ThreadBase::updateSuspendedSessions_l(const effect_uuid_t *type,
bool suspend,
- int sessionId)
+ audio_session_t sessionId)
{
ssize_t index = mSuspendedSessions.indexOfKey(sessionId);
@@ -1225,7 +1242,7 @@
void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
bool enabled,
- int sessionId)
+ audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
checkSuspendOnEffectEnabled_l(effect, enabled, sessionId);
@@ -1233,7 +1250,7 @@
void AudioFlinger::ThreadBase::checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
bool enabled,
- int sessionId)
+ audio_session_t sessionId)
{
if (mType != RECORD) {
// suspend all effects in AUDIO_SESSION_OUTPUT_MIX when enabling any effect on
@@ -1257,7 +1274,7 @@
const sp<AudioFlinger::Client>& client,
const sp<IEffectClient>& effectClient,
int32_t priority,
- int sessionId,
+ audio_session_t sessionId,
effect_descriptor_t *desc,
int *enabled,
status_t *status)
@@ -1340,7 +1357,7 @@
ALOGV("createEffect_l() got effect %p on chain %p", effect.get(), chain.get());
if (effect == 0) {
- int id = mAudioFlinger->nextUniqueId();
+ audio_unique_id_t id = mAudioFlinger->nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
// Check CPU and memory usage
lStatus = AudioSystem::registerEffect(desc, mId, chain->strategy(), sessionId, id);
if (lStatus != NO_ERROR) {
@@ -1396,13 +1413,15 @@
return handle;
}
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(int sessionId, int effectId)
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect(audio_session_t sessionId,
+ int effectId)
{
Mutex::Autolock _l(mLock);
return getEffect_l(sessionId, effectId);
}
-sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(int sessionId, int effectId)
+sp<AudioFlinger::EffectModule> AudioFlinger::ThreadBase::getEffect_l(audio_session_t sessionId,
+ int effectId)
{
sp<EffectChain> chain = getEffectChain_l(sessionId);
return chain != 0 ? chain->getEffectFromId_l(effectId) : 0;
@@ -1413,7 +1432,7 @@
status_t AudioFlinger::ThreadBase::addEffect_l(const sp<EffectModule>& effect)
{
// check for existing effect chain with the requested audio session
- int sessionId = effect->sessionId();
+ audio_session_t sessionId = effect->sessionId();
sp<EffectChain> chain = getEffectChain_l(sessionId);
bool chainCreated = false;
@@ -1490,13 +1509,14 @@
}
}
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(int sessionId)
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain(audio_session_t sessionId)
{
Mutex::Autolock _l(mLock);
return getEffectChain_l(sessionId);
}
-sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(int sessionId) const
+sp<AudioFlinger::EffectChain> AudioFlinger::ThreadBase::getEffectChain_l(audio_session_t sessionId)
+ const
{
size_t size = mEffectChains.size();
for (size_t i = 0; i < size; i++) {
@@ -1551,7 +1571,8 @@
audio_io_handle_t id,
audio_devices_t device,
type_t type,
- bool systemReady)
+ bool systemReady,
+ uint32_t bitRate)
: ThreadBase(audioFlinger, id, device, AUDIO_DEVICE_NONE, type, systemReady),
mNormalFrameCount(0), mSinkBuffer(NULL),
mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
@@ -1614,6 +1635,13 @@
mStreamTypes[stream].volume = mAudioFlinger->streamVolume_l(stream);
mStreamTypes[stream].mute = mAudioFlinger->streamMute_l(stream);
}
+
+ if (audio_has_proportional_frames(mFormat)) {
+ mBufferDurationUs = (uint32_t)((mNormalFrameCount * 1000000LL) / mSampleRate);
+ } else {
+ bitRate = bitRate != 0 ? bitRate : kOffloadDefaultBitRateBps;
+ mBufferDurationUs = (uint32_t)((mBufferSize * 8 * 1000000LL) / bitRate);
+ }
}
AudioFlinger::PlaybackThread::~PlaybackThread()
@@ -1659,10 +1687,10 @@
size_t numtracks = mTracks.size();
size_t numactive = mActiveTracks.size();
- dprintf(fd, " %d Tracks", numtracks);
+ dprintf(fd, " %zu Tracks", numtracks);
size_t numactiveseen = 0;
if (numtracks) {
- dprintf(fd, " of which %d are active\n", numactive);
+ dprintf(fd, " of which %zu are active\n", numactive);
Track::appendDumpHeader(result);
for (size_t i = 0; i < numtracks; ++i) {
sp<Track> track = mTracks[i];
@@ -1703,7 +1731,8 @@
dumpBase(fd, args);
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
- dprintf(fd, " Last write occurred (msecs): %llu\n", ns2ms(systemTime() - mLastWriteTime));
+ dprintf(fd, " Last write occurred (msecs): %llu\n",
+ (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -1744,7 +1773,7 @@
audio_channel_mask_t channelMask,
size_t *pFrameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId,
+ audio_session_t sessionId,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int uid,
@@ -1757,20 +1786,6 @@
// client expresses a preference for FAST, but we get the final say
if (*flags & IAudioFlinger::TRACK_FAST) {
if (
- // either of these use cases:
- (
- // use case 1: shared buffer with any frame count
- (
- (sharedBuffer != 0)
- ) ||
- // use case 2: frame count is default or at least as large as HAL
- (
- // we formerly checked for a callback handler (non-0 tid),
- // but that is no longer required for TRANSFER_OBTAIN mode
- ((frameCount == 0) ||
- (frameCount >= mFrameCount))
- )
- ) &&
// PCM data
audio_is_linear_pcm(format) &&
// TODO: extract as a data library function that checks that a computationally
@@ -1788,20 +1803,20 @@
// FIXME test that MixerThread for this fast track has a capable output HAL
// FIXME add a permission test also?
) {
- // if frameCount not specified, then it defaults to fast mixer (HAL) frame count
- if (frameCount == 0) {
+ // static tracks can have any nonzero framecount, streaming tracks check against minimum.
+ if (sharedBuffer == 0) {
// read the fast track multiplier property the first time it is needed
int ok = pthread_once(&sFastTrackMultiplierOnce, sFastTrackMultiplierInit);
if (ok != 0) {
ALOGE("%s pthread_once failed: %d", __func__, ok);
}
- frameCount = mFrameCount * sFastTrackMultiplier;
+ frameCount = max(frameCount, mFrameCount * sFastTrackMultiplier); // incl framecount 0
}
- ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%d mFrameCount=%d",
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%d "
- "mFrameCount=%d format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
+ ALOGV("AUDIO_OUTPUT_FLAG_FAST denied: sharedBuffer=%p frameCount=%zu "
+ "mFrameCount=%zu format=%#x mFormat=%#x isLinear=%d channelMask=%#x "
"sampleRate=%u mSampleRate=%u "
"hasFastMixer=%d tid=%d fastTrackAvailMask=%#x",
sharedBuffer.get(), frameCount, mFrameCount, format, mFormat,
@@ -2006,8 +2021,6 @@
{
status_t status = ALREADY_EXISTS;
- // set retry count for buffer fill
- track->mRetryCount = kMaxTrackStartupRetries;
if (mActiveTracks.indexOf(track) < 0) {
// the track is newly added, make sure it fills up all its
// buffers before playing. This is to ensure the client will
@@ -2016,14 +2029,14 @@
TrackBase::track_state state = track->mState;
mLock.unlock();
status = AudioSystem::startOutput(mId, track->streamType(),
- (audio_session_t)track->sessionId());
+ track->sessionId());
mLock.lock();
// abort track was stopped/paused while we released the lock
if (state != track->mState) {
if (status == NO_ERROR) {
mLock.unlock();
AudioSystem::stopOutput(mId, track->streamType(),
- (audio_session_t)track->sessionId());
+ track->sessionId());
mLock.lock();
}
return INVALID_OPERATION;
@@ -2038,6 +2051,13 @@
#endif
}
+ // set retry count for buffer fill
+ if (track->isOffloaded()) {
+ track->mRetryCount = kMaxTrackStartupRetriesOffload;
+ } else {
+ track->mRetryCount = kMaxTrackStartupRetries;
+ }
+
track->mFillingUpStatus = track->sharedBuffer() != 0 ? Track::FS_FILLED : Track::FS_FILLING;
track->mResetDone = false;
track->mPresentationCompleteFrames = 0;
@@ -2228,7 +2248,7 @@
mBufferSize = mOutput->stream->common.get_buffer_size(&mOutput->stream->common);
mFrameCount = mBufferSize / mFrameSize;
if (mFrameCount & 15) {
- ALOGW("HAL output buffer size is %u frames but AudioMixer requires multiples of 16 frames",
+ ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -2314,7 +2334,7 @@
if (mType == MIXER || mType == DUPLICATING) {
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
}
- ALOGI("HAL output buffer size %u frames, normal sink buffer size %u frames", mFrameCount,
+ ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
mNormalFrameCount);
// Check if we want to throttle the processing to no more than 2x normal rate
@@ -2391,7 +2411,7 @@
}
}
-uint32_t AudioFlinger::PlaybackThread::hasAudioSession(int sessionId) const
+uint32_t AudioFlinger::PlaybackThread::hasAudioSession(audio_session_t sessionId) const
{
Mutex::Autolock _l(mLock);
uint32_t result = 0;
@@ -2410,7 +2430,7 @@
return result;
}
-uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(int sessionId)
+uint32_t AudioFlinger::PlaybackThread::getStrategyForSession_l(audio_session_t sessionId)
{
// session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
@@ -2493,14 +2513,14 @@
const sp<Track>& track = tracksToRemove.itemAt(i);
if (track->isExternalTrack()) {
AudioSystem::stopOutput(mId, track->streamType(),
- (audio_session_t)track->sessionId());
+ track->sessionId());
#ifdef ADD_BATTERY_DATA
// to track the speaker usage
addBatteryData(IMediaPlayerService::kBatteryDataAudioFlingerStop);
#endif
if (track->isTerminated()) {
AudioSystem::releaseOutput(mId, track->streamType(),
- (audio_session_t)track->sessionId());
+ track->sessionId());
}
}
}
@@ -2570,6 +2590,7 @@
// FIXME We should have an implementation of timestamps for direct output threads.
// They are used e.g for multichannel PCM playback over HDMI.
bytesWritten = mOutput->write((char *)mSinkBuffer + offset, mBytesRemaining);
+
if (mUseAsyncWrite &&
((bytesWritten < 0) || (bytesWritten == (ssize_t)mBytesRemaining))) {
// do not wait for async callback in case of error of full write
@@ -2650,7 +2671,7 @@
void AudioFlinger::PlaybackThread::invalidateTracks(audio_stream_type_t streamType)
{
- ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %d",
+ ALOGV("MixerThread::invalidateTracks() mixer %p, streamType %d, mTracks.size %zu",
this, streamType, mTracks.size());
Mutex::Autolock _l(mLock);
@@ -2665,13 +2686,13 @@
status_t AudioFlinger::PlaybackThread::addEffectChain_l(const sp<EffectChain>& chain)
{
- int session = chain->sessionId();
+ audio_session_t session = chain->sessionId();
int16_t* buffer = reinterpret_cast<int16_t*>(mEffectBufferEnabled
? mEffectBuffer : mSinkBuffer);
bool ownsBuffer = false;
ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
- if (session > 0) {
+ if (session > AUDIO_SESSION_OUTPUT_MIX) {
// Only one effect chain can be present in direct output thread and it uses
// the sink buffer as input
if (mType != DIRECT) {
@@ -2710,15 +2731,18 @@
chain->setOutBuffer(reinterpret_cast<int16_t*>(mEffectBufferEnabled
? mEffectBuffer : mSinkBuffer));
// Effect chain for session AUDIO_SESSION_OUTPUT_STAGE is inserted at end of effect
- // chains list in order to be processed last as it contains output stage effects
+ // chains list in order to be processed last as it contains output stage effects.
// Effect chain for session AUDIO_SESSION_OUTPUT_MIX is inserted before
// session AUDIO_SESSION_OUTPUT_STAGE to be processed
- // after track specific effects and before output stage
+ // after track specific effects and before output stage.
// It is therefore mandatory that AUDIO_SESSION_OUTPUT_MIX == 0 and
- // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX
+ // that AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX.
// Effect chain for other sessions are inserted at beginning of effect
// chains list to be processed before output mix effects. Relative order between other
- // sessions is not important
+ // sessions is not important.
+ static_assert(AUDIO_SESSION_OUTPUT_MIX == 0 &&
+ AUDIO_SESSION_OUTPUT_STAGE < AUDIO_SESSION_OUTPUT_MIX,
+ "audio_session_t constants misdefined");
size_t size = mEffectChains.size();
size_t i = 0;
for (i = 0; i < size; i++) {
@@ -2734,7 +2758,7 @@
size_t AudioFlinger::PlaybackThread::removeEffectChain_l(const sp<EffectChain>& chain)
{
- int session = chain->sessionId();
+ audio_session_t session = chain->sessionId();
ALOGV("removeEffectChain_l() %p from thread %p for session %d", chain.get(), this, session);
@@ -3091,7 +3115,7 @@
if ((now - lastWarning) > kWarningThrottleNs) {
ATRACE_NAME("underrun");
ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- ns2ms(delta), mNumDelayedWrites, this);
+ (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
lastWarning = now;
}
}
@@ -3135,7 +3159,30 @@
} else {
ATRACE_BEGIN("sleep");
- usleep(mSleepTimeUs);
+ if ((mType == OFFLOAD) && !audio_has_proportional_frames(mFormat)) {
+ Mutex::Autolock _l(mLock);
+ if (!mSignalPending && !exitPending()) {
+ // Do not sleep more than one buffer duration since last write and not
+ // less than kDirectMinSleepTimeUs
+ // Wake up if a command is received
+ nsecs_t now = systemTime();
+ uint32_t deltaUs = (uint32_t)((now - mLastWriteTime) / 1000);
+ uint32_t timeoutUs = mSleepTimeUs;
+ if (timeoutUs + deltaUs > mBufferDurationUs) {
+ if (mBufferDurationUs > deltaUs) {
+ timeoutUs = mBufferDurationUs - deltaUs;
+ if (timeoutUs < kDirectMinSleepTimeUs) {
+ timeoutUs = kDirectMinSleepTimeUs;
+ }
+ } else {
+ timeoutUs = kDirectMinSleepTimeUs;
+ }
+ }
+ mWaitWorkCV.waitRelative(mLock, microseconds((nsecs_t)timeoutUs));
+ }
+ } else {
+ usleep(mSleepTimeUs);
+ }
ATRACE_END();
}
}
@@ -3224,31 +3271,9 @@
status_t AudioFlinger::MixerThread::createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle)
{
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
- status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
+ AutoPark<FastMixer> park(mFastMixer);
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
+ status_t status = PlaybackThread::createAudioPatch_l(patch, handle);
return status;
}
@@ -3331,33 +3356,10 @@
status_t AudioFlinger::MixerThread::releaseAudioPatch_l(const audio_patch_handle_t handle)
{
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
+ AutoPark<FastMixer> park(mFastMixer);
status_t status = PlaybackThread::releaseAudioPatch_l(handle);
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
-
return status;
}
@@ -3413,8 +3415,8 @@
// mNormalSink below
{
ALOGV("MixerThread() id=%d device=%#x type=%d", id, device, type);
- ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%u, "
- "mFrameCount=%d, mNormalFrameCount=%d",
+ ALOGV("mSampleRate=%u, mChannelMask=%#x, mChannelCount=%u, mFormat=%d, mFrameSize=%zu, "
+ "mFrameCount=%zu, mNormalFrameCount=%zu",
mSampleRate, mChannelMask, mChannelCount, mFormat, mFrameSize, mFrameCount,
mNormalFrameCount);
mAudioMixer = new AudioMixer(mNormalFrameCount, mSampleRate);
@@ -3429,7 +3431,12 @@
mOutputSink = new AudioStreamOutSink(output->stream);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
- ssize_t index = mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ mOutputSink->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
// initialize fast mixer depending on configuration
@@ -3464,7 +3471,9 @@
// create a MonoPipe to connect our submix to FastMixer
NBAIO_Format format = mOutputSink->format();
+#ifdef TEE_SINK
NBAIO_Format origformat = format;
+#endif
// adjust format to match that of the Fast Mixer
ALOGV("format changed from %d to %d", format.mFormat, fastMixerFormat);
format.mFormat = fastMixerFormat;
@@ -3476,7 +3485,12 @@
MonoPipe *monoPipe = new MonoPipe(mNormalFrameCount * 4, format, true /*writeCanBlock*/);
const NBAIO_Format offers[1] = {format};
size_t numCounterOffers = 0;
- ssize_t index = monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ monoPipe->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
monoPipe->setAvgFrames((mScreenState & 1) ?
(monoPipe->maxFrames() * 7) / 8 : mNormalFrameCount * 2);
@@ -3907,7 +3921,7 @@
}
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &track->mCblk->mFlags);
+ track->disable();
// remove from active list, but state remains ACTIVE [confusing but true]
isActive = false;
break;
@@ -4268,7 +4282,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
// If one track is not ready, mark the mixer also not ready if:
// - the mixer was ready during previous round OR
// - no other track is ready
@@ -4281,7 +4295,6 @@
}
} // local variable scope to avoid goto warning
-track_is_ready: ;
}
@@ -4373,7 +4386,7 @@
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::MixerThread::getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId)
+ audio_format_t format, audio_session_t sessionId)
{
return mAudioMixer->getTrackName(channelMask, format, sessionId);
}
@@ -4394,20 +4407,7 @@
status = NO_ERROR;
- // if !&IDLE, holds the FastMixer state to restore after new parameters processed
- FastMixerState::Command previousCommand = FastMixerState::HOT_IDLE;
- if (mFastMixer != 0) {
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- if (!(state->mCommand & FastMixerState::IDLE)) {
- previousCommand = state->mCommand;
- state->mCommand = FastMixerState::HOT_IDLE;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_ACKED);
- } else {
- sq->end(false /*didModify*/);
- }
- }
+ AutoPark<FastMixer> park(mFastMixer);
AudioParameter param = AudioParameter(keyValuePair);
int value;
@@ -4502,26 +4502,12 @@
}
}
- if (!(previousCommand & FastMixerState::IDLE)) {
- ALOG_ASSERT(mFastMixer != 0);
- FastMixerStateQueue *sq = mFastMixer->sq();
- FastMixerState *state = sq->begin();
- ALOG_ASSERT(state->mCommand == FastMixerState::HOT_IDLE);
- state->mCommand = previousCommand;
- sq->end();
- sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
- }
-
return reconfig || a2dpDeviceChanged;
}
void AudioFlinger::MixerThread::dumpInternals(int fd, const Vector<String16>& args)
{
- const size_t SIZE = 256;
- char buffer[SIZE];
- String8 result;
-
PlaybackThread::dumpInternals(fd, args);
dprintf(fd, " Thread throttle time (msecs): %u\n", mThreadThrottleTimeMs);
dprintf(fd, " AudioMixer tracks: 0x%08x\n", mAudioMixer->trackNames());
@@ -4581,16 +4567,17 @@
// ----------------------------------------------------------------------------
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady)
- : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady)
+ AudioStreamOut* output, audio_io_handle_t id, audio_devices_t device, bool systemReady,
+ uint32_t bitRate)
+ : PlaybackThread(audioFlinger, output, id, device, DIRECT, systemReady, bitRate)
// mLeftVolFloat, mRightVolFloat
{
}
AudioFlinger::DirectOutputThread::DirectOutputThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output, audio_io_handle_t id, uint32_t device,
- ThreadBase::type_t type, bool systemReady)
- : PlaybackThread(audioFlinger, output, id, device, type, systemReady)
+ ThreadBase::type_t type, bool systemReady, uint32_t bitRate)
+ : PlaybackThread(audioFlinger, output, id, device, type, systemReady, bitRate)
// mLeftVolFloat, mRightVolFloat
{
}
@@ -4601,7 +4588,6 @@
void AudioFlinger::DirectOutputThread::processVolume_l(Track *track, bool lastTrack)
{
- audio_track_cblk_t* cblk = track->cblk();
float left, right;
if (mMasterMute || mStreamTypes[track->streamType()].mute) {
@@ -4690,7 +4676,9 @@
}
Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
audio_track_cblk_t* cblk = track->cblk();
+#endif
// Only consider last track started for volume and mixer state control.
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
@@ -4814,7 +4802,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
} else if (last) {
ALOGW("pause because of UNDERRUN, framesReady = %zu,"
"minFrames = %u, mFormat = %#x",
@@ -4869,7 +4857,10 @@
buffer.frameCount = frameCount;
status_t status = mActiveTrack->getNextBuffer(&buffer);
if (status != NO_ERROR || buffer.raw == NULL) {
- memset(curBuf, 0, frameCount * mFrameSize);
+ // no need to pad with 0 for compressed audio
+ if (audio_has_proportional_frames(mFormat)) {
+ memset(curBuf, 0, frameCount * mFrameSize);
+ }
break;
}
memcpy(curBuf, buffer.raw, buffer.frameCount * mFrameSize);
@@ -4892,7 +4883,14 @@
}
if (mSleepTimeUs == 0) {
if (mMixerStatus == MIXER_TRACKS_ENABLED) {
- mSleepTimeUs = mActiveSleepTimeUs;
+ // For compressed offload, use faster sleep time when underruning until more than an
+ // entire buffer was written to the audio HAL
+ if (!audio_has_proportional_frames(mFormat) &&
+ (mType == OFFLOAD) && (mBytesWritten < (int64_t) mBufferSize)) {
+ mSleepTimeUs = kDirectMinSleepTimeUs;
+ } else {
+ mSleepTimeUs = mActiveSleepTimeUs;
+ }
} else {
mSleepTimeUs = mIdleSleepTimeUs;
}
@@ -4925,6 +4923,10 @@
bool trackPaused = false;
bool trackStopped = false;
+ if ((mType == DIRECT) && audio_is_linear_pcm(mFormat) && !usesHwAvSync()) {
+ return !mStandby;
+ }
+
// do not put the HAL in standby when paused. AwesomePlayer clear the offloaded AudioTrack
// after a timeout and we will enter standby then.
if (mTracks.size() > 0) {
@@ -4938,7 +4940,7 @@
// getTrackName_l() must be called with ThreadBase::mLock held
int AudioFlinger::DirectOutputThread::getTrackName_l(audio_channel_mask_t channelMask __unused,
- audio_format_t format __unused, int sessionId __unused)
+ audio_format_t format __unused, audio_session_t sessionId __unused)
{
return 0;
}
@@ -5006,7 +5008,7 @@
if (audio_has_proportional_frames(mFormat)) {
time = PlaybackThread::activeSleepTimeUs();
} else {
- time = 10000;
+ time = kDirectMinSleepTimeUs;
}
return time;
}
@@ -5017,7 +5019,7 @@
if (audio_has_proportional_frames(mFormat)) {
time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000) / 2;
} else {
- time = 10000;
+ time = kDirectMinSleepTimeUs;
}
return time;
}
@@ -5028,7 +5030,7 @@
if (audio_has_proportional_frames(mFormat)) {
time = (uint32_t)(((mFrameCount * 1000) / mSampleRate) * 1000);
} else {
- time = 10000;
+ time = kDirectMinSleepTimeUs;
}
return time;
}
@@ -5160,8 +5162,9 @@
// ----------------------------------------------------------------------------
AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
- AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady)
- : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady),
+ AudioStreamOut* output, audio_io_handle_t id, uint32_t device, bool systemReady,
+ uint32_t bitRate)
+ : DirectOutputThread(audioFlinger, output, id, device, OFFLOAD, systemReady, bitRate),
mPausedBytesRemaining(0)
{
//FIXME: mStandby should be set to true by ThreadBase constructor
@@ -5194,7 +5197,7 @@
bool doHwPause = false;
bool doHwResume = false;
- ALOGV("OffloadThread::prepareTracks_l active tracks %d", count);
+ ALOGV("OffloadThread::prepareTracks_l active tracks %zu", count);
// find out which tracks need to be processed
for (size_t i = 0; i < count; i++) {
@@ -5204,7 +5207,9 @@
continue;
}
Track* const track = t.get();
+#ifdef VERY_VERY_VERBOSE_LOGGING
audio_track_cblk_t* cblk = track->cblk();
+#endif
// Only consider last track started for volume and mixer state control.
// In theory an older track could underrun and restart after the new one starts
// but as we only care about the transition phase between two tracks on a
@@ -5242,6 +5247,7 @@
}
tracksToRemove->add(track);
} else if (track->isFlushPending()) {
+ track->mRetryCount = kMaxTrackRetriesOffload;
track->flushAck();
if (last) {
mFlushPending = true;
@@ -5356,7 +5362,7 @@
tracksToRemove->add(track);
// indicate to client process that the track was disabled because of underrun;
// it will then automatically call start() when data is available
- android_atomic_or(CBLK_DISABLED, &cblk->mFlags);
+ track->disable();
} else if (last){
mixerStatus = MIXER_TRACKS_ENABLED;
}
@@ -5422,6 +5428,20 @@
}
}
+uint32_t AudioFlinger::OffloadThread::activeSleepTimeUs() const
+{
+ uint32_t time;
+ if (audio_has_proportional_frames(mFormat)) {
+ time = PlaybackThread::activeSleepTimeUs();
+ } else {
+ // sleep time is half the duration of an audio HAL buffer.
+ // Note: This can be problematic in case of underrun with variable bit rate and
+ // current rate is much less than initial rate.
+ time = (uint32_t)max(kDirectMinSleepTimeUs, mBufferDurationUs / 2);
+ }
+ return time;
+}
+
// ----------------------------------------------------------------------------
AudioFlinger::DuplicatingThread::DuplicatingThread(const sp<AudioFlinger>& audioFlinger,
@@ -5641,7 +5661,12 @@
mInputSource = new AudioStreamInSource(input->stream);
size_t numCounterOffers = 0;
const NBAIO_Format offers[1] = {Format_from_SR_C(mSampleRate, mChannelCount, mFormat)};
- ssize_t index = mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
+#if !LOG_NDEBUG
+ ssize_t index =
+#else
+ (void)
+#endif
+ mInputSource->negotiate(offers, 1, NULL, numCounterOffers);
ALOG_ASSERT(index == 0);
// initialize fast capture depending on configuration
@@ -5719,7 +5744,7 @@
// start the fast capture
mFastCapture->run("FastCapture", ANDROID_PRIORITY_URGENT_AUDIO);
pid_t tid = mFastCapture->getTid();
- sendPrioConfigEvent(getpid_cached, tid, kPriorityFastMixer);
+ sendPrioConfigEvent(getpid_cached, tid, kPriorityFastCapture);
#ifdef AUDIO_WATCHDOG
// FIXME
#endif
@@ -5991,8 +6016,10 @@
}
// otherwise use the HAL / AudioStreamIn directly
} else {
+ ATRACE_BEGIN("read");
ssize_t bytesRead = mInput->stream->read(mInput->stream,
(uint8_t*)mRsmpInBuffer + rear * mFrameSize, mBufferSize);
+ ATRACE_END();
if (bytesRead < 0) {
framesRead = bytesRead;
} else {
@@ -6023,7 +6050,7 @@
// ALOGD("%s", mTimestamp.toString().c_str());
if (framesRead < 0 || (framesRead == 0 && mPipeSource == 0)) {
- ALOGE("read failed: framesRead=%d", framesRead);
+ ALOGE("read failed: framesRead=%zd", framesRead);
// Force input into standby so that it tries to recover at next read attempt
inputStandBy();
sleepUs = kRecordThreadSleepUs;
@@ -6120,7 +6147,8 @@
(activeTrack->mFramesToDrop >= 0) ? "timed out" : "cancelled",
activeTrack->sessionId(),
(activeTrack->mSyncStartEvent != 0) ?
- activeTrack->mSyncStartEvent->triggerSession() : 0);
+ activeTrack->mSyncStartEvent->triggerSession() :
+ AUDIO_SESSION_NONE);
activeTrack->clearSyncStartEvent();
}
}
@@ -6226,7 +6254,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
- int sessionId,
+ audio_session_t sessionId,
size_t *notificationFrames,
int uid,
IAudioFlinger::track_flags_t *flags,
@@ -6247,21 +6275,21 @@
((frameCount == 0) || (frameCount == mPipeFramesP2)) &&
// PCM data
audio_is_linear_pcm(format) &&
- // native format
+ // hardware format
(format == mFormat) &&
- // native channel mask
+ // hardware channel mask
(channelMask == mChannelMask) &&
- // native hardware sample rate
+ // hardware sample rate
(sampleRate == mSampleRate) &&
// record thread has an associated fast capture
hasFastCapture() &&
// there are sufficient fast track slots available
mFastTrackAvail
) {
- ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%u mFrameCount=%u",
+ ALOGV("AUDIO_INPUT_FLAG_FAST accepted: frameCount=%zu mFrameCount=%zu",
frameCount, mFrameCount);
} else {
- ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%u mFrameCount=%u mPipeFramesP2=%u "
+ ALOGV("AUDIO_INPUT_FLAG_FAST denied: frameCount=%zu mFrameCount=%zu mPipeFramesP2=%zu "
"format=%#x isLinear=%d channelMask=%#x sampleRate=%u mSampleRate=%u "
"hasFastCapture=%d tid=%d mFastTrackAvail=%d",
frameCount, mFrameCount, mPipeFramesP2,
@@ -6345,7 +6373,7 @@
status_t AudioFlinger::RecordThread::start(RecordThread::RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
- int triggerSession)
+ audio_session_t triggerSession)
{
ALOGV("RecordThread::start event %d, triggerSession %d", event, triggerSession);
sp<ThreadBase> strongMe = this;
@@ -6392,7 +6420,7 @@
status_t status = NO_ERROR;
if (recordTrack->isExternalTrack()) {
mLock.unlock();
- status = AudioSystem::startInput(mId, (audio_session_t)recordTrack->sessionId());
+ status = AudioSystem::startInput(mId, recordTrack->sessionId());
mLock.lock();
// FIXME should verify that recordTrack is still in mActiveTracks
if (status != NO_ERROR) {
@@ -6424,7 +6452,7 @@
startError:
if (recordTrack->isExternalTrack()) {
- AudioSystem::stopInput(mId, (audio_session_t)recordTrack->sessionId());
+ AudioSystem::stopInput(mId, recordTrack->sessionId());
}
recordTrack->clearSyncStartEvent();
// FIXME I wonder why we do not reset the state here?
@@ -6478,7 +6506,7 @@
return BAD_VALUE;
}
- int eventSession = event->triggerSession();
+ audio_session_t eventSession = event->triggerSession();
status_t ret = NAME_NOT_FOUND;
Mutex::Autolock _l(mLock);
@@ -6554,9 +6582,9 @@
size_t numtracks = mTracks.size();
size_t numactive = mActiveTracks.size();
size_t numactiveseen = 0;
- dprintf(fd, " %d Tracks", numtracks);
+ dprintf(fd, " %zu Tracks", numtracks);
if (numtracks) {
- dprintf(fd, " of which %d are active\n", numactive);
+ dprintf(fd, " of which %zu are active\n", numactive);
RecordTrack::appendDumpHeader(result);
for (size_t i = 0; i < numtracks ; ++i) {
sp<RecordTrack> track = mTracks[i];
@@ -6949,6 +6977,10 @@
AudioParameter param = AudioParameter(keyValuePair);
int value;
+
+ // scope for AutoPark extends to end of method
+ AutoPark<FastCapture> park(mFastCapture);
+
// TODO Investigate when this code runs. Check with audio policy when a sample rate and
// channel count change can be requested. Do we mandate the first client defines the
// HAL sampling rate and channel count or do we allow changes on the fly?
@@ -7144,7 +7176,7 @@
return mInput->stream->get_input_frames_lost(mInput->stream);
}
-uint32_t AudioFlinger::RecordThread::hasAudioSession(int sessionId) const
+uint32_t AudioFlinger::RecordThread::hasAudioSession(audio_session_t sessionId) const
{
Mutex::Autolock _l(mLock);
uint32_t result = 0;
@@ -7162,13 +7194,13 @@
return result;
}
-KeyedVector<int, bool> AudioFlinger::RecordThread::sessionIds() const
+KeyedVector<audio_session_t, bool> AudioFlinger::RecordThread::sessionIds() const
{
- KeyedVector<int, bool> ids;
+ KeyedVector<audio_session_t, bool> ids;
Mutex::Autolock _l(mLock);
for (size_t j = 0; j < mTracks.size(); ++j) {
sp<RecordThread::RecordTrack> track = mTracks[j];
- int sessionId = track->sessionId();
+ audio_session_t sessionId = track->sessionId();
if (ids.indexOfKey(sessionId) < 0) {
ids.add(sessionId, true);
}
@@ -7220,7 +7252,7 @@
{
ALOGV("removeEffectChain_l() %p from thread %p", chain.get(), this);
ALOGW_IF(mEffectChains.size() != 1,
- "removeEffectChain_l() %p invalid chain size %d on thread %p",
+ "removeEffectChain_l() %p invalid chain size %zu on thread %p",
chain.get(), mEffectChains.size(), this);
if (mEffectChains.size() == 1) {
mEffectChains.removeAt(0);
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 42b3266..761fc71 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -288,7 +288,7 @@
const sp<AudioFlinger::Client>& client,
const sp<IEffectClient>& effectClient,
int32_t priority,
- int sessionId,
+ audio_session_t sessionId,
effect_descriptor_t *desc,
int *enabled,
status_t *status /*non-NULL*/);
@@ -302,9 +302,9 @@
};
// get effect chain corresponding to session Id.
- sp<EffectChain> getEffectChain(int sessionId);
+ sp<EffectChain> getEffectChain(audio_session_t sessionId);
// same as getEffectChain() but must be called with ThreadBase mutex locked
- sp<EffectChain> getEffectChain_l(int sessionId) const;
+ sp<EffectChain> getEffectChain_l(audio_session_t sessionId) const;
// add an effect chain to the chain list (mEffectChains)
virtual status_t addEffectChain_l(const sp<EffectChain>& chain) = 0;
// remove an effect chain from the chain list (mEffectChains)
@@ -321,8 +321,8 @@
// set audio mode to all effect chains
void setMode(audio_mode_t mode);
// get effect module with corresponding ID on specified audio session
- sp<AudioFlinger::EffectModule> getEffect(int sessionId, int effectId);
- sp<AudioFlinger::EffectModule> getEffect_l(int sessionId, int effectId);
+ sp<AudioFlinger::EffectModule> getEffect(audio_session_t sessionId, int effectId);
+ sp<AudioFlinger::EffectModule> getEffect_l(audio_session_t sessionId, int effectId);
// add and effect module. Also creates the effect chain is none exists for
// the effects audio session
status_t addEffect_l(const sp< EffectModule>& effect);
@@ -333,24 +333,27 @@
virtual void detachAuxEffect_l(int effectId __unused) {}
// returns either EFFECT_SESSION if effects on this audio session exist in one
// chain, or TRACK_SESSION if tracks on this audio session exist, or both
- virtual uint32_t hasAudioSession(int sessionId) const = 0;
+ virtual uint32_t hasAudioSession(audio_session_t sessionId) const = 0;
// the value returned by default implementation is not important as the
// strategy is only meaningful for PlaybackThread which implements this method
- virtual uint32_t getStrategyForSession_l(int sessionId __unused) { return 0; }
+ virtual uint32_t getStrategyForSession_l(audio_session_t sessionId __unused)
+ { return 0; }
// suspend or restore effect according to the type of effect passed. a NULL
// type pointer means suspend all effects in the session
void setEffectSuspended(const effect_uuid_t *type,
bool suspend,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX);
// check if some effects must be suspended/restored when an effect is enabled
// or disabled
void checkSuspendOnEffectEnabled(const sp<EffectModule>& effect,
bool enabled,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+ audio_session_t sessionId =
+ AUDIO_SESSION_OUTPUT_MIX);
void checkSuspendOnEffectEnabled_l(const sp<EffectModule>& effect,
bool enabled,
- int sessionId = AUDIO_SESSION_OUTPUT_MIX);
+ audio_session_t sessionId =
+ AUDIO_SESSION_OUTPUT_MIX);
virtual status_t setSyncEvent(const sp<SyncEvent>& event) = 0;
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const = 0;
@@ -389,11 +392,11 @@
void getPowerManager_l();
void setEffectSuspended_l(const effect_uuid_t *type,
bool suspend,
- int sessionId);
+ audio_session_t sessionId);
// updated mSuspendedSessions when an effect suspended or restored
void updateSuspendedSessions_l(const effect_uuid_t *type,
bool suspend,
- int sessionId);
+ audio_session_t sessionId);
// check if some effects must be suspended when an effect chain is added
void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
@@ -452,9 +455,9 @@
sp<IPowerManager> mPowerManager;
sp<IBinder> mWakeLockToken;
const sp<PMDeathRecipient> mDeathRecipient;
- // list of suspended effects per session and per type. The first vector is
- // keyed by session ID, the second by type UUID timeLow field
- KeyedVector< int, KeyedVector< int, sp<SuspendedSessionDesc> > >
+ // list of suspended effects per session and per type. The first (outer) vector is
+ // keyed by session ID, the second (inner) by type UUID timeLow field
+ KeyedVector< audio_session_t, KeyedVector< int, sp<SuspendedSessionDesc> > >
mSuspendedSessions;
static const size_t kLogSize = 4 * 1024;
sp<NBLog::Writer> mNBLogWriter;
@@ -479,14 +482,9 @@
// suspend by audio policy manager is orthogonal to mixer state
};
- // retry count before removing active track in case of underrun on offloaded thread:
- // we need to make sure that AudioTrack client has enough time to send large buffers
-//FIXME may be more appropriate if expressed in time units. Need to revise how underrun is handled
- // for offloaded tracks
- static const int8_t kMaxTrackRetriesOffload = 20;
-
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady);
+ audio_io_handle_t id, audio_devices_t device, type_t type, bool systemReady,
+ uint32_t bitRate = 0);
virtual ~PlaybackThread();
void dump(int fd, const Vector<String16>& args);
@@ -554,7 +552,7 @@
audio_channel_mask_t channelMask,
size_t *pFrameCount,
const sp<IMemory>& sharedBuffer,
- int sessionId,
+ audio_session_t sessionId,
IAudioFlinger::track_flags_t *flags,
pid_t tid,
int uid,
@@ -594,8 +592,8 @@
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(int sessionId) const;
- virtual uint32_t getStrategyForSession_l(int sessionId);
+ virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
+ virtual uint32_t getStrategyForSession_l(audio_session_t sessionId);
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
@@ -709,7 +707,7 @@
// Allocate a track name for a given channel mask.
// Returns name >= 0 if successful, -1 on failure.
virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId) = 0;
+ audio_format_t format, audio_session_t sessionId) = 0;
virtual void deleteTrackName_l(int name) = 0;
// Time to sleep between cycles when:
@@ -841,6 +839,8 @@
bool mHwSupportsPause;
bool mHwPaused;
bool mFlushPending;
+ uint32_t mBufferDurationUs; // estimated duration of an audio HAL buffer
+ // based on initial bit rate (offload only)
};
class MixerThread : public PlaybackThread {
@@ -862,7 +862,7 @@
protected:
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId);
+ audio_format_t format, audio_session_t sessionId);
virtual void deleteTrackName_l(int name);
virtual uint32_t idleSleepTimeUs() const;
virtual uint32_t suspendSleepTimeUs() const;
@@ -931,7 +931,8 @@
public:
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, audio_devices_t device, bool systemReady);
+ audio_io_handle_t id, audio_devices_t device, bool systemReady,
+ uint32_t bitRate = 0);
virtual ~DirectOutputThread();
// Thread virtuals
@@ -942,7 +943,7 @@
protected:
virtual int getTrackName_l(audio_channel_mask_t channelMask,
- audio_format_t format, int sessionId);
+ audio_format_t format, audio_session_t sessionId);
virtual void deleteTrackName_l(int name);
virtual uint32_t activeSleepTimeUs() const;
virtual uint32_t idleSleepTimeUs() const;
@@ -964,7 +965,7 @@
DirectOutputThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, uint32_t device, ThreadBase::type_t type,
- bool systemReady);
+ bool systemReady, uint32_t bitRate = 0);
void processVolume_l(Track *track, bool lastTrack);
// prepareTracks_l() tells threadLoop_mix() the name of the single active track
@@ -980,7 +981,8 @@
public:
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, uint32_t device, bool systemReady);
+ audio_io_handle_t id, uint32_t device,
+ bool systemReady, uint32_t bitRate);
virtual ~OffloadThread() {};
virtual void flushHw_l();
@@ -989,6 +991,8 @@
virtual mixer_state prepareTracks_l(Vector< sp<Track> > *tracksToRemove);
virtual void threadLoop_exit();
+ virtual uint32_t activeSleepTimeUs() const;
+
virtual bool waitingAsyncCallback();
virtual bool waitingAsyncCallback_l();
@@ -1242,7 +1246,7 @@
audio_format_t format,
audio_channel_mask_t channelMask,
size_t *pFrameCount,
- int sessionId,
+ audio_session_t sessionId,
size_t *notificationFrames,
int uid,
IAudioFlinger::track_flags_t *flags,
@@ -1251,7 +1255,7 @@
status_t start(RecordTrack* recordTrack,
AudioSystem::sync_event_t event,
- int triggerSession);
+ audio_session_t triggerSession);
// ask the thread to stop the specified track, and
// return true if the caller should then do it's part of the stopping process
@@ -1279,12 +1283,12 @@
virtual status_t addEffectChain_l(const sp<EffectChain>& chain);
virtual size_t removeEffectChain_l(const sp<EffectChain>& chain);
- virtual uint32_t hasAudioSession(int sessionId) const;
+ virtual uint32_t hasAudioSession(audio_session_t sessionId) const;
// Return the set of unique session IDs across all tracks.
// The keys are the session IDs, and the associated values are meaningless.
// FIXME replace by Set [and implement Bag/Multiset for other uses].
- KeyedVector<int, bool> sessionIds() const;
+ KeyedVector<audio_session_t, bool> sessionIds() const;
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
virtual bool isValidSyncEvent(const sp<SyncEvent>& event) const;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 26067e3..67a5e58 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -61,7 +61,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
- int sessionId,
+ audio_session_t sessionId,
int uid,
IAudioFlinger::track_flags_t flags,
bool isOut,
@@ -71,11 +71,11 @@
virtual status_t initCheck() const;
virtual status_t start(AudioSystem::sync_event_t event,
- int triggerSession) = 0;
+ audio_session_t triggerSession) = 0;
virtual void stop() = 0;
sp<IMemory> getCblk() const { return mCblkMemory; }
audio_track_cblk_t* cblk() const { return mCblk; }
- int sessionId() const { return mSessionId; }
+ audio_session_t sessionId() const { return mSessionId; }
int uid() const { return mUid; }
virtual status_t setSyncEvent(const sp<SyncEvent>& event);
@@ -153,7 +153,7 @@
const size_t mFrameCount;// size of track buffer given at createTrack() or
// openRecord(), and then adjusted as needed
- const int mSessionId;
+ const audio_session_t mSessionId;
int mUid;
Vector < sp<SyncEvent> >mSyncEvents;
const IAudioFlinger::track_flags_t mFlags;
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index e684fc2..7cbb6b8 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -71,7 +71,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
- int sessionId,
+ audio_session_t sessionId,
int clientUid,
IAudioFlinger::track_flags_t flags,
bool isOut,
@@ -122,7 +122,7 @@
mCblkMemory = client->heap()->allocate(size);
if (mCblkMemory == 0 ||
(mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) {
- ALOGE("not enough memory for AudioTrack size=%u", size);
+ ALOGE("not enough memory for AudioTrack size=%zu", size);
client->heap()->dump("AudioTrack");
mCblkMemory.clear();
return;
@@ -343,7 +343,7 @@
size_t frameCount,
void *buffer,
const sp<IMemory>& sharedBuffer,
- int sessionId,
+ audio_session_t sessionId,
int uid,
IAudioFlinger::track_flags_t flags,
track_type type)
@@ -454,7 +454,7 @@
wasActive = playbackThread->destroyTrack_l(this);
}
if (isExternalTrack() && !wasActive) {
- AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, (audio_session_t)mSessionId);
+ AudioSystem::releaseOutput(mThreadIoHandle, mStreamType, mSessionId);
}
}
}
@@ -617,7 +617,7 @@
return true;
}
- if (framesReady() >= mFrameCount ||
+ if (framesReady() >= mServerProxy->getBufferSizeInFrames() ||
(mCblk->mFlags & CBLK_FORCEREADY)) {
mFillingUpStatus = FS_FILLED;
android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
@@ -627,7 +627,7 @@
}
status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
- int triggerSession __unused)
+ audio_session_t triggerSession __unused)
{
status_t status = NO_ERROR;
ALOGV("start(%d), calling pid %d session %d",
@@ -792,11 +792,6 @@
mState = ACTIVE;
}
- if (mState == ACTIVE) {
- ALOGV("flush called in active state, resetting buffer time out retry count");
- mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
- }
-
mFlushHwPending = true;
mResumeToStopping = false;
} else {
@@ -964,9 +959,9 @@
if (isOffloaded()) {
complete = true;
} else if (isDirect() || isFastTrack()) { // these do not go through linear map
- complete = framesWritten >= mPresentationCompleteFrames;
+ complete = framesWritten >= (int64_t) mPresentationCompleteFrames;
} else { // Normal tracks, OutputTracks, and PatchTracks
- complete = framesWritten >= mPresentationCompleteFrames
+ complete = framesWritten >= (int64_t) mPresentationCompleteFrames
&& mAudioTrackServerProxy->isDrained();
}
@@ -1021,7 +1016,7 @@
if (isTerminated() || mState == PAUSED ||
((framesReady() == 0) && ((mSharedBuffer != 0) ||
(mState == STOPPED)))) {
- ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
+ ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %zu",
mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
event->cancel();
return INVALID_OPERATION;
@@ -1032,13 +1027,23 @@
void AudioFlinger::PlaybackThread::Track::invalidate()
{
+ signalClientFlag(CBLK_INVALID);
+ mIsInvalid = true;
+}
+
+void AudioFlinger::PlaybackThread::Track::disable()
+{
+ signalClientFlag(CBLK_DISABLED);
+}
+
+void AudioFlinger::PlaybackThread::Track::signalClientFlag(int32_t flag)
+{
// FIXME should use proxy, and needs work
audio_track_cblk_t* cblk = mCblk;
- android_atomic_or(CBLK_INVALID, &cblk->mFlags);
+ android_atomic_or(flag, &cblk->mFlags);
android_atomic_release_store(0x40000000, &cblk->mFutex);
// client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
(void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
- mIsInvalid = true;
}
void AudioFlinger::PlaybackThread::Track::signal()
@@ -1123,7 +1128,8 @@
int uid)
: Track(playbackThread, NULL, AUDIO_STREAM_PATCH,
sampleRate, format, channelMask, frameCount,
- NULL, 0, 0, uid, IAudioFlinger::TRACK_DEFAULT, TYPE_OUTPUT),
+ NULL, 0, AUDIO_SESSION_NONE, uid, IAudioFlinger::TRACK_DEFAULT,
+ TYPE_OUTPUT),
mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
{
@@ -1131,7 +1137,7 @@
mOutBuffer.frameCount = 0;
playbackThread->mTracks.add(this);
ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
- "frameCount %u, mChannelMask 0x%08x",
+ "frameCount %zu, mChannelMask 0x%08x",
mCblk, mBuffer,
frameCount, mChannelMask);
// since client and server are in the same process,
@@ -1154,7 +1160,7 @@
}
status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
- int triggerSession)
+ audio_session_t triggerSession)
{
status_t status = Track::start(event, triggerSession);
if (status != NO_ERROR) {
@@ -1204,7 +1210,7 @@
mOutBuffer.frameCount = pInBuffer->frameCount;
nsecs_t startTime = systemTime();
status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
- if (status != NO_ERROR) {
+ if (status != NO_ERROR && status != NOT_ENOUGH_DATA) {
ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
mThread.unsafe_get(), status);
outputBufferFull = true;
@@ -1216,6 +1222,10 @@
} else {
waitTimeLeftMs = 0;
}
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ continue;
+ }
}
uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
@@ -1225,6 +1235,7 @@
buf.mFrameCount = outFrames;
buf.mRaw = NULL;
mClientProxy->releaseBuffer(&buf);
+ restartIfDisabled();
pInBuffer->frameCount -= outFrames;
pInBuffer->raw = (int8_t *)pInBuffer->raw + outFrames * mFrameSize;
mOutBuffer.frameCount -= outFrames;
@@ -1235,7 +1246,7 @@
mBufferQueue.removeAt(0);
free(pInBuffer->mBuffer);
delete pInBuffer;
- ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
+ ALOGV("OutputTrack::write() %p thread %p released overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
} else {
break;
@@ -1254,7 +1265,7 @@
pInBuffer->raw = pInBuffer->mBuffer;
memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * mFrameSize);
mBufferQueue.add(pInBuffer);
- ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
+ ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %zu", this,
mThread.unsafe_get(), mBufferQueue.size());
} else {
ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
@@ -1298,6 +1309,13 @@
mBufferQueue.clear();
}
+void AudioFlinger::PlaybackThread::OutputTrack::restartIfDisabled()
+{
+ int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ if (mActive && (flags & CBLK_DISABLED)) {
+ start();
+ }
+}
AudioFlinger::PlaybackThread::PatchTrack::PatchTrack(PlaybackThread *playbackThread,
audio_stream_type_t streamType,
@@ -1309,7 +1327,7 @@
IAudioFlinger::track_flags_t flags)
: Track(playbackThread, NULL, streamType,
sampleRate, format, channelMask, frameCount,
- buffer, 0, 0, getuid(), flags, TYPE_PATCH),
+ buffer, 0, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, true, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * playbackThread->frameCount() * 1000000000) /
@@ -1327,6 +1345,17 @@
{
}
+status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
+ audio_session_t triggerSession)
+{
+ status_t status = Track::start(event, triggerSession);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
+ return status;
+}
+
// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::PatchTrack::getNextBuffer(
AudioBufferProvider::Buffer* buffer)
@@ -1357,17 +1386,31 @@
status_t AudioFlinger::PlaybackThread::PatchTrack::obtainBuffer(Proxy::Buffer* buffer,
const struct timespec *timeOut)
{
- return mProxy->obtainBuffer(buffer, timeOut);
+ status_t status = NO_ERROR;
+ static const int32_t kMaxTries = 5;
+ int32_t tryCounter = kMaxTries;
+ do {
+ if (status == NOT_ENOUGH_DATA) {
+ restartIfDisabled();
+ }
+ status = mProxy->obtainBuffer(buffer, timeOut);
+ } while ((status == NOT_ENOUGH_DATA) && (tryCounter-- > 0));
+ return status;
}
void AudioFlinger::PlaybackThread::PatchTrack::releaseBuffer(Proxy::Buffer* buffer)
{
mProxy->releaseBuffer(buffer);
+ restartIfDisabled();
+ android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
+}
+
+void AudioFlinger::PlaybackThread::PatchTrack::restartIfDisabled()
+{
if (android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags) & CBLK_DISABLED) {
ALOGW("PatchTrack::releaseBuffer() disabled due to previous underrun, restarting");
start();
}
- android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
}
// ----------------------------------------------------------------------------
@@ -1387,7 +1430,7 @@
}
status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
- int triggerSession) {
+ audio_session_t triggerSession) {
ALOGV("RecordHandle::start()");
return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
}
@@ -1418,7 +1461,7 @@
audio_channel_mask_t channelMask,
size_t frameCount,
void *buffer,
- int sessionId,
+ audio_session_t sessionId,
int uid,
IAudioFlinger::track_flags_t flags,
track_type type)
@@ -1495,7 +1538,7 @@
}
status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
- int triggerSession)
+ audio_session_t triggerSession)
{
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
@@ -1512,7 +1555,7 @@
if (thread != 0) {
RecordThread *recordThread = (RecordThread *)thread.get();
if (recordThread->stop(this) && isExternalTrack()) {
- AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
+ AudioSystem::stopInput(mThreadIoHandle, mSessionId);
}
}
}
@@ -1524,9 +1567,9 @@
{
if (isExternalTrack()) {
if (mState == ACTIVE || mState == RESUMING) {
- AudioSystem::stopInput(mThreadIoHandle, (audio_session_t)mSessionId);
+ AudioSystem::stopInput(mThreadIoHandle, mSessionId);
}
- AudioSystem::releaseInput(mThreadIoHandle, (audio_session_t)mSessionId);
+ AudioSystem::releaseInput(mThreadIoHandle, mSessionId);
}
sp<ThreadBase> thread = mThread.promote();
if (thread != 0) {
@@ -1618,7 +1661,7 @@
void *buffer,
IAudioFlinger::track_flags_t flags)
: RecordTrack(recordThread, NULL, sampleRate, format, channelMask, frameCount,
- buffer, 0, getuid(), flags, TYPE_PATCH),
+ buffer, AUDIO_SESSION_NONE, getuid(), flags, TYPE_PATCH),
mProxy(new ClientProxy(mCblk, mBuffer, frameCount, mFrameSize, false, true))
{
uint64_t mixBufferNs = ((uint64_t)2 * recordThread->frameCount() * 1000000000) /
diff --git a/services/audioflinger/audio-resampler/Android.mk b/services/audioflinger/audio-resampler/Android.mk
index ba37b19..bb2807c 100644
--- a/services/audioflinger/audio-resampler/Android.mk
+++ b/services/audioflinger/audio-resampler/Android.mk
@@ -11,4 +11,6 @@
LOCAL_SHARED_LIBRARIES := libutils liblog
+LOCAL_CFLAGS += -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audioflinger/tests/Android.mk b/services/audioflinger/tests/Android.mk
index 6182de0..3505e0f 100644
--- a/services/audioflinger/tests/Android.mk
+++ b/services/audioflinger/tests/Android.mk
@@ -23,6 +23,8 @@
LOCAL_MODULE := resampler_tests
LOCAL_MODULE_TAGS := tests
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_NATIVE_TEST)
#
@@ -61,4 +63,6 @@
LOCAL_CXX_STL := libc++
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_EXECUTABLE)
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index 8218edd..8b45adc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -45,13 +45,14 @@
libmedia_helper \
libaudiopolicycomponents
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicyservice
LOCAL_CFLAGS += -fvisibility=hidden
include $(BUILD_SHARED_LIBRARY)
-
ifneq ($(USE_LEGACY_AUDIO_POLICY), 1)
include $(CLEAR_VARS)
@@ -101,6 +102,8 @@
LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
endif #ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicymanagerdefault
include $(BUILD_SHARED_LIBRARY)
@@ -122,6 +125,8 @@
$(TOPDIR)frameworks/av/services/audiopolicy/common/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engine/interface
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicymanager
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 40ca899..b1347f4 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -312,7 +312,7 @@
virtual status_t setVoiceVolume(float volume, int delayMs = 0) = 0;
// move effect to the specified output
- virtual status_t moveEffects(int session,
+ virtual status_t moveEffects(audio_session_t session,
audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput) = 0;
@@ -332,14 +332,15 @@
virtual void onAudioPatchListUpdate() = 0;
- virtual audio_unique_id_t newAudioUniqueId() = 0;
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
virtual void onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source,
const struct audio_config_base *clientConfig,
- const struct audio_config_base *deviceConfig) = 0;
+ const struct audio_config_base *deviceConfig,
+ audio_patch_handle_t patchHandle) = 0;
};
extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface);
diff --git a/services/audiopolicy/common/include/policy.h b/services/audiopolicy/common/include/policy.h
index d9e7212..f73548d 100755
--- a/services/audiopolicy/common/include/policy.h
+++ b/services/audiopolicy/common/include/policy.h
@@ -22,11 +22,14 @@
// For mixed output and inputs, the policy will use max mixer sampling rates.
// Do not limit sampling rate otherwise
-#define MAX_MIXER_SAMPLING_RATE 192000
+#define SAMPLE_RATE_HZ_MAX 192000
+
+// Used when a client opens a capture stream, without specifying a desired sample rate.
+#define SAMPLE_RATE_HZ_DEFAULT 48000
// For mixed output and inputs, the policy will use max mixer channel count.
// Do not limit channel count otherwise
-#define MAX_MIXER_CHANNEL_COUNT 8
+#define MAX_MIXER_CHANNEL_COUNT FCC_8
/**
* A device mask for all audio input devices that are considered "virtual" when evaluating
@@ -39,9 +42,9 @@
* A device mask for all audio input and output devices where matching inputs/outputs on device
* type alone is not enough: the address must match too
*/
-#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX)
+#define APM_AUDIO_DEVICE_OUT_MATCH_ADDRESS_ALL (AUDIO_DEVICE_OUT_REMOTE_SUBMIX|AUDIO_DEVICE_OUT_BUS)
-#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX)
+#define APM_AUDIO_DEVICE_IN_MATCH_ADDRESS_ALL (AUDIO_DEVICE_IN_REMOTE_SUBMIX|AUDIO_DEVICE_IN_BUS)
/**
* Check if the state given correspond to an in call state.
diff --git a/services/audiopolicy/common/managerdefinitions/Android.mk b/services/audiopolicy/common/managerdefinitions/Android.mk
index 5c81410..3b4ae6b 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.mk
+++ b/services/audiopolicy/common/managerdefinitions/Android.mk
@@ -58,6 +58,8 @@
LOCAL_EXPORT_C_INCLUDE_DIRS := \
$(LOCAL_PATH)/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE := libaudiopolicycomponents
include $(BUILD_STATIC_LIBRARY)
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 77c0d07..46309ed 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -18,6 +18,7 @@
#include "AudioPort.h"
#include "AudioSession.h"
+#include "AudioSessionInfoProvider.h"
#include <utils/Errors.h>
#include <system/audio.h>
#include <utils/SortedVector.h>
@@ -30,7 +31,7 @@
// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
// and keep track of the usage of this input.
-class AudioInputDescriptor: public AudioPortConfig
+class AudioInputDescriptor: public AudioPortConfig, public AudioSessionInfoProvider
{
public:
AudioInputDescriptor(const sp<IOProfile>& profile);
@@ -44,7 +45,6 @@
audio_io_handle_t mIoHandle; // input handle
audio_devices_t mDevice; // current device this input is routed to
AudioMix *mPolicyMix; // non NULL when used by a dynamic policy
- audio_patch_handle_t mPatchHandle;
const sp<IOProfile> mProfile; // I/O profile this output derives from
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
@@ -65,7 +65,14 @@
sp<AudioSession> getAudioSession(audio_session_t session) const;
AudioSessionCollection getActiveAudioSessions() const;
+ // implementation of AudioSessionInfoProvider
+ virtual audio_config_base_t getConfig() const;
+ virtual audio_patch_handle_t getPatchHandle() const;
+
+ void setPatchHandle(audio_patch_handle_t handle);
+
private:
+ audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
// audio sessions attached to this input
AudioSessionCollection mSessions;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index f8439be..dd3f8ae 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -71,9 +71,11 @@
audio_module_handle_t getModuleHandle() const;
+ audio_patch_handle_t getPatchHandle() const { return mPatchHandle; };
+ void setPatchHandle(audio_patch_handle_t handle) { mPatchHandle = handle; };
+
sp<AudioPort> mPort;
audio_devices_t mDevice; // current device this output is routed to
- audio_patch_handle_t mPatchHandle;
uint32_t mRefCount[AUDIO_STREAM_CNT]; // number of streams of each type using this output
nsecs_t mStopTime[AUDIO_STREAM_CNT];
float mCurVolume[AUDIO_STREAM_CNT]; // current stream volume in dB
@@ -83,6 +85,7 @@
AudioPolicyClientInterface *mClientInterface;
protected:
+ audio_patch_handle_t mPatchHandle;
audio_port_handle_t mId;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index c952831..8f5ebef 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -54,7 +54,7 @@
public:
status_t getAudioPolicyMix(String8 address, sp<AudioPolicyMix> &policyMix) const;
- status_t registerMix(String8 address, AudioMix mix);
+ status_t registerMix(String8 address, AudioMix mix, sp<SwAudioOutputDescriptor> desc);
status_t unregisterMix(String8 address);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 5958f4f..211ec98 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -66,6 +66,7 @@
virtual void attach(const sp<HwModule>& module);
bool isAttached() { return mModule != 0; }
+ // Audio port IDs are in a different namespace than AudioFlinger unique IDs
static audio_port_handle_t getNextUniqueId();
virtual void toAudioPort(struct audio_port *port) const;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
index 799410b..388c25d 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSession.h
@@ -22,12 +22,13 @@
#include <utils/Errors.h>
#include <utils/KeyedVector.h>
#include <media/AudioPolicy.h>
+#include "AudioSessionInfoProvider.h"
namespace android {
class AudioPolicyClientInterface;
-class AudioSession : public RefBase
+class AudioSession : public RefBase, public AudioSessionInfoUpdateListener
{
public:
AudioSession(audio_session_t session,
@@ -58,14 +59,14 @@
uint32_t changeOpenCount(int delta);
uint32_t changeActiveCount(int delta);
- void setDeviceConfig(audio_format_t format, uint32_t sampleRate,
- audio_channel_mask_t channelMask);
+ void setInfoProvider(AudioSessionInfoProvider *provider);
+ // implementation of AudioSessionInfoUpdateListener
+ virtual void onSessionInfoUpdate() const;
private:
const audio_session_t mSession;
const audio_source_t mInputSource;
const struct audio_config_base mConfig;
- struct audio_config_base mDeviceConfig;
const audio_input_flags_t mFlags;
const uid_t mUid;
bool mIsSoundTrigger;
@@ -73,14 +74,17 @@
uint32_t mActiveCount;
AudioMix* mPolicyMix; // non NULL when used by a dynamic policy
AudioPolicyClientInterface* mClientInterface;
+ const AudioSessionInfoProvider* mInfoProvider;
};
class AudioSessionCollection :
- public DefaultKeyedVector<audio_session_t, sp<AudioSession> >
+ public DefaultKeyedVector<audio_session_t, sp<AudioSession> >,
+ public AudioSessionInfoUpdateListener
{
public:
status_t addSession(audio_session_t session,
- const sp<AudioSession>& audioSession);
+ const sp<AudioSession>& audioSession,
+ AudioSessionInfoProvider *provider);
status_t removeSession(audio_session_t session);
@@ -90,6 +94,9 @@
bool hasActiveSession() const;
bool isSourceActive(audio_source_t source) const;
+ // implementation of AudioSessionInfoUpdateListener
+ virtual void onSessionInfoUpdate() const;
+
status_t dump(int fd, int spaces) const;
};
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h b/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
new file mode 100644
index 0000000..e0037fc
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioSessionInfoProvider.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+namespace android {
+
+/**
+ * Interface for input descriptors to implement so dependent audio sessions can query information
+ * about their context
+ */
+class AudioSessionInfoProvider
+{
+public:
+ virtual ~AudioSessionInfoProvider() {};
+
+ virtual audio_config_base_t getConfig() const = 0;
+
+ virtual audio_patch_handle_t getPatchHandle() const = 0;
+
+};
+
+class AudioSessionInfoUpdateListener
+{
+public:
+ virtual ~AudioSessionInfoUpdateListener() {};
+
+ virtual void onSessionInfoUpdate() const = 0;
+};
+
+} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
index eae9586..dd20e93 100644
--- a/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
+++ b/services/audiopolicy/common/managerdefinitions/include/IOProfile.h
@@ -39,7 +39,7 @@
// For a Profile aka MixPort, tag name and name are equivalent.
virtual const String8 getTagName() const { return getName(); }
- // This method is used for both output and input.
+ // This method is used for input and direct output, and is not used for other output.
// If parameter updatedSamplingRate is non-NULL, it is assigned the actual sample rate.
// For input, flags is interpreted as audio_input_flags_t.
// TODO: merge audio_output_flags_t and audio_input_flags_t.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 5523aff..d4992b0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -28,8 +28,8 @@
AudioInputDescriptor::AudioInputDescriptor(const sp<IOProfile>& profile)
: mIoHandle(0),
- mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL), mPatchHandle(0),
- mProfile(profile), mId(0)
+ mDevice(AUDIO_DEVICE_NONE), mPolicyMix(NULL),
+ mProfile(profile), mPatchHandle(0), mId(0)
{
if (profile != NULL) {
profile->pickAudioProfile(mSamplingRate, mChannelMask, mFormat);
@@ -150,14 +150,31 @@
status_t AudioInputDescriptor::addAudioSession(audio_session_t session,
const sp<AudioSession>& audioSession) {
- audioSession->setDeviceConfig(mFormat, mSamplingRate, mChannelMask);
- return mSessions.addSession(session, audioSession);
+ return mSessions.addSession(session, audioSession, /*AudioSessionInfoProvider*/this);
}
status_t AudioInputDescriptor::removeAudioSession(audio_session_t session) {
return mSessions.removeSession(session);
}
+audio_patch_handle_t AudioInputDescriptor::getPatchHandle() const
+{
+ return mPatchHandle;
+}
+
+void AudioInputDescriptor::setPatchHandle(audio_patch_handle_t handle)
+{
+ mPatchHandle = handle;
+ mSessions.onSessionInfoUpdate();
+}
+
+audio_config_base_t AudioInputDescriptor::getConfig() const
+{
+ const audio_config_base_t config = { .sample_rate = mSamplingRate, .channel_mask = mChannelMask,
+ .format = mFormat };
+ return config;
+}
+
status_t AudioInputDescriptor::dump(int fd)
{
const size_t SIZE = 256;
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 5d0f03f..c5fee50 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -34,7 +34,7 @@
AudioOutputDescriptor::AudioOutputDescriptor(const sp<AudioPort>& port,
AudioPolicyClientInterface *clientInterface)
: mPort(port), mDevice(AUDIO_DEVICE_NONE),
- mPatchHandle(0), mClientInterface(clientInterface), mId(0)
+ mClientInterface(clientInterface), mPatchHandle(0), mId(0)
{
// clear usage count for all stream types
for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
@@ -315,14 +315,14 @@
if ((oldGlobalRefCount == 0) && (mGlobalRefCount > 0)) {
if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
{
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+ mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
} else if ((oldGlobalRefCount > 0) && (mGlobalRefCount == 0)) {
if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
{
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
+ mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 3735c05..7ee98b6 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "APM::AudioPolicyMix"
+#define LOG_TAG "APM_AudioPolicyMix"
//#define LOG_NDEBUG 0
#include "AudioPolicyMix.h"
@@ -51,7 +51,8 @@
return &mMix;
}
-status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix)
+status_t AudioPolicyMixCollection::registerMix(String8 address, AudioMix mix,
+ sp<SwAudioOutputDescriptor> desc)
{
ssize_t index = indexOfKey(address);
if (index >= 0) {
@@ -61,6 +62,11 @@
sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
policyMix->setMix(mix);
add(address, policyMix);
+
+ if (desc != 0) {
+ desc->mPolicyMix = policyMix->getMix();
+ policyMix->setOutput(desc);
+ }
return NO_ERROR;
}
@@ -101,6 +107,7 @@
status_t AudioPolicyMixCollection::getOutputForAttr(audio_attributes_t attributes, uid_t uid,
sp<SwAudioOutputDescriptor> &desc)
{
+ ALOGV("getOutputForAttr() querying %zu mixes:", size());
desc = 0;
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
@@ -123,12 +130,13 @@
// iterate over all mix criteria to list what rules this mix contains
for (size_t j = 0; j < mix->mCriteria.size(); j++) {
- ALOGV("getOutputForAttr: inspecting mix %zu of %zu", i, mix->mCriteria.size());
+ ALOGV(" getOutputForAttr: mix %zu: inspecting mix criteria %zu of %zu",
+ i, j, mix->mCriteria.size());
// if there is an address match, prioritize that match
if (strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
strncmp(attributes.tags + strlen("addr="),
- mix->mRegistrationId.string(),
+ mix->mDeviceAddress.string(),
AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
hasAddrMatch = true;
break;
@@ -207,7 +215,7 @@
if (attributes.usage == AUDIO_USAGE_VIRTUAL_SOURCE &&
strncmp(attributes.tags, "addr=", strlen("addr=")) == 0 &&
strncmp(attributes.tags + strlen("addr="),
- mix->mRegistrationId.string(),
+ mix->mDeviceAddress.string(),
AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - strlen("addr=") - 1) == 0) {
desc = policyMix->getOutput();
}
@@ -260,7 +268,7 @@
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = valueAt(i);
AudioMix *mix = policyMix->getMix();
- ALOGV("\tmix %zu address=%s", i, mix->mRegistrationId.string());
+ ALOGV("\tmix %zu address=%s", i, mix->mDeviceAddress.string());
}
#endif
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index bda59ad..19b179e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -36,6 +36,7 @@
mModule = module;
}
+// Note that this is a different namespace than AudioFlinger unique IDs
audio_port_handle_t AudioPort::getNextUniqueId()
{
return static_cast<audio_port_handle_t>(android_atomic_inc(&mNextUniqueId));
@@ -164,7 +165,7 @@
}
pickedRate = (samplingRate == UINT_MAX) ? 0 : samplingRate;
} else {
- uint32_t maxRate = MAX_MIXER_SAMPLING_RATE;
+ uint32_t maxRate = SAMPLE_RATE_HZ_MAX;
// For mixed output and inputs, use max mixer sampling rates. Do not
// limit sampling rate otherwise
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
index 961072e..5987d1a 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioProfile.cpp
@@ -63,10 +63,13 @@
status_t AudioProfile::checkCompatibleSamplingRate(uint32_t samplingRate,
uint32_t &updatedSamplingRate) const
{
+ ALOG_ASSERT(samplingRate > 0);
+
if (mSamplingRates.isEmpty()) {
updatedSamplingRate = samplingRate;
return NO_ERROR;
}
+
// Search for the closest supported sampling rate that is above (preferred)
// or below (acceptable) the desired sampling rate, within a permitted ratio.
// The sampling rates are sorted in ascending order.
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
index 2a0b477..da983c5 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioSession.cpp
@@ -38,9 +38,9 @@
AudioPolicyClientInterface *clientInterface) :
mSession(session), mInputSource(inputSource),
mConfig({ .format = format, .sample_rate = sampleRate, .channel_mask = channelMask}),
- mDeviceConfig(AUDIO_CONFIG_BASE_INITIALIZER),
mFlags(flags), mUid(uid), mIsSoundTrigger(isSoundTrigger),
- mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface)
+ mOpenCount(1), mActiveCount(0), mPolicyMix(policyMix), mClientInterface(clientInterface),
+ mInfoProvider(NULL)
{
}
@@ -66,25 +66,31 @@
}
mActiveCount += delta;
ALOGV("%s active count %d", __FUNCTION__, mActiveCount);
+ int event = RECORD_CONFIG_EVENT_NONE;
if ((oldActiveCount == 0) && (mActiveCount > 0)) {
- // if input maps to a dynamic policy with an activity listener, notify of state change
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
- {
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
- MIX_STATE_MIXING);
- }
- mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
- mSession, mInputSource, &mConfig, &mDeviceConfig);
+ event = RECORD_CONFIG_EVENT_START;
} else if ((oldActiveCount > 0) && (mActiveCount == 0)) {
+ event = RECORD_CONFIG_EVENT_STOP;
+ }
+
+ if (event != RECORD_CONFIG_EVENT_NONE) {
+ // Dynamic policy callback:
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
{
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mRegistrationId,
- MIX_STATE_IDLE);
+ mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ (event == RECORD_CONFIG_EVENT_START) ? MIX_STATE_MIXING : MIX_STATE_IDLE);
}
- mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_STOP,
- mSession, mInputSource, &mConfig, &mDeviceConfig);
+
+ // Recording configuration callback:
+ const AudioSessionInfoProvider* provider = mInfoProvider;
+ const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
+ AUDIO_CONFIG_BASE_INITIALIZER;
+ const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
+ AUDIO_PATCH_HANDLE_NONE;
+ mClientInterface->onRecordingConfigurationUpdate(event, mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
}
return mActiveCount;
@@ -104,11 +110,24 @@
return false;
}
-void AudioSession::setDeviceConfig(audio_format_t format, uint32_t sampleRate,
- audio_channel_mask_t channelMask) {
- mDeviceConfig.format = format;
- mDeviceConfig.sample_rate = sampleRate;
- mDeviceConfig.channel_mask = channelMask;
+void AudioSession::setInfoProvider(AudioSessionInfoProvider *provider)
+{
+ mInfoProvider = provider;
+}
+
+void AudioSession::onSessionInfoUpdate() const
+{
+ if (mActiveCount > 0) {
+ // resend the callback after re-querying the information from the info provider
+ const AudioSessionInfoProvider* provider = mInfoProvider;
+ const audio_config_base_t deviceConfig = (provider != NULL) ? provider->getConfig() :
+ AUDIO_CONFIG_BASE_INITIALIZER;
+ const audio_patch_handle_t patchHandle = (provider != NULL) ? provider->getPatchHandle() :
+ AUDIO_PATCH_HANDLE_NONE;
+ mClientInterface->onRecordingConfigurationUpdate(RECORD_CONFIG_EVENT_START,
+ mSession, mInputSource,
+ &mConfig, &deviceConfig, patchHandle);
+ }
}
status_t AudioSession::dump(int fd, int spaces, int index) const
@@ -145,7 +164,8 @@
}
status_t AudioSessionCollection::addSession(audio_session_t session,
- const sp<AudioSession>& audioSession)
+ const sp<AudioSession>& audioSession,
+ AudioSessionInfoProvider *provider)
{
ssize_t index = indexOfKey(session);
@@ -153,6 +173,7 @@
ALOGW("addSession() session %d already in", session);
return ALREADY_EXISTS;
}
+ audioSession->setInfoProvider(provider);
add(session, audioSession);
ALOGV("addSession() session %d client %d source %d",
session, audioSession->uid(), audioSession->inputSource());
@@ -168,6 +189,7 @@
return ALREADY_EXISTS;
}
ALOGV("removeSession() session %d", session);
+ valueAt(index)->setInfoProvider(NULL);
removeItemsAt(index);
return NO_ERROR;
}
@@ -214,6 +236,13 @@
return false;
}
+void AudioSessionCollection::onSessionInfoUpdate() const
+{
+ for (size_t i = 0; i < size(); i++) {
+ valueAt(i)->onSessionInfoUpdate();
+ }
+}
+
status_t AudioSessionCollection::dump(int fd, int spaces) const
{
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 787f53f..cf7c8fc 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -50,15 +50,11 @@
{
// Devices are considered equal if they:
// - are of the same type (a device type cannot be AUDIO_DEVICE_NONE)
- // - have the same address or one device does not specify the address
- // - have the same channel mask or one device does not specify the channel mask
+ // - have the same address
if (other == 0) {
return false;
}
- return (mDeviceType == other->mDeviceType) &&
- (mAddress == "" || other->mAddress == "" || mAddress == other->mAddress) &&
- (mChannelMask == 0 || other->mChannelMask == 0 ||
- mChannelMask == other->mChannelMask);
+ return (mDeviceType == other->mDeviceType) && (mAddress == other->mAddress);
}
void DeviceVector::refreshTypes()
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index dd2a60a..b7c7879 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -310,12 +310,6 @@
if (!deviceList.isEmpty()) {
return deviceList.itemAt(0);
}
- deviceList = hwModule->getDeclaredDevices().getDevicesFromType(device);
- if (!deviceList.isEmpty()) {
- deviceList.itemAt(0)->setName(String8(device_name));
- deviceList.itemAt(0)->mAddress = address;
- return deviceList.itemAt(0);
- }
}
sp<DeviceDescriptor> devDesc = new DeviceDescriptor(device);
diff --git a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
index 204eb04..abf2dd4 100644
--- a/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/IOProfile.cpp
@@ -55,8 +55,8 @@
}
}
- if (samplingRate == 0 || !audio_is_valid_format(format) ||
- (isPlaybackThread && (!audio_is_output_channel(channelMask))) ||
+ if (!audio_is_valid_format(format) ||
+ (isPlaybackThread && (samplingRate == 0 || !audio_is_output_channel(channelMask))) ||
(isRecordThread && (!audio_is_input_channel(channelMask)))) {
return false;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
index f613f94..c6ed53e 100644
--- a/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/TypeConverter.cpp
@@ -50,6 +50,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_FM),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_AUX_LINE),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_OUT_BUS),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_AMBIENT),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUILTIN_MIC),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET),
@@ -72,6 +73,7 @@
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BLUETOOTH_A2DP),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_LOOPBACK),
MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_IP),
+ MAKE_STRING_FROM_ENUM(AUDIO_DEVICE_IN_BUS),
};
template<>
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index e6b5f85..6dba75b 100755
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -35,6 +35,7 @@
$(call include-path-for, audio-utils) \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE := libaudiopolicyengineconfigurable
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
index e15e418..5775556 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Android.mk
@@ -13,7 +13,7 @@
PFW_CORE := external/parameter-framework
BUILD_PFW_SETTINGS := $(PFW_CORE)/support/android/build_pfw_settings.mk
-PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/Schemas
+PFW_DEFAULT_SCHEMAS_DIR := $(PFW_CORE)/upstream/schemas
PFW_SCHEMAS_DIR := $(PFW_DEFAULT_SCHEMAS_DIR)
##################################################################
@@ -26,7 +26,17 @@
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE_CLASS := ETC
LOCAL_MODULE_PATH := $(TARGET_OUT_ETC)/parameter-framework
-LOCAL_SRC_FILES := $(LOCAL_MODULE)
+LOCAL_SRC_FILES := $(LOCAL_MODULE).in
+
+AUDIO_PATTERN = @TUNING_ALLOWED@
+ifeq ($(TARGET_BUILD_VARIANT),user)
+AUDIO_VALUE = false
+else
+AUDIO_VALUE = true
+endif
+
+LOCAL_POST_INSTALL_CMD := $(hide) sed -i -e 's|$(AUDIO_PATTERN)|$(AUDIO_VALUE)|g' $(LOCAL_MODULE_PATH)/$(LOCAL_MODULE)
+
include $(BUILD_PREBUILT)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml.in
similarity index 78%
rename from services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
rename to services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml.in
index 6905201..f5615cd 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/ParameterFrameworkConfigurationPolicy.xml.in
@@ -1,7 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<ParameterFrameworkConfiguration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:noNamespaceSchemaLocation="Schemas/ParameterFrameworkConfiguration.xsd"
- SystemClassName="Policy" ServerPort="5019" TuningAllowed="true">
+ SystemClassName="Policy" ServerPort="5019" TuningAllowed="@TUNING_ALLOWED@">
<SubsystemPlugins>
<Location Folder="">
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
index 8c3917a..be86b8d 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/PolicyConfigurableDomains.xml
@@ -1,6 +1,6 @@
<?xml version="1.0" encoding="UTF-8"?>
<!-- #### DO NOT EDIT THIS FILE #### -->
-<ConfigurableDomains xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="Schemas/ConfigurableDomains.xsd" SystemClassName="Policy">
+<ConfigurableDomains SystemClassName="Policy">
<ConfigurableDomain Name="DeviceForStrategy.Media.UnreachableDevices" SequenceAware="false">
<Configurations>
<Configuration Name="Calibration">
@@ -5220,16 +5220,16 @@
</Configuration>
</Configurations>
<ConfigurableElements>
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc"/>
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Selected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
<BitParameter Name="hdmi_arc">1</BitParameter>
</ConfigurableElement>
</Configuration>
<Configuration Name="NotSelected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc">
<BitParameter Name="hdmi_arc">0</BitParameter>
</ConfigurableElement>
</Configuration>
@@ -5249,16 +5249,16 @@
</Configuration>
</Configurations>
<ConfigurableElements>
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif"/>
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Selected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif">
<BitParameter Name="spdif">1</BitParameter>
</ConfigurableElement>
</Configuration>
<Configuration Name="NotSelected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/spdif">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif">
<BitParameter Name="spdif">0</BitParameter>
</ConfigurableElement>
</Configuration>
@@ -5278,16 +5278,16 @@
</Configuration>
</Configurations>
<ConfigurableElements>
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line"/>
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line"/>
</ConfigurableElements>
<Settings>
<Configuration Name="Selected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line">
<BitParameter Name="aux_line">1</BitParameter>
</ConfigurableElement>
</Configuration>
<Configuration Name="NotSelected">
- <ConfigurableElement Path="/Policy/policy/strategies/media/selected_output_devices/mask/aux_line">
+ <ConfigurableElement Path="/Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line">
<BitParameter Name="aux_line">0</BitParameter>
</ConfigurableElement>
</Configuration>
@@ -5300,7 +5300,6 @@
</Configuration>
</Configurations>
<ConfigurableElements>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix"/>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc"/>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/spdif"/>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/aux_line"/>
@@ -5308,9 +5307,6 @@
</ConfigurableElements>
<Settings>
<Configuration Name="Calibration">
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
- <BitParameter Name="remote_submix">0</BitParameter>
- </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/hdmi_arc">
<BitParameter Name="hdmi_arc">0</BitParameter>
</ConfigurableElement>
@@ -5474,7 +5470,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/telephony_tx"/>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/line"/>
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm"/>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe"/>
</ConfigurableElements>
<Settings>
<Configuration Name="RemoteSubmix">
@@ -5532,9 +5527,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="BluetoothA2dp">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5591,9 +5583,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="BluetoothA2dpHeadphones">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5650,9 +5639,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="BluetoothA2dpSpeaker">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5709,9 +5695,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="WiredHeadphone">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5768,9 +5751,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="Line">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5827,9 +5807,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="WiredHeadset">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5886,9 +5863,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="UsbAccessory">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -5945,9 +5919,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="UsbDevice">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6004,9 +5975,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="DgtlDockHeadset">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6063,9 +6031,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="Hdmi">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6122,9 +6087,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
<Configuration Name="AnlgDockHeadset">
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/remote_submix">
@@ -6181,9 +6143,6 @@
<ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/fm">
<BitParameter Name="fm">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/strategies/enforced_audible/selected_output_devices/mask/speaker_safe">
- <BitParameter Name="speaker_safe">0</BitParameter>
- </ConfigurableElement>
</Configuration>
</Settings>
</ConfigurableDomain>
@@ -8225,7 +8184,6 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/dgtl_dock_headset"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_accessory"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device"/>
- <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/line"/>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/spdif"/>
@@ -8837,9 +8795,6 @@
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/usb_device">
<BitParameter Name="usb_device">0</BitParameter>
</ConfigurableElement>
- <ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/fm_tuner">
- <BitParameter Name="fm_tuner">0</BitParameter>
- </ConfigurableElement>
<ConfigurableElement Path="/Policy/policy/input_sources/fm_tuner/applicable_input_device/mask/tv_tuner">
<BitParameter Name="tv_tuner">0</BitParameter>
</ConfigurableElement>
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
index 07a3c81..16bcb01 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_input_source.pfw
@@ -218,7 +218,6 @@
dgtl_dock_headset = 0
usb_accessory = 0
usb_device = 0
- fm_tuner = 0
tv_tuner = 0
line = 0
spdif = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
index 85273b2..d8b5b9d 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_dtmf.pfw
@@ -599,10 +599,10 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes HdmiArc
- /Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 1
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/hdmi_arc = 0
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/hdmi_arc = 0
domain: Spdif
#
@@ -615,10 +615,10 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes Spdif
- /Policy/policy/strategies/media/selected_output_devices/mask/spdif = 1
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/spdif = 0
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/spdif = 0
domain: AuxLine
#
@@ -631,7 +631,7 @@
TelephonyMode IsNot InCommunication
AvailableOutputDevices Includes AuxLine
- /Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 1
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 1
conf: NotSelected
- /Policy/policy/strategies/media/selected_output_devices/mask/aux_line = 0
+ /Policy/policy/strategies/dtmf/selected_output_devices/mask/aux_line = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
index d714743..593ef64 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Settings/device_for_strategy_enforced_audible.pfw
@@ -6,7 +6,6 @@
conf: Calibration
component: /Policy/policy/strategies/enforced_audible/selected_output_devices/mask
# no enforced_audible on remote submix (e.g. WFD)
- remote_submix = 0
hdmi_arc = 0
spdif = 0
aux_line = 0
@@ -78,7 +77,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: BluetoothA2dp
AvailableOutputDevices Includes BluetoothA2dp
@@ -103,7 +101,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: BluetoothA2dpHeadphones
AvailableOutputDevices Includes BluetoothA2dpHeadphones
@@ -128,7 +125,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: BluetoothA2dpSpeaker
AvailableOutputDevices Includes BluetoothA2dpSpeaker
@@ -153,7 +149,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: WiredHeadphone
ForceUseForMedia IsNot ForceSpeaker
@@ -178,7 +173,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: Line
ForceUseForMedia IsNot ForceSpeaker
@@ -203,7 +197,6 @@
telephony_tx = 0
line = 1
fm = 0
- speaker_safe = 0
conf: WiredHeadset
ForceUseForMedia IsNot ForceSpeaker
@@ -228,7 +221,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: UsbAccessory
ForceUseForMedia IsNot ForceSpeaker
@@ -253,7 +245,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: UsbDevice
ForceUseForMedia IsNot ForceSpeaker
@@ -278,7 +269,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: DgtlDockHeadset
ForceUseForMedia IsNot ForceSpeaker
@@ -303,7 +293,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: Hdmi
ForceUseForMedia IsNot ForceSpeaker
@@ -328,7 +317,6 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
conf: AnlgDockHeadset
ForceUseForMedia IsNot ForceSpeaker
@@ -354,5 +342,4 @@
telephony_tx = 0
line = 0
fm = 0
- speaker_safe = 0
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
index e35511c..71b2b62 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/example/Structure/PolicySubsystem.xml
@@ -2,7 +2,7 @@
<Subsystem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:xi="http://www.w3.org/2001/XInclude"
xsi:noNamespaceSchemaLocation="Schemas/Subsystem.xsd"
- Name="policy" Type="Policy" Endianness="Little">
+ Name="policy" Type="Policy">
<ComponentLibrary>
<!--#################### GLOBAL COMPONENTS BEGIN ####################-->
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index c65de92..0e44f2c 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -1,5 +1,7 @@
LOCAL_PATH := $(call my-dir)
+ifneq ($(USE_CUSTOM_PARAMETER_FRAMEWORK), true)
+
include $(CLEAR_VARS)
LOCAL_MODULE_TAGS := optional
@@ -15,9 +17,10 @@
-Wall \
-Werror \
-Wextra \
+ -fvisibility-inlines-hidden \
+ -fvisibility=hidden
LOCAL_C_INCLUDES := \
- $(TOPDIR)external/parameter-framework/parameter \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/include \
$(TOPDIR)frameworks/av/services/audiopolicy/engineconfigurable/interface \
@@ -25,11 +28,15 @@
LOCAL_SHARED_LIBRARIES := \
libaudiopolicyengineconfigurable \
libparameter \
- libxmlserializer \
liblog \
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
+LOCAL_STATIC_LIBRARIES := libpfw_utility
+
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := libpolicy-subsystem
include $(BUILD_SHARED_LIBRARY)
+endif # ifneq ($(USE_CUSTOM_PARAMETER_FRAMEWORK), true)
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
index ccb10ae..eac4efe 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.cpp
@@ -21,9 +21,10 @@
using std::string;
InputSource::InputSource(const string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context)
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context, core::log::Logger &logger)
: CFormattedSubsystemObject(instanceConfigurableElement,
+ logger,
mappingValue,
MappingKeyAmend1,
(MappingKeyAmendEnd - MappingKeyAmend1 + 1),
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
index 0db4f70..58f3c06 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/InputSource.h
@@ -28,8 +28,9 @@
{
public:
InputSource(const std::string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context);
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context,
+ core::log::Logger& logger);
protected:
virtual bool sendToHW(std::string &error);
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
index 6412134..98d10a9 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.cpp
@@ -40,8 +40,8 @@
const char *const PolicySubsystem::mInputSourceComponentName = "InputSource";
const char *const PolicySubsystem::mUsageComponentName = "Usage";
-PolicySubsystem::PolicySubsystem(const std::string &name)
- : CSubsystem(name),
+PolicySubsystem::PolicySubsystem(const std::string &name, core::log::Logger &logger)
+ : CSubsystem(name, logger),
mPluginInterface(NULL)
{
// Try to connect a Plugin Interface from Audio Policy Engine
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
index e3143a5..822eeb9 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystem.h
@@ -28,7 +28,7 @@
class PolicySubsystem : public CSubsystem
{
public:
- PolicySubsystem(const std::string &strName);
+ PolicySubsystem(const std::string &strName, core::log::Logger& logger);
/**
* Retrieve Route Manager interface.
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
index b14d446..348d5e7 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/PolicySubsystemBuilder.cpp
@@ -14,16 +14,16 @@
* limitations under the License.
*/
-#include "SubsystemLibrary.h"
-#include "NamedElementBuilderTemplate.h"
+#include <Plugin.h>
+#include "LoggingElementBuilderTemplate.h"
#include "PolicySubsystem.h"
static const char *const POLICY_SUBSYSTEM_NAME = "Policy";
extern "C"
{
-void getPOLICYSubsystemBuilder(CSubsystemLibrary *subsystemLibrary)
+void PARAMETER_FRAMEWORK_PLUGIN_ENTRYPOINT_V1(CSubsystemLibrary *subsystemLibrary, core::log::Logger& logger)
{
subsystemLibrary->addElementBuilder(POLICY_SUBSYSTEM_NAME,
- new TNamedElementBuilderTemplate<PolicySubsystem>());
+ new TLoggingElementBuilderTemplate<PolicySubsystem>(logger));
}
}
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
index 5c536d5..746c3a8 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.cpp
@@ -23,8 +23,10 @@
Strategy::Strategy(const string &mappingValue,
CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context)
+ const CMappingContext &context,
+ core::log::Logger& logger)
: CFormattedSubsystemObject(instanceConfigurableElement,
+ logger,
mappingValue,
MappingKeyAmend1,
(MappingKeyAmendEnd - MappingKeyAmend1 + 1),
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
index cbb72e2..c02b82c 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Strategy.h
@@ -29,7 +29,8 @@
public:
Strategy(const std::string &mappingValue,
CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context);
+ const CMappingContext &context,
+ core::log::Logger& logger);
protected:
virtual bool sendToHW(std::string &error);
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
index 4387634..c642a23 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.cpp
@@ -21,10 +21,10 @@
using std::string;
using android::routing_strategy;
-Stream::Stream(const string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context)
- : CSubsystemObject(instanceConfigurableElement),
+Stream::Stream(const string &/*mappingValue*/,
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context, core::log::Logger &logger)
+ : CSubsystemObject(instanceConfigurableElement, logger),
mPolicySubsystem(static_cast<const PolicySubsystem *>(
instanceConfigurableElement->getBelongingSubsystem())),
mPolicyPluginInterface(mPolicySubsystem->getPolicyPluginInterface())
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
index 4b0e081..4a875db 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Stream.h
@@ -35,8 +35,9 @@
public:
Stream(const std::string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context);
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context,
+ core::log::Logger& logger);
protected:
virtual bool sendToHW(std::string &error);
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
index eb7d78f..78199f8 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.cpp
@@ -23,8 +23,9 @@
Usage::Usage(const string &mappingValue,
CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context)
+ const CMappingContext &context, core::log::Logger &logger)
: CFormattedSubsystemObject(instanceConfigurableElement,
+ logger,
mappingValue,
MappingKeyAmend1,
(MappingKeyAmendEnd - MappingKeyAmend1 + 1),
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
index 3b82f8c..860204f 100755
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Usage.h
@@ -28,8 +28,9 @@
{
public:
Usage(const std::string &mappingValue,
- CInstanceConfigurableElement *instanceConfigurableElement,
- const CMappingContext &context);
+ CInstanceConfigurableElement *instanceConfigurableElement,
+ const CMappingContext &context,
+ core::log::Logger& logger);
protected:
virtual bool sendToHW(std::string &error);
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index ed807c6..7f8ed1f 100755
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -142,6 +142,8 @@
{
const SwAudioOutputCollection &outputs = mPolicyEngine->mApmObserver->getOutputs();
+ //FIXME: getStrategyForUsage() should return STRATEGY_ACCESSIBILITY and getDeviceForStrategy()
+ // should be implemented accordingly for STRATEGY_ACCESSIBILITY
if (usage == AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY &&
(outputs.isStreamActive(AUDIO_STREAM_RING) ||
outputs.isStreamActive(AUDIO_STREAM_ALARM))) {
diff --git a/services/audiopolicy/engineconfigurable/wrapper/Android.mk b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
index 096f913..f4283a8 100644
--- a/services/audiopolicy/engineconfigurable/wrapper/Android.mk
+++ b/services/audiopolicy/engineconfigurable/wrapper/Android.mk
@@ -18,6 +18,8 @@
LOCAL_STATIC_LIBRARIES := \
libmedia_helper \
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libaudiopolicypfwwrapper
LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
diff --git a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
index cc4d4db..6872e52 100755
--- a/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
+++ b/services/audiopolicy/engineconfigurable/wrapper/ParameterManagerWrapper.cpp
@@ -46,15 +46,13 @@
public:
ParameterMgrPlatformConnectorLogger() {}
- virtual void log(bool isWarning, const string &log)
+ virtual void info(const string &log)
{
- const static string format("policy-parameter-manager: ");
-
- if (isWarning) {
- ALOGW("%s %s", format.c_str(), log.c_str());
- } else {
- ALOGD("%s %s", format.c_str(), log.c_str());
- }
+ ALOGD("policy-parameter-manager: %s", log.c_str());
+ }
+ virtual void warning(const string &log)
+ {
+ ALOGW("policy-parameter-manager: %s", log.c_str());
}
};
@@ -134,7 +132,8 @@
ALOGV("%s: Adding new value pair (%d,%s) for criterionType %s", __FUNCTION__,
numericValue, literalValue.c_str(), typeName.c_str());
ISelectionCriterionTypeInterface *criterionType = mPolicyCriterionTypes[typeName];
- criterionType->addValuePair(numericValue, literalValue.c_str());
+ std::string error;
+ criterionType->addValuePair(numericValue, literalValue, error);
}
void ParameterManagerWrapper::loadCriterionType(cnode *root, bool isInclusive)
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
index bb12714..85d1822 100755
--- a/services/audiopolicy/enginedefault/Android.mk
+++ b/services/audiopolicy/enginedefault/Android.mk
@@ -29,6 +29,7 @@
$(call include-path-for, bionic) \
$(TOPDIR)frameworks/av/services/audiopolicy/common/include
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
LOCAL_MODULE := libaudiopolicyenginedefault
LOCAL_MODULE_TAGS := optional
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 37f79fe..f2224fd 100755
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -141,13 +141,22 @@
case AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO:
if (config != AUDIO_POLICY_FORCE_NONE &&
config != AUDIO_POLICY_FORCE_HDMI_SYSTEM_AUDIO_ENFORCED) {
- ALOGW("setForceUse() invalid config %d forHDMI_SYSTEM_AUDIO", config);
+ ALOGW("setForceUse() invalid config %d for HDMI_SYSTEM_AUDIO", config);
+ }
+ mForceUse[usage] = config;
+ break;
+ case AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND:
+ if (config != AUDIO_POLICY_FORCE_NONE &&
+ config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER &&
+ config != AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS) {
+ ALOGW("setForceUse() invalid config %d for ENCODED_SURROUND", config);
+ return BAD_VALUE;
}
mForceUse[usage] = config;
break;
default:
ALOGW("setForceUse() invalid usage %d", usage);
- break;
+ break; // TODO return BAD_VALUE?
}
return NO_ERROR;
}
@@ -186,18 +195,9 @@
routing_strategy Engine::getStrategyForUsage(audio_usage_t usage)
{
- const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
-
// usage to strategy mapping
switch (usage) {
case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
- outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
- return STRATEGY_SONIFICATION;
- }
- if (isInCall()) {
- return STRATEGY_PHONE;
- }
return STRATEGY_ACCESSIBILITY;
case AUDIO_USAGE_MEDIA:
@@ -231,11 +231,22 @@
audio_devices_t Engine::getDeviceForStrategy(routing_strategy strategy) const
{
- const DeviceVector &availableOutputDevices = mApmObserver->getAvailableOutputDevices();
- const DeviceVector &availableInputDevices = mApmObserver->getAvailableInputDevices();
+ DeviceVector availableOutputDevices = mApmObserver->getAvailableOutputDevices();
+ DeviceVector availableInputDevices = mApmObserver->getAvailableInputDevices();
const SwAudioOutputCollection &outputs = mApmObserver->getOutputs();
+ return getDeviceForStrategyInt(strategy, availableOutputDevices,
+ availableInputDevices, outputs);
+}
+
+
+
+audio_devices_t Engine::getDeviceForStrategyInt(routing_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs) const
+{
uint32_t device = AUDIO_DEVICE_NONE;
uint32_t availableOutputDevicesType = availableOutputDevices.types();
@@ -251,14 +262,16 @@
case STRATEGY_SONIFICATION_RESPECTFUL:
if (isInCall()) {
- device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+ device = getDeviceForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
} else if (outputs.isStreamActiveRemotely(AUDIO_STREAM_MUSIC,
SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
// while media is playing on a remote device, use the the sonification behavior.
// Note that we test this usecase before testing if media is playing because
// the isStreamActive() method only informs about the activity of a stream, not
// if it's for local playback. Note also that we use the same delay between both tests
- device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+ device = getDeviceForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
//user "safe" speaker if available instead of normal speaker to avoid triggering
//other acoustic safety mechanisms for notification
if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
@@ -266,12 +279,15 @@
device |= AUDIO_DEVICE_OUT_SPEAKER_SAFE;
device &= ~AUDIO_DEVICE_OUT_SPEAKER;
}
- } else if (outputs.isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
+ } else if (outputs.isStreamActive(
+ AUDIO_STREAM_MUSIC, SONIFICATION_RESPECTFUL_AFTER_MUSIC_DELAY)) {
// while media is playing (or has recently played), use the same device
- device = getDeviceForStrategy(STRATEGY_MEDIA);
+ device = getDeviceForStrategyInt(
+ STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
} else {
// when media is not playing anymore, fall back on the sonification behavior
- device = getDeviceForStrategy(STRATEGY_SONIFICATION);
+ device = getDeviceForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
//user "safe" speaker if available instead of normal speaker to avoid triggering
//other acoustic safety mechanisms for notification
if ((device & AUDIO_DEVICE_OUT_SPEAKER) &&
@@ -285,7 +301,8 @@
case STRATEGY_DTMF:
if (!isInCall()) {
// when off call, DTMF strategy follows the same rules as MEDIA strategy
- device = getDeviceForStrategy(STRATEGY_MEDIA);
+ device = getDeviceForStrategyInt(
+ STRATEGY_MEDIA, availableOutputDevices, availableInputDevices, outputs);
break;
}
// when in call, DTMF and PHONE strategies follow the same rules
@@ -312,8 +329,8 @@
availableOutputDevicesType = availPrimaryOutputDevices;
}
}
- // for phone strategy, we first consider the forced use and then the available devices by order
- // of priority
+ // for phone strategy, we first consider the forced use and then the available devices by
+ // order of priority
switch (mForceUse[AUDIO_POLICY_FORCE_FOR_COMMUNICATION]) {
case AUDIO_POLICY_FORCE_BT_SCO:
if (!isInCall() || strategy != STRATEGY_DTMF) {
@@ -341,6 +358,8 @@
if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_WIRED_HEADSET;
if (device) break;
+ device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
+ if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_USB_DEVICE;
if (device) break;
if (!isInCall()) {
@@ -382,8 +401,6 @@
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET;
if (device) break;
}
- device = availableOutputDevicesType & AUDIO_DEVICE_OUT_LINE;
- if (device) break;
device = availableOutputDevicesType & AUDIO_DEVICE_OUT_SPEAKER;
if (device) break;
device = mApmObserver->getDefaultOutputDevice()->type();
@@ -399,7 +416,8 @@
// If incall, just select the STRATEGY_PHONE device: The rest of the behavior is handled by
// handleIncallSonification().
if (isInCall()) {
- device = getDeviceForStrategy(STRATEGY_PHONE);
+ device = getDeviceForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
break;
}
// FALL THROUGH
@@ -420,7 +438,6 @@
// The second device used for sonification is the same as the device used by media strategy
// FALL THROUGH
- // FIXME: STRATEGY_ACCESSIBILITY and STRATEGY_REROUTING follow STRATEGY_MEDIA for now
case STRATEGY_ACCESSIBILITY:
if (strategy == STRATEGY_ACCESSIBILITY) {
// do not route accessibility prompts to a digital output currently configured with a
@@ -434,20 +451,35 @@
availableOutputDevicesType = availableOutputDevices.types() & ~devices;
}
}
+ availableOutputDevices =
+ availableOutputDevices.getDevicesFromType(availableOutputDevicesType);
+ if (outputs.isStreamActive(AUDIO_STREAM_RING) ||
+ outputs.isStreamActive(AUDIO_STREAM_ALARM)) {
+ return getDeviceForStrategyInt(
+ STRATEGY_SONIFICATION, availableOutputDevices, availableInputDevices, outputs);
+ }
+ if (isInCall()) {
+ return getDeviceForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
+ }
}
+ // For other cases, STRATEGY_ACCESSIBILITY behaves like STRATEGY_MEDIA
// FALL THROUGH
+ // FIXME: STRATEGY_REROUTING follow STRATEGY_MEDIA for now
case STRATEGY_REROUTING:
case STRATEGY_MEDIA: {
uint32_t device2 = AUDIO_DEVICE_NONE;
if (strategy != STRATEGY_SONIFICATION) {
// no sonification on remote submix (e.g. WFD)
- if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, String8("0")) != 0) {
+ if (availableOutputDevices.getDevice(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ String8("0")) != 0) {
device2 = availableOutputDevices.types() & AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
}
}
if (isInCall() && (strategy == STRATEGY_MEDIA)) {
- device = getDeviceForStrategy(STRATEGY_PHONE);
+ device = getDeviceForStrategyInt(
+ STRATEGY_PHONE, availableOutputDevices, availableInputDevices, outputs);
break;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 8b6eaf6..606ad28 100755
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -125,6 +125,10 @@
routing_strategy getStrategyForStream(audio_stream_type_t stream);
routing_strategy getStrategyForUsage(audio_usage_t usage);
audio_devices_t getDeviceForStrategy(routing_strategy strategy) const;
+ audio_devices_t getDeviceForStrategyInt(routing_strategy strategy,
+ DeviceVector availableOutputDevices,
+ DeviceVector availableInputDevices,
+ const SwAudioOutputCollection &outputs) const;
audio_devices_t getDeviceForInputSource(audio_source_t inputSource) const;
audio_mode_t mPhoneState; /**< current phone state. */
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 758673b..ae8cf15 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#define LOG_TAG "APM::AudioPolicyManager"
+#define LOG_TAG "APM_AudioPolicyManager"
//#define LOG_NDEBUG 0
//#define VERY_VERBOSE_LOGGING
@@ -459,10 +459,7 @@
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(oldState)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, false, true);
}
@@ -538,10 +535,7 @@
// pertaining to sonification strategy see handleIncallSonification()
if (isStateInCall(state)) {
ALOGV("setPhoneState() in call state management: new state is %d", state);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
handleIncallSonification((audio_stream_type_t)stream, true, true);
}
@@ -579,6 +573,7 @@
checkA2dpSuspend();
checkOutputForAllStrategies();
updateDevicesAndOutputs();
+
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
audio_devices_t newDevice = getNewOutputDevice(mPrimaryOutput, true /*fromCache*/);
updateCallRouting(newDevice);
@@ -843,7 +838,7 @@
// skip direct output selection if the request can obviously be attached to a mixed output
// and not explicitly requested
if (((flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
- audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE &&
+ audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX &&
audio_channel_count_from_out_mask(channelMask) <= 2) {
goto non_direct_output;
}
@@ -934,7 +929,7 @@
mpClientInterface->closeOutput(output);
}
// fall back to mixer output if possible when the direct output could not be open
- if (audio_is_linear_pcm(format) && samplingRate <= MAX_MIXER_SAMPLING_RATE) {
+ if (audio_is_linear_pcm(format) && samplingRate <= SAMPLE_RATE_HZ_MAX) {
goto non_direct_output;
}
return AUDIO_IO_HANDLE_NONE;
@@ -959,6 +954,13 @@
}
non_direct_output:
+
+ // A request for HW A/V sync cannot fallback to a mixed output because time
+ // stamps are embedded in audio data
+ if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
+ return AUDIO_IO_HANDLE_NONE;
+ }
+
// ignoring channel mask due to downmix capability in mixer
// open a non direct output
@@ -1101,7 +1103,7 @@
outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- outputDesc->mPolicyMix->mRegistrationId,
+ outputDesc->mPolicyMix->mDeviceAddress,
"remote-submix");
}
@@ -1218,7 +1220,7 @@
outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- outputDesc->mPolicyMix->mRegistrationId,
+ outputDesc->mPolicyMix->mDeviceAddress,
"remote-submix");
}
}
@@ -1393,7 +1395,7 @@
return BAD_VALUE;
}
if (policyMix != NULL) {
- address = policyMix->mRegistrationId;
+ address = policyMix->mDeviceAddress;
if (policyMix->mMixType == MIX_TYPE_RECORDERS) {
// there is an external policy, but this input is attached to a mix of recorders,
// meaning it receives audio injected into the framework, so the recorder doesn't
@@ -1457,7 +1459,7 @@
// find a compatible input profile (not necessarily identical in parameters)
sp<IOProfile> profile;
// samplingRate and flags may be updated by getInputProfile
- uint32_t profileSamplingRate = samplingRate;
+ uint32_t profileSamplingRate = (samplingRate == 0) ? SAMPLE_RATE_HZ_DEFAULT : samplingRate;
audio_format_t profileFormat = format;
audio_channel_mask_t profileChannelMask = channelMask;
audio_input_flags_t profileFlags = flags;
@@ -1476,6 +1478,10 @@
return input;
}
}
+ // Pick input sampling rate if not specified by client
+ if (samplingRate == 0) {
+ samplingRate = profileSamplingRate;
+ }
if (profile->getModuleHandle() == 0) {
ALOGE("getInputForAttr(): HW module %s not opened", profile->getModuleName());
@@ -1623,7 +1629,7 @@
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
@@ -1640,7 +1646,7 @@
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mRegistrationId;
+ address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1687,7 +1693,7 @@
// if input maps to a dynamic policy with an activity listener, notify of state change
if ((inputDesc->mPolicyMix != NULL)
&& ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mRegistrationId,
+ mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
@@ -1698,7 +1704,7 @@
if (inputDesc->mPolicyMix == NULL) {
address = String8("0");
} else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mRegistrationId;
+ address = inputDesc->mPolicyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -1765,7 +1771,7 @@
for(size_t input_index = 0; input_index < mInputs.size(); input_index++) {
sp<AudioInputDescriptor> inputDesc = mInputs.valueAt(input_index);
- ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ ssize_t patch_index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (patch_index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(patch_index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -1789,8 +1795,13 @@
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
- if (stream == AUDIO_STREAM_MUSIC) {
- mVolumeCurves->initStreamVolume(AUDIO_STREAM_ACCESSIBILITY, indexMin, indexMax);
+
+ // initialize other private stream volumes which follow this one
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ continue;
+ }
+ mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
}
}
@@ -1818,38 +1829,41 @@
if (device == AUDIO_DEVICE_OUT_DEFAULT) {
mVolumeCurves->clearCurrentVolumeIndex(stream);
}
- mVolumeCurves->addCurrentVolumeIndex(stream, device, index);
+
+ // update other private stream volumes which follow this one
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ continue;
+ }
+ mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
+ }
// update volume on all outputs whose current device is also selected by the same
// strategy as the device specified by the caller
- audio_devices_t selectedDevices = getDeviceForStrategy(getStrategy(stream), true /*fromCache*/);
- // it is possible that the requested device is not selected by the strategy (e.g an explicit
- // audio patch is active causing getDevicesForStream() to return this device. We must make
- // sure that the device passed is part of the devices considered when applying volume below.
- selectedDevices |= device;
-
- //FIXME: AUDIO_STREAM_ACCESSIBILITY volume follows AUDIO_STREAM_MUSIC for now
- audio_devices_t accessibilityDevice = AUDIO_DEVICE_NONE;
- if (stream == AUDIO_STREAM_MUSIC) {
- mVolumeCurves->addCurrentVolumeIndex(AUDIO_STREAM_ACCESSIBILITY, device, index);
- accessibilityDevice = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, true /*fromCache*/);
- }
-
status_t status = NO_ERROR;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
- if ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & selectedDevices) != 0)) {
- status_t volStatus = checkAndSetVolume(stream, index, desc, curDevice);
- if (volStatus != NO_ERROR) {
- status = volStatus;
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ continue;
}
- }
- if ((accessibilityDevice != AUDIO_DEVICE_NONE) &&
- ((device == AUDIO_DEVICE_OUT_DEFAULT) || ((curDevice & accessibilityDevice) != 0)))
- {
- status_t volStatus = checkAndSetVolume(AUDIO_STREAM_ACCESSIBILITY,
- index, desc, curDevice);
+ routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
+ audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, true /*fromCache*/);
+ // it is possible that the requested device is not selected by the strategy
+ // (e.g an explicit audio patch is active causing getDevicesForStream()
+ // to return this device. We must make sure that the device passed is part of the
+ // devices considered when applying volume below.
+ curStreamDevice |= device;
+
+ if (((device == AUDIO_DEVICE_OUT_DEFAULT) ||
+ ((curDevice & curStreamDevice) != 0))) {
+ status_t volStatus =
+ checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice);
+ if (volStatus != NO_ERROR) {
+ status = volStatus;
+ }
+ }
}
}
return status;
@@ -1952,7 +1966,14 @@
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isStreamActive(stream, inPastMs);
+ bool active = false;
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ continue;
+ }
+ active = mOutputs.isStreamActive((audio_stream_type_t)curStream, inPastMs);
+ }
+ return active;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
@@ -1996,94 +2017,161 @@
status_t AudioPolicyManager::registerPolicyMixes(Vector<AudioMix> mixes)
{
- sp<HwModule> module;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
- mHwModules[i]->mHandle != 0) {
- module = mHwModules[i];
+ ALOGV("registerPolicyMixes() %zu mix(es)", mixes.size());
+ status_t res = NO_ERROR;
+
+ sp<HwModule> rSubmixModule;
+ // examine each mix's route type
+ for (size_t i = 0; i < mixes.size(); i++) {
+ // we only support MIX_ROUTE_FLAG_LOOP_BACK or MIX_ROUTE_FLAG_RENDER, not the combination
+ if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_ALL) == MIX_ROUTE_FLAG_ALL) {
+ res = INVALID_OPERATION;
break;
}
- }
+ if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
+ // Loop back through "remote submix"
+ if (rSubmixModule == 0) {
+ for (size_t j = 0; j < mHwModules.size(); j++) {
+ if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
+ && mHwModules[j]->mHandle != 0) {
+ rSubmixModule = mHwModules[j];
+ break;
+ }
+ }
+ }
- if (module == 0) {
- return INVALID_OPERATION;
- }
+ ALOGV("registerPolicyMixes() mix %zu of %zu is LOOP_BACK", i, mixes.size());
- ALOGV("registerPolicyMixes() num mixes %zu", mixes.size());
+ if (rSubmixModule == 0) {
+ ALOGE(" Unable to find audio module for submix, aborting mix %zu registration", i);
+ res = INVALID_OPERATION;
+ break;
+ }
- for (size_t i = 0; i < mixes.size(); i++) {
- String8 address = mixes[i].mRegistrationId;
+ String8 address = mixes[i].mDeviceAddress;
- if (mPolicyMixes.registerMix(address, mixes[i]) != NO_ERROR) {
- continue;
- }
- audio_config_t outputConfig = mixes[i].mFormat;
- audio_config_t inputConfig = mixes[i].mFormat;
- // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
- // stereo and let audio flinger do the channel conversion if needed.
- outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
- inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
- module->addOutputProfile(address, &outputConfig,
- AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
- module->addInputProfile(address, &inputConfig,
- AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
+ if (mPolicyMixes.registerMix(address, mixes[i], 0 /*output desc*/) != NO_ERROR) {
+ ALOGE(" Error registering mix %zu for address %s", i, address.string());
+ res = INVALID_OPERATION;
+ break;
+ }
+ audio_config_t outputConfig = mixes[i].mFormat;
+ audio_config_t inputConfig = mixes[i].mFormat;
+ // NOTE: audio flinger mixer does not support mono output: configure remote submix HAL in
+ // stereo and let audio flinger do the channel conversion if needed.
+ outputConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ inputConfig.channel_mask = AUDIO_CHANNEL_IN_STEREO;
+ rSubmixModule->addOutputProfile(address, &outputConfig,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address);
+ rSubmixModule->addInputProfile(address, &inputConfig,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, address);
- if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
- setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address.string(), "remote-submix");
- } else {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
- address.string(), "remote-submix");
+ if (mixes[i].mMixType == MIX_TYPE_PLAYERS) {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.string(), "remote-submix");
+ } else {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE,
+ address.string(), "remote-submix");
+ }
+ } else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ String8 address = mixes[i].mDeviceAddress;
+ audio_devices_t device = mixes[i].mDeviceType;
+ ALOGV(" registerPolicyMixes() mix %zu of %zu is RENDER, dev=0x%X addr=%s",
+ i, mixes.size(), device, address.string());
+
+ bool foundOutput = false;
+ for (size_t j = 0 ; j < mOutputs.size() ; j++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(j);
+ sp<AudioPatch> patch = mAudioPatches.valueFor(desc->getPatchHandle());
+ if ((patch != 0) && (patch->mPatch.num_sinks != 0)
+ && (patch->mPatch.sinks[0].type == AUDIO_PORT_TYPE_DEVICE)
+ && (patch->mPatch.sinks[0].ext.device.type == device)
+ && (strncmp(patch->mPatch.sinks[0].ext.device.address, address.string(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0)) {
+
+ if (mPolicyMixes.registerMix(address, mixes[i], desc) != NO_ERROR) {
+ res = INVALID_OPERATION;
+ } else {
+ foundOutput = true;
+ }
+ break;
+ }
+ }
+
+ if (res != NO_ERROR) {
+ ALOGE(" Error registering mix %zu for device 0x%X addr %s",
+ i, device, address.string());
+ res = INVALID_OPERATION;
+ break;
+ } else if (!foundOutput) {
+ ALOGE(" Output not found for mix %zu for device 0x%X addr %s",
+ i, device, address.string());
+ res = INVALID_OPERATION;
+ break;
+ }
}
}
- return NO_ERROR;
+ if (res != NO_ERROR) {
+ unregisterPolicyMixes(mixes);
+ }
+ return res;
}
status_t AudioPolicyManager::unregisterPolicyMixes(Vector<AudioMix> mixes)
{
- sp<HwModule> module;
- for (size_t i = 0; i < mHwModules.size(); i++) {
- if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[i]->mName) == 0 &&
- mHwModules[i]->mHandle != 0) {
- module = mHwModules[i];
- break;
- }
- }
-
- if (module == 0) {
- return INVALID_OPERATION;
- }
-
ALOGV("unregisterPolicyMixes() num mixes %zu", mixes.size());
-
+ status_t res = NO_ERROR;
+ sp<HwModule> rSubmixModule;
+ // examine each mix's route type
for (size_t i = 0; i < mixes.size(); i++) {
- String8 address = mixes[i].mRegistrationId;
+ if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
- if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
- continue;
- }
+ if (rSubmixModule == 0) {
+ for (size_t j = 0; j < mHwModules.size(); j++) {
+ if (strcmp(AUDIO_HARDWARE_MODULE_ID_REMOTE_SUBMIX, mHwModules[j]->mName) == 0
+ && mHwModules[j]->mHandle != 0) {
+ rSubmixModule = mHwModules[j];
+ break;
+ }
+ }
+ }
+ if (rSubmixModule == 0) {
+ res = INVALID_OPERATION;
+ continue;
+ }
- if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
- {
- setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address.string(), "remote-submix");
- }
+ String8 address = mixes[i].mDeviceAddress;
- if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
- AUDIO_POLICY_DEVICE_STATE_AVAILABLE)
- {
- setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- address.string(), "remote-submix");
+ if (mPolicyMixes.unregisterMix(address) != NO_ERROR) {
+ res = INVALID_OPERATION;
+ continue;
+ }
+
+ if (getDeviceConnectionState(AUDIO_DEVICE_IN_REMOTE_SUBMIX, address.string()) ==
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.string(), "remote-submix");
+ }
+ if (getDeviceConnectionState(AUDIO_DEVICE_OUT_REMOTE_SUBMIX, address.string()) ==
+ AUDIO_POLICY_DEVICE_STATE_AVAILABLE) {
+ setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
+ address.string(), "remote-submix");
+ }
+ rSubmixModule->removeOutputProfile(address);
+ rSubmixModule->removeInputProfile(address);
+
+ } else if ((mixes[i].mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER) {
+ if (mPolicyMixes.unregisterMix(mixes[i].mDeviceAddress) != NO_ERROR) {
+ res = INVALID_OPERATION;
+ continue;
+ }
}
- module->removeOutputProfile(address);
- module->removeInputProfile(address);
}
- return NO_ERROR;
+ return res;
}
@@ -2115,6 +2203,9 @@
snprintf(buffer, SIZE, " Force use for hdmi system audio %d\n",
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_HDMI_SYSTEM_AUDIO));
result.append(buffer);
+ snprintf(buffer, SIZE, " Force use for encoded surround output %d\n",
+ mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND));
+ result.append(buffer);
snprintf(buffer, SIZE, " TTS output %s\n", mTtsOutputAvailable ? "available" : "not available");
result.append(buffer);
snprintf(buffer, SIZE, " Master mono: %s\n", mMasterMono ? "on" : "off");
@@ -2166,15 +2257,6 @@
return false;
}
- // Check if streaming is off, then only allow offload as of now.
- // This is a temporary work around until the root cause is fixed in offload
- // playback path.
- if (offloadInfo.is_streaming)
- {
- ALOGV("isOffloadSupported: is_streaming == true, returning false");
- return false;
- }
-
//TODO: enable audio offloading with video when ready
const bool allowOffloadWithVideo =
property_get_bool("audio.offload.video", false /* default_value */);
@@ -2713,10 +2795,7 @@
// invalidate all tracks in this strategy to force re connection.
// Otherwise select new device on the output mix.
if (outputs.indexOf(mOutputs.keyAt(j)) < 0) {
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
if (getStrategy((audio_stream_type_t)stream) == strategy) {
mpClientInterface->invalidateStream((audio_stream_type_t)stream);
}
@@ -2784,8 +2863,8 @@
audio_io_handle_t *ioHandle,
audio_devices_t *device)
{
- *session = (audio_session_t)mpClientInterface->newAudioUniqueId();
- *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId();
+ *session = (audio_session_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ *ioHandle = (audio_io_handle_t)mpClientInterface->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_INPUT);
*device = getDeviceAndMixForInputSource(AUDIO_SOURCE_HOTWORD);
return mSoundTriggerSessions.acquireSession(*session, *ioHandle);
@@ -2839,7 +2918,7 @@
disconnectAudioSource(sourceDesc);
routing_strategy strategy = (routing_strategy) getStrategyForAttr(&sourceDesc->mAttributes);
- audio_stream_type_t stream = audio_attributes_to_stream_type(&sourceDesc->mAttributes);
+ audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
sp<DeviceDescriptor> srcDeviceDesc = sourceDesc->mDevice;
audio_devices_t sinkDevice = getDeviceForStrategy(strategy, true);
@@ -2972,7 +3051,7 @@
}
removeAudioPatch(sourceDesc->mPatchDesc->mHandle);
- audio_stream_type_t stream = audio_attributes_to_stream_type(&sourceDesc->mAttributes);
+ audio_stream_type_t stream = streamTypefromAttributesInt(&sourceDesc->mAttributes);
sp<SwAudioOutputDescriptor> swOutputDesc = sourceDesc->mSwOutput.promote();
if (swOutputDesc != 0) {
stopSource(swOutputDesc, stream, false);
@@ -3606,6 +3685,7 @@
output = AUDIO_IO_HANDLE_NONE;
} else if (profile->hasDynamicAudioProfile()) {
mpClientInterface->closeOutput(output);
+ output = AUDIO_IO_HANDLE_NONE;
profile->pickAudioProfile(config.sample_rate, config.channel_mask, config.format);
config.offload_info.sample_rate = config.sample_rate;
config.offload_info.channel_mask = config.channel_mask;
@@ -3939,7 +4019,7 @@
nextAudioPortGeneration();
- ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+ ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -3968,7 +4048,7 @@
nextAudioPortGeneration();
- ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
@@ -4075,10 +4155,7 @@
}
}
// Move tracks associated to this strategy from previous output to new output
- for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int i = 0; i < AUDIO_STREAM_FOR_POLICY_CNT; i++) {
if (getStrategy((audio_stream_type_t)i) == strategy) {
mpClientInterface->invalidateStream((audio_stream_type_t)i);
}
@@ -4153,12 +4230,12 @@
{
audio_devices_t device = AUDIO_DEVICE_NONE;
- ssize_t index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+ ssize_t index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
if (patchDesc->mUid != mUidCached) {
ALOGV("getNewOutputDevice() device %08x forced by patch %d",
- outputDesc->device(), outputDesc->mPatchHandle);
+ outputDesc->device(), outputDesc->getPatchHandle());
return outputDesc->device();
}
}
@@ -4170,10 +4247,10 @@
// use device for strategy phone
// 3: the strategy for enforced audible is active but not enforced on the output:
// use the device for strategy enforced audible
- // 4: the strategy accessibility is active on the output:
- // use device for strategy accessibility
- // 5: the strategy sonification is active on the output:
+ // 4: the strategy sonification is active on the output:
// use device for strategy sonification
+ // 5: the strategy accessibility is active on the output:
+ // use device for strategy accessibility
// 6: the strategy "respectful" sonification is active on the output:
// use device for strategy "respectful" sonification
// 7: the strategy media is active on the output:
@@ -4190,10 +4267,10 @@
device = getDeviceForStrategy(STRATEGY_PHONE, fromCache);
} else if (isStrategyActive(outputDesc, STRATEGY_ENFORCED_AUDIBLE)) {
device = getDeviceForStrategy(STRATEGY_ENFORCED_AUDIBLE, fromCache);
- } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
- device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
} else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION)) {
device = getDeviceForStrategy(STRATEGY_SONIFICATION, fromCache);
+ } else if (isStrategyActive(outputDesc, STRATEGY_ACCESSIBILITY)) {
+ device = getDeviceForStrategy(STRATEGY_ACCESSIBILITY, fromCache);
} else if (isStrategyActive(outputDesc, STRATEGY_SONIFICATION_RESPECTFUL)) {
device = getDeviceForStrategy(STRATEGY_SONIFICATION_RESPECTFUL, fromCache);
} else if (isStrategyActive(outputDesc, STRATEGY_MEDIA)) {
@@ -4214,12 +4291,12 @@
{
sp<AudioInputDescriptor> inputDesc = mInputs.valueFor(input);
- ssize_t index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ ssize_t index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
if (index >= 0) {
sp<AudioPatch> patchDesc = mAudioPatches.valueAt(index);
if (patchDesc->mUid != mUidCached) {
ALOGV("getNewInputDevice() device %08x forced by patch %d",
- inputDesc->mDevice, inputDesc->mPatchHandle);
+ inputDesc->mDevice, inputDesc->getPatchHandle());
return inputDesc->mDevice;
}
}
@@ -4229,6 +4306,13 @@
return device;
}
+bool AudioPolicyManager::streamsMatchForvolume(audio_stream_type_t stream1,
+ audio_stream_type_t stream2) {
+ return ((stream1 == stream2) ||
+ ((stream1 == AUDIO_STREAM_ACCESSIBILITY) && (stream2 == AUDIO_STREAM_MUSIC)) ||
+ ((stream1 == AUDIO_STREAM_MUSIC) && (stream2 == AUDIO_STREAM_ACCESSIBILITY)));
+}
+
uint32_t AudioPolicyManager::getStrategyForStream(audio_stream_type_t stream) {
return (uint32_t)getStrategy(stream);
}
@@ -4240,16 +4324,22 @@
if (stream < (audio_stream_type_t) 0 || stream >= AUDIO_STREAM_PUBLIC_CNT) {
return AUDIO_DEVICE_NONE;
}
- audio_devices_t devices;
- routing_strategy strategy = getStrategy(stream);
- devices = getDeviceForStrategy(strategy, true /*fromCache*/);
- SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(devices, mOutputs);
- for (size_t i = 0; i < outputs.size(); i++) {
- sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
- if (isStrategyActive(outputDesc, strategy)) {
- devices = outputDesc->device();
- break;
+ audio_devices_t devices = AUDIO_DEVICE_NONE;
+ for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
+ if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
+ continue;
}
+ routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
+ audio_devices_t curDevices =
+ getDeviceForStrategy((routing_strategy)curStrategy, true /*fromCache*/);
+ SortedVector<audio_io_handle_t> outputs = getOutputsForDevice(curDevices, mOutputs);
+ for (size_t i = 0; i < outputs.size(); i++) {
+ sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(outputs[i]);
+ if (outputDesc->isStreamActive((audio_stream_type_t)curStream)) {
+ curDevices |= outputDesc->device();
+ }
+ }
+ devices |= curDevices;
}
/*Filter SPEAKER_SAFE out of results, as AudioService doesn't know about it
@@ -4361,15 +4451,8 @@
// the device = the device from the descriptor in the RouteMap, and exit.
for (size_t routeIndex = 0; routeIndex < mOutputRoutes.size(); routeIndex++) {
sp<SessionRoute> route = mOutputRoutes.valueAt(routeIndex);
- routing_strategy strat = getStrategy(route->mStreamType);
- // Special case for accessibility strategy which must follow any strategy it is
- // currently remapped to
- bool strategyMatch = (strat == strategy) ||
- ((strategy == STRATEGY_ACCESSIBILITY) &&
- ((mEngine->getStrategyForUsage(
- AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY) == strat) ||
- (strat == STRATEGY_MEDIA)));
- if (strategyMatch && route->isActive()) {
+ routing_strategy routeStrategy = getStrategy(route->mStreamType);
+ if ((routeStrategy == strategy) && route->isActive()) {
return route->mDeviceDescriptor->type();
}
}
@@ -4513,7 +4596,7 @@
// Doing this check here allows the caller to call setOutputDevice() without conditions
if ((device == AUDIO_DEVICE_NONE || device == prevDevice) &&
!force &&
- outputDesc->mPatchHandle != 0) {
+ outputDesc->getPatchHandle() != 0) {
ALOGV("setOutputDevice() setting same device 0x%04x or null device", device);
return muteWaitMs;
}
@@ -4540,7 +4623,7 @@
if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
index = mAudioPatches.indexOfKey(*patchHandle);
} else {
- index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+ index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
}
sp< AudioPatch> patchDesc;
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -4566,7 +4649,7 @@
if (patchHandle) {
*patchHandle = patchDesc->mHandle;
}
- outputDesc->mPatchHandle = patchDesc->mHandle;
+ outputDesc->setPatchHandle(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
}
@@ -4601,7 +4684,7 @@
if (patchHandle) {
index = mAudioPatches.indexOfKey(*patchHandle);
} else {
- index = mAudioPatches.indexOfKey(outputDesc->mPatchHandle);
+ index = mAudioPatches.indexOfKey(outputDesc->getPatchHandle());
}
if (index < 0) {
return INVALID_OPERATION;
@@ -4609,7 +4692,7 @@
sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, delayMs);
ALOGV("resetOutputDevice() releaseAudioPatch returned %d", status);
- outputDesc->mPatchHandle = 0;
+ outputDesc->setPatchHandle(0);
removeAudioPatch(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
@@ -4645,7 +4728,7 @@
if (patchHandle && *patchHandle != AUDIO_PATCH_HANDLE_NONE) {
index = mAudioPatches.indexOfKey(*patchHandle);
} else {
- index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
}
sp< AudioPatch> patchDesc;
audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
@@ -4670,7 +4753,7 @@
if (patchHandle) {
*patchHandle = patchDesc->mHandle;
}
- inputDesc->mPatchHandle = patchDesc->mHandle;
+ inputDesc->setPatchHandle(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
}
@@ -4687,7 +4770,7 @@
if (patchHandle) {
index = mAudioPatches.indexOfKey(*patchHandle);
} else {
- index = mAudioPatches.indexOfKey(inputDesc->mPatchHandle);
+ index = mAudioPatches.indexOfKey(inputDesc->getPatchHandle());
}
if (index < 0) {
return INVALID_OPERATION;
@@ -4695,7 +4778,7 @@
sp< AudioPatch> patchDesc = mAudioPatches.valueAt(index);
status_t status = mpClientInterface->releaseAudioPatch(patchDesc->mAfPatchHandle, 0);
ALOGV("resetInputDevice() releaseAudioPatch returned %d", status);
- inputDesc->mPatchHandle = 0;
+ inputDesc->setPatchHandle(0);
removeAudioPatch(patchDesc->mHandle);
nextAudioPortGeneration();
mpClientInterface->onAudioPatchListUpdate();
@@ -4869,10 +4952,7 @@
{
ALOGVV("applyStreamVolumes() for device %08x", device);
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
checkAndSetVolume((audio_stream_type_t)stream,
mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
outputDesc,
@@ -4890,10 +4970,7 @@
{
ALOGVV("setStrategyMute() strategy %d, mute %d, output ID %d",
strategy, on, outputDesc->getId());
- for (int stream = 0; stream < AUDIO_STREAM_CNT; stream++) {
- if (stream == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
if (getStrategy((audio_stream_type_t)stream) == strategy) {
setStreamMute((audio_stream_type_t)stream, on, outputDesc, delayMs, device);
}
@@ -5007,15 +5084,6 @@
case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
return AUDIO_STREAM_MUSIC;
case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- if (isStreamActive(AUDIO_STREAM_ALARM)) {
- return AUDIO_STREAM_ALARM;
- }
- if (isStreamActive(AUDIO_STREAM_RING)) {
- return AUDIO_STREAM_RING;
- }
- if (isInCall()) {
- return AUDIO_STREAM_VOICE_CALL;
- }
return AUDIO_STREAM_ACCESSIBILITY;
case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
return AUDIO_STREAM_SYSTEM;
@@ -5082,10 +5150,7 @@
if ((sysTime == 0) && (inPastMs != 0)) {
sysTime = systemTime();
}
- for (int i = 0; i < (int)AUDIO_STREAM_CNT; i++) {
- if (i == AUDIO_STREAM_PATCH) {
- continue;
- }
+ for (int i = 0; i < (int)AUDIO_STREAM_FOR_POLICY_CNT; i++) {
if (((getStrategy((audio_stream_type_t)i) == strategy) ||
(NUM_STRATEGIES == strategy)) &&
outputDesc->isStreamActive((audio_stream_type_t)i, inPastMs, sysTime)) {
@@ -5144,6 +5209,103 @@
}
}
+// Modify the list of surround sound formats supported.
+void AudioPolicyManager::filterSurroundFormats(FormatVector &formats) {
+ // TODO Change the ALOGIs to ALOGVs in this function after the feature is verified.
+
+ // TODO Set this based on Config properties.
+ const bool alwaysForceAC3 = true;
+
+ audio_policy_forced_cfg_t forceUse = mEngine->getForceUse(
+ AUDIO_POLICY_FORCE_FOR_ENCODED_SURROUND);
+ ALOGI("%s: forced use = %d", __FUNCTION__, forceUse);
+
+ // Analyze original support for various formats.
+ bool supportsAC3 = false;
+ bool supportsOtherSurround = false;
+ bool supportsIEC61937 = false;
+ for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
+ audio_format_t format = formats[formatIndex];
+ ALOGI("%s: original formats: 0x%08x", __FUNCTION__, format);
+ switch (format) {
+ case AUDIO_FORMAT_AC3:
+ supportsAC3 = true;
+ break;
+ case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_HD:
+ supportsOtherSurround = true;
+ break;
+ case AUDIO_FORMAT_IEC61937:
+ supportsIEC61937 = true;
+ break;
+ default:
+ break;
+ }
+ }
+ ALOGI("%s: original, supportsAC3 = %d, supportsOtherSurround = %d, supportsIEC61937 = %d",
+ __FUNCTION__, supportsAC3, supportsOtherSurround, supportsIEC61937);
+
+ // Modify formats based on surround preferences.
+ // If NEVER, remove support for surround formats.
+ if (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_NEVER) {
+ if (supportsAC3 || supportsOtherSurround || supportsIEC61937) {
+ // Remove surround sound related formats.
+ for (size_t formatIndex = 0; formatIndex < formats.size(); ) {
+ audio_format_t format = formats[formatIndex];
+ switch(format) {
+ case AUDIO_FORMAT_AC3:
+ case AUDIO_FORMAT_E_AC3:
+ case AUDIO_FORMAT_DTS:
+ case AUDIO_FORMAT_DTS_HD:
+ case AUDIO_FORMAT_IEC61937:
+ ALOGI("%s: remove format 0x%08x", __FUNCTION__, format);
+ formats.removeAt(formatIndex);
+ break;
+ default:
+ formatIndex++; // keep it
+ break;
+ }
+ }
+ supportsAC3 = false;
+ supportsOtherSurround = false;
+ supportsIEC61937 = false;
+ }
+ } else { // AUTO or ALWAYS
+ // Most TVs support AC3 even if they do not report it in the EDID.
+ if ((alwaysForceAC3 || (forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS))
+ && !supportsAC3) {
+ formats.add(AUDIO_FORMAT_AC3);
+ supportsAC3 = true;
+ }
+
+ // If ALWAYS, add support for raw surround formats if all are missing.
+ // This assumes that if any of these formats are reported by the HAL
+ // then the report is valid and should not be modified.
+ if ((forceUse == AUDIO_POLICY_FORCE_ENCODED_SURROUND_ALWAYS)
+ && !supportsOtherSurround) {
+ formats.add(AUDIO_FORMAT_E_AC3);
+ formats.add(AUDIO_FORMAT_DTS);
+ formats.add(AUDIO_FORMAT_DTS_HD);
+ supportsOtherSurround = true;
+ }
+
+ // Add support for IEC61937 if any raw surround supported.
+ // The HAL could do this but add it here, just in case.
+ if ((supportsAC3 || supportsOtherSurround) && !supportsIEC61937) {
+ formats.add(AUDIO_FORMAT_IEC61937);
+ supportsIEC61937 = true;
+ }
+ }
+ // Just for debugging.
+ for (size_t formatIndex = 0; formatIndex < formats.size(); formatIndex++) {
+ audio_format_t format = formats[formatIndex];
+ ALOGI("%s: final formats: 0x%08x", __FUNCTION__, format);
+ }
+ ALOGI("%s: final, supportsAC3 = %d, supportsOtherSurround = %d, supportsIEC61937 = %d",
+ __FUNCTION__, supportsAC3, supportsOtherSurround, supportsIEC61937);
+}
+
void AudioPolicyManager::updateAudioProfiles(audio_io_handle_t ioHandle,
AudioProfileVector &profiles)
{
@@ -5153,14 +5315,16 @@
if (profiles.hasDynamicFormat()) {
reply = mpClientInterface->getParameters(ioHandle,
String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS));
- ALOGV("%s: supported formats %s", __FUNCTION__, reply.string());
+ ALOGI("%s: supported formats %s", __FUNCTION__, reply.string());
AudioParameter repliedParameters(reply);
if (repliedParameters.get(
String8(AUDIO_PARAMETER_STREAM_SUP_FORMATS), reply) != NO_ERROR) {
ALOGE("%s: failed to retrieve format, bailing out", __FUNCTION__);
return;
}
- profiles.setFormats(formatsFromString(reply.string()));
+ FormatVector formats = formatsFromString(reply.string());
+ filterSurroundFormats(formats);
+ profiles.setFormats(formats);
}
const FormatVector &supportedFormats = profiles.getSupportedFormats();
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index fb9b46b..1ef896f 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -506,6 +506,9 @@
void clearAudioSources(uid_t uid);
+ static bool streamsMatchForvolume(audio_stream_type_t stream1,
+ audio_stream_type_t stream2);
+
uid_t mUidCached;
AudioPolicyClientInterface *mpClientInterface; // audio policy client interface
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
@@ -582,6 +585,9 @@
// Audio Policy Engine Interface.
AudioPolicyManagerInterface *mEngine;
private:
+ // Add or remove AC3 DTS encodings based on user preferences.
+ void filterSurroundFormats(FormatVector &formats);
+
// If any, resolve any "dynamic" fields of an Audio Profiles collection
void updateAudioProfiles(audio_io_handle_t ioHandle, AudioProfileVector &profiles);
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index ce6b2dc..08f9cc1 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -171,7 +171,7 @@
return mAudioPolicyService->setVoiceVolume(volume, delay_ms);
}
-status_t AudioPolicyService::AudioPolicyClient::moveEffects(int session,
+status_t AudioPolicyService::AudioPolicyClient::moveEffects(audio_session_t session,
audio_io_handle_t src_output,
audio_io_handle_t dst_output)
{
@@ -221,15 +221,16 @@
void AudioPolicyService::AudioPolicyClient::onRecordingConfigurationUpdate(
int event, audio_session_t session, audio_source_t source,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig)
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle)
{
mAudioPolicyService->onRecordingConfigurationUpdate(event, session, source,
- clientConfig, deviceConfig);
+ clientConfig, deviceConfig, patchHandle);
}
-audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId()
+audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use)
{
- return AudioSystem::newAudioUniqueId();
+ return AudioSystem::newAudioUniqueId(use);
}
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
index a79f8ae..580d740 100644
--- a/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImplLegacy.cpp
@@ -252,7 +252,7 @@
return af->invalidateStream(stream);
}
-int aps_move_effects(void *service __unused, int session,
+int aps_move_effects(void *service __unused, audio_session_t session,
audio_io_handle_t src_output,
audio_io_handle_t dst_output)
{
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index ce77814..b732b20 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -79,7 +79,7 @@
status_t AudioPolicyEffects::addInputEffects(audio_io_handle_t input,
audio_source_t inputSource,
- int audioSession)
+ audio_session_t audioSession)
{
status_t status = NO_ERROR;
@@ -152,7 +152,7 @@
return status;
}
-status_t AudioPolicyEffects::queryDefaultInputEffects(int audioSession,
+status_t AudioPolicyEffects::queryDefaultInputEffects(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
@@ -185,7 +185,7 @@
}
-status_t AudioPolicyEffects::queryDefaultOutputSessionEffects(int audioSession,
+status_t AudioPolicyEffects::queryDefaultOutputSessionEffects(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
@@ -220,7 +220,7 @@
status_t AudioPolicyEffects::addOutputSessionEffects(audio_io_handle_t output,
audio_stream_type_t stream,
- int audioSession)
+ audio_session_t audioSession)
{
status_t status = NO_ERROR;
@@ -275,7 +275,7 @@
status_t AudioPolicyEffects::releaseOutputSessionEffects(audio_io_handle_t output,
audio_stream_type_t stream,
- int audioSession)
+ audio_session_t audioSession)
{
status_t status = NO_ERROR;
(void) output; // argument not used for now
diff --git a/services/audiopolicy/service/AudioPolicyEffects.h b/services/audiopolicy/service/AudioPolicyEffects.h
index 266a45e..ee9bd50 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.h
+++ b/services/audiopolicy/service/AudioPolicyEffects.h
@@ -51,7 +51,7 @@
// Return a list of effect descriptors for default input effects
// associated with audioSession
- status_t queryDefaultInputEffects(int audioSession,
+ status_t queryDefaultInputEffects(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count);
@@ -59,7 +59,7 @@
// Effects are attached depending on the audio_source_t
status_t addInputEffects(audio_io_handle_t input,
audio_source_t inputSource,
- int audioSession);
+ audio_session_t audioSession);
// Add all input effects associated to this input
status_t releaseInputEffects(audio_io_handle_t input);
@@ -67,7 +67,7 @@
// Return a list of effect descriptors for default output effects
// associated with audioSession
- status_t queryDefaultOutputSessionEffects(int audioSession,
+ status_t queryDefaultOutputSessionEffects(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count);
@@ -75,12 +75,12 @@
// Effects are attached depending on the audio_stream_type_t
status_t addOutputSessionEffects(audio_io_handle_t output,
audio_stream_type_t stream,
- int audioSession);
+ audio_session_t audioSession);
// release all output effects associated with this output stream and audiosession
status_t releaseOutputSessionEffects(audio_io_handle_t output,
audio_stream_type_t stream,
- int audioSession);
+ audio_session_t audioSession);
private:
@@ -135,13 +135,13 @@
// class to store voctor of AudioEffects
class EffectVector {
public:
- EffectVector(int session) : mSessionId(session), mRefCount(0) {}
+ EffectVector(audio_session_t session) : mSessionId(session), mRefCount(0) {}
/*virtual*/ ~EffectVector() {}
// Enable or disable all effects in effect vector
void setProcessorEnabled(bool enabled);
- const int mSessionId;
+ const audio_session_t mSessionId;
// AudioPolicyManager keeps mLock, no need for lock on reference count here
int mRefCount;
Vector< sp<AudioEffect> >mEffects;
@@ -188,7 +188,7 @@
// Automatic output effects are organized per audio_stream_type_t
KeyedVector< audio_stream_type_t, EffectDescVector* > mOutputStreams;
// Automatic output effects are unique for audiosession ID
- KeyedVector< int32_t, EffectVector* > mOutputSessions;
+ KeyedVector< audio_session_t, EffectVector* > mOutputSessions;
};
}; // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index fdd6dd2..92a1285 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -476,7 +476,7 @@
status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id)
{
if (mAudioPolicyManager == NULL) {
@@ -537,7 +537,7 @@
return mAudioPolicyManager->isSourceActive(source);
}
-status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
index 08b2a3b..c830454 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImplLegacy.cpp
@@ -427,7 +427,7 @@
status_t AudioPolicyService::registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id)
{
if (mpAudioPolicy == NULL) {
@@ -488,7 +488,7 @@
return mpAudioPolicy->is_source_active(mpAudioPolicy, source);
}
-status_t AudioPolicyService::queryDefaultPreProcessing(int audioSession,
+status_t AudioPolicyService::queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count)
{
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 45f260a..a6cd50e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -240,20 +240,20 @@
void AudioPolicyService::onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig)
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
{
mOutputCommandThread->recordingConfigurationUpdateCommand(event, session, source,
- clientConfig, deviceConfig);
+ clientConfig, deviceConfig, patchHandle);
}
void AudioPolicyService::doOnRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig)
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle)
{
Mutex::Autolock _l(mNotificationClientsLock);
for (size_t i = 0; i < mNotificationClients.size(); i++) {
mNotificationClients.valueAt(i)->onRecordingConfigurationUpdate(event, session, source,
- clientConfig, deviceConfig);
+ clientConfig, deviceConfig, patchHandle);
}
}
@@ -321,11 +321,12 @@
void AudioPolicyService::NotificationClient::onRecordingConfigurationUpdate(
int event, audio_session_t session, audio_source_t source,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig)
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle)
{
if (mAudioPolicyServiceClient != 0) {
mAudioPolicyServiceClient->onRecordingConfigurationUpdate(event, session, source,
- clientConfig, deviceConfig);
+ clientConfig, deviceConfig, patchHandle);
}
}
@@ -607,7 +608,8 @@
}
mLock.unlock();
svc->doOnRecordingConfigurationUpdate(data->mEvent, data->mSession,
- data->mSource, &data->mClientConfig, &data->mDeviceConfig);
+ data->mSource, &data->mClientConfig, &data->mDeviceConfig,
+ data->mPatchHandle);
mLock.lock();
} break;
default:
@@ -872,7 +874,8 @@
void AudioPolicyService::AudioCommandThread::recordingConfigurationUpdateCommand(
int event, audio_session_t session, audio_source_t source,
- const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig)
+ const audio_config_base_t *clientConfig, const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle)
{
sp<AudioCommand>command = new AudioCommand();
command->mCommand = RECORDING_CONFIGURATION_UPDATE;
@@ -882,6 +885,7 @@
data->mSource = source;
data->mClientConfig = *clientConfig;
data->mDeviceConfig = *deviceConfig;
+ data->mPatchHandle = patchHandle;
command->mParam = data;
ALOGV("AudioCommandThread() adding recording configuration update event %d, source %d",
event, source);
@@ -1178,7 +1182,7 @@
audio_channel_mask_t *pChannelMask);
int aps_close_input(void *service __unused, audio_io_handle_t input);
int aps_invalidate_stream(void *service __unused, audio_stream_type_t stream);
-int aps_move_effects(void *service __unused, int session,
+int aps_move_effects(void *service __unused, audio_session_t session,
audio_io_handle_t src_output,
audio_io_handle_t dst_output);
char * aps_get_parameters(void *service __unused, audio_io_handle_t io_handle,
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 7089014..2710ac7 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -128,7 +128,7 @@
virtual status_t registerEffect(const effect_descriptor_t *desc,
audio_io_handle_t io,
uint32_t strategy,
- int session,
+ audio_session_t session,
int id);
virtual status_t unregisterEffect(int id);
virtual status_t setEffectEnabled(int id, bool enabled);
@@ -136,7 +136,7 @@
virtual bool isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs = 0) const;
virtual bool isSourceActive(audio_source_t source) const;
- virtual status_t queryDefaultPreProcessing(int audioSession,
+ virtual status_t queryDefaultPreProcessing(audio_session_t audioSession,
effect_descriptor_t *descriptors,
uint32_t *count);
virtual status_t onTransact(
@@ -230,10 +230,10 @@
void doOnDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
void onRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
void doOnRecordingConfigurationUpdate(int event, audio_session_t session,
audio_source_t source, const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
private:
AudioPolicyService() ANDROID_API;
@@ -309,7 +309,8 @@
int event, audio_session_t session,
audio_source_t source,
const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle);
void insertCommand_l(AudioCommand *command, int delayMs = 0);
private:
@@ -407,6 +408,7 @@
audio_source_t mSource;
struct audio_config_base mClientConfig;
struct audio_config_base mDeviceConfig;
+ audio_patch_handle_t mPatchHandle;
};
Mutex mLock;
@@ -496,7 +498,7 @@
virtual status_t setVoiceVolume(float volume, int delayMs = 0);
// move effect to the specified output
- virtual status_t moveEffects(int session,
+ virtual status_t moveEffects(audio_session_t session,
audio_io_handle_t srcOutput,
audio_io_handle_t dstOutput);
@@ -518,9 +520,9 @@
virtual void onRecordingConfigurationUpdate(int event,
audio_session_t session, audio_source_t source,
const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig, audio_patch_handle_t patchHandle);
- virtual audio_unique_id_t newAudioUniqueId();
+ virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
private:
AudioPolicyService *mAudioPolicyService;
@@ -541,7 +543,8 @@
int event, audio_session_t session,
audio_source_t source,
const audio_config_base_t *clientConfig,
- const audio_config_base_t *deviceConfig);
+ const audio_config_base_t *deviceConfig,
+ audio_patch_handle_t patchHandle);
void setAudioPortCallbacksEnabled(bool enabled);
// IBinder::DeathRecipient
diff --git a/services/camera/libcameraservice/Android.mk b/services/camera/libcameraservice/Android.mk
index d416353..c011613 100644
--- a/services/camera/libcameraservice/Android.mk
+++ b/services/camera/libcameraservice/Android.mk
@@ -20,7 +20,9 @@
include $(CLEAR_VARS)
-LOCAL_SRC_FILES:= \
+# Camera service source
+
+LOCAL_SRC_FILES := \
CameraService.cpp \
CameraFlashlight.cpp \
common/Camera2ClientBase.cpp \
@@ -67,13 +69,14 @@
libjpeg
LOCAL_C_INCLUDES += \
- system/media/camera/include \
system/media/private/camera/include \
frameworks/native/include/media/openmax \
external/jpeg
+LOCAL_EXPORT_C_INCLUDE_DIRS := \
+ frameworks/av/services/camera/libcameraservice
-LOCAL_CFLAGS += -Wall -Wextra
+LOCAL_CFLAGS += -Wall -Wextra -Werror
LOCAL_MODULE:= libcameraservice
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index f0bcc0b..0c88dad 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -28,6 +28,9 @@
#include <inttypes.h>
#include <pthread.h>
+#include <android/hardware/ICamera.h>
+#include <android/hardware/ICameraClient.h>
+
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
@@ -63,6 +66,9 @@
namespace android {
+using binder::Status;
+using namespace hardware;
+
// ----------------------------------------------------------------------------
// Logging support -- this is for debugging only
// Use "adb shell dumpsys media.camera -v 1" to change it.
@@ -75,6 +81,17 @@
android_atomic_write(level, &gLogLevel);
}
+// Convenience methods for constructing binder::Status objects for error returns
+
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+ __VA_ARGS__))
+
// ----------------------------------------------------------------------------
extern "C" {
@@ -100,7 +117,7 @@
sp<CameraService> cs = const_cast<CameraService*>(
static_cast<const CameraService*>(callbacks));
- ICameraServiceListener::TorchStatus status;
+ int32_t status;
switch (new_status) {
case TORCH_MODE_STATUS_NOT_AVAILABLE:
status = ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
@@ -130,8 +147,8 @@
CameraService::CameraService() :
mEventLog(DEFAULT_EVENT_LOG_LENGTH),
- mSoundRef(0), mModule(nullptr),
- mNumberOfCameras(0), mNumberOfNormalCameras(0) {
+ mNumberOfCameras(0), mNumberOfNormalCameras(0),
+ mSoundRef(0), mModule(nullptr) {
ALOGI("CameraService started (pid=%d)", getpid());
gCameraService = this;
@@ -291,9 +308,9 @@
return;
}
- ICameraServiceListener::Status oldStatus = state->getStatus();
+ int32_t oldStatus = state->getStatus();
- if (oldStatus == static_cast<ICameraServiceListener::Status>(newStatus)) {
+ if (oldStatus == static_cast<int32_t>(newStatus)) {
ALOGE("%s: State transition to the same status %#x not allowed", __FUNCTION__, newStatus);
return;
}
@@ -317,7 +334,8 @@
clientToDisconnect = removeClientLocked(id);
// Notify the client of disconnection
- clientToDisconnect->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ clientToDisconnect->notifyError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
CaptureResultExtras{});
}
@@ -333,27 +351,27 @@
}
} else {
- if (oldStatus == ICameraServiceListener::Status::STATUS_NOT_PRESENT) {
+ if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
logDeviceAdded(id, String8::format("Device status changed from %d to %d", oldStatus,
newStatus));
}
- updateStatus(static_cast<ICameraServiceListener::Status>(newStatus), id);
+ updateStatus(static_cast<int32_t>(newStatus), id);
}
}
void CameraService::onTorchStatusChanged(const String8& cameraId,
- ICameraServiceListener::TorchStatus newStatus) {
+ int32_t newStatus) {
Mutex::Autolock al(mTorchStatusMutex);
onTorchStatusChangedLocked(cameraId, newStatus);
}
void CameraService::onTorchStatusChangedLocked(const String8& cameraId,
- ICameraServiceListener::TorchStatus newStatus) {
+ int32_t newStatus) {
ALOGI("%s: Torch status changed for cameraId=%s, newStatus=%d",
__FUNCTION__, cameraId.string(), newStatus);
- ICameraServiceListener::TorchStatus status;
+ int32_t status;
status_t res = getTorchStatusLocked(cameraId, &status);
if (res) {
ALOGE("%s: cannot get torch status of camera %s: %s (%d)",
@@ -407,41 +425,45 @@
}
}
-int32_t CameraService::getNumberOfCameras() {
- ATRACE_CALL();
- return getNumberOfCameras(CAMERA_TYPE_BACKWARD_COMPATIBLE);
-}
-
-int32_t CameraService::getNumberOfCameras(int type) {
+Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
ATRACE_CALL();
switch (type) {
case CAMERA_TYPE_BACKWARD_COMPATIBLE:
- return mNumberOfNormalCameras;
+ *numCameras = mNumberOfNormalCameras;
+ break;
case CAMERA_TYPE_ALL:
- return mNumberOfCameras;
+ *numCameras = mNumberOfCameras;
+ break;
default:
- ALOGW("%s: Unknown camera type %d, returning 0",
+ ALOGW("%s: Unknown camera type %d",
__FUNCTION__, type);
- return 0;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Unknown camera type %d", type);
}
+ return Status::ok();
}
-status_t CameraService::getCameraInfo(int cameraId,
- struct CameraInfo* cameraInfo) {
+Status CameraService::getCameraInfo(int cameraId,
+ CameraInfo* cameraInfo) {
ATRACE_CALL();
if (!mModule) {
- return -ENODEV;
+ return STATUS_ERROR(ERROR_DISCONNECTED,
+ "Camera subsystem is not available");
}
if (cameraId < 0 || cameraId >= mNumberOfCameras) {
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+ "CameraId is not valid");
}
struct camera_info info;
- status_t rc = filterGetInfoErrorCode(
+ Status rc = filterGetInfoErrorCode(
mModule->getCameraInfo(cameraId, &info));
- cameraInfo->facing = info.facing;
- cameraInfo->orientation = info.orientation;
+
+ if (rc.isOk()) {
+ cameraInfo->facing = info.facing;
+ cameraInfo->orientation = info.orientation;
+ }
return rc;
}
@@ -455,28 +477,33 @@
return ret;
}
-status_t CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
+Status CameraService::generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo) {
ATRACE_CALL();
- status_t ret = OK;
+
+ Status ret = Status::ok();
+
struct CameraInfo info;
- if ((ret = getCameraInfo(cameraId, &info)) != OK) {
+ if (!(ret = getCameraInfo(cameraId, &info)).isOk()) {
return ret;
}
CameraMetadata shimInfo;
int32_t orientation = static_cast<int32_t>(info.orientation);
- if ((ret = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
- return ret;
+ status_t rc;
+ if ((rc = shimInfo.update(ANDROID_SENSOR_ORIENTATION, &orientation, 1)) != OK) {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating metadata: %d (%s)", rc, strerror(-rc));
}
uint8_t facing = (info.facing == CAMERA_FACING_FRONT) ?
ANDROID_LENS_FACING_FRONT : ANDROID_LENS_FACING_BACK;
- if ((ret = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
- return ret;
+ if ((rc = shimInfo.update(ANDROID_LENS_FACING, &facing, 1)) != OK) {
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating metadata: %d (%s)", rc, strerror(-rc));
}
CameraParameters shimParams;
- if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+ if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
// Error logged by callee
return ret;
}
@@ -517,49 +544,54 @@
streamConfigs.add(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT);
}
- if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
+ if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
streamConfigs.array(), streamConfigSize)) != OK) {
- return ret;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating metadata: %d (%s)", rc, strerror(-rc));
}
int64_t fakeMinFrames[0];
// TODO: Fixme, don't fake min frame durations.
- if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
+ if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS,
fakeMinFrames, 0)) != OK) {
- return ret;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating metadata: %d (%s)", rc, strerror(-rc));
}
int64_t fakeStalls[0];
// TODO: Fixme, don't fake stall durations.
- if ((ret = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
+ if ((rc = shimInfo.update(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS,
fakeStalls, 0)) != OK) {
- return ret;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating metadata: %d (%s)", rc, strerror(-rc));
}
*cameraInfo = shimInfo;
- return OK;
+ return ret;
}
-status_t CameraService::getCameraCharacteristics(int cameraId,
+Status CameraService::getCameraCharacteristics(int cameraId,
CameraMetadata* cameraInfo) {
ATRACE_CALL();
if (!cameraInfo) {
ALOGE("%s: cameraInfo is NULL", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "cameraInfo is NULL");
}
if (!mModule) {
ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
- return -ENODEV;
+ return STATUS_ERROR(ERROR_DISCONNECTED,
+ "Camera subsystem is not available");;
}
if (cameraId < 0 || cameraId >= mNumberOfCameras) {
ALOGE("%s: Invalid camera id: %d", __FUNCTION__, cameraId);
- return BAD_VALUE;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Invalid camera id: %d", cameraId);
}
int facing;
- status_t ret = OK;
+ Status ret;
if (mModule->getModuleApiVersion() < CAMERA_MODULE_API_VERSION_2_0 ||
getDeviceVersion(cameraId, &facing) < CAMERA_DEVICE_API_VERSION_3_0) {
/**
@@ -572,17 +604,16 @@
*/
ALOGI("%s: Switching to HAL1 shim implementation...", __FUNCTION__);
- if ((ret = generateShimMetadata(cameraId, cameraInfo)) != OK) {
- return ret;
- }
-
+ ret = generateShimMetadata(cameraId, cameraInfo);
} else {
/**
* Normal HAL 2.1+ codepath.
*/
struct camera_info info;
ret = filterGetInfoErrorCode(mModule->getCameraInfo(cameraId, &info));
- *cameraInfo = info.static_camera_characteristics;
+ if (ret.isOk()) {
+ *cameraInfo = info.static_camera_characteristics;
+ }
}
return ret;
@@ -619,15 +650,19 @@
return INT_MAX - procState;
}
-status_t CameraService::getCameraVendorTagDescriptor(/*out*/sp<VendorTagDescriptor>& desc) {
+Status CameraService::getCameraVendorTagDescriptor(
+ /*out*/
+ hardware::camera2::params::VendorTagDescriptor* desc) {
ATRACE_CALL();
if (!mModule) {
ALOGE("%s: camera hardware module doesn't exist", __FUNCTION__);
- return -ENODEV;
+ return STATUS_ERROR(ERROR_DISCONNECTED, "Camera subsystem not available");
}
-
- desc = VendorTagDescriptor::getGlobalVendorTagDescriptor();
- return OK;
+ sp<VendorTagDescriptor> globalDescriptor = VendorTagDescriptor::getGlobalVendorTagDescriptor();
+ if (globalDescriptor != nullptr) {
+ *desc = *(globalDescriptor.get());
+ }
+ return Status::ok();
}
int CameraService::getDeviceVersion(int cameraId, int* facing) {
@@ -651,15 +686,21 @@
return deviceVersion;
}
-status_t CameraService::filterGetInfoErrorCode(status_t err) {
+Status CameraService::filterGetInfoErrorCode(status_t err) {
switch(err) {
case NO_ERROR:
+ return Status::ok();
case -EINVAL:
- return err;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+ "CameraId is not valid for HAL module");
+ case -ENODEV:
+ return STATUS_ERROR(ERROR_DISCONNECTED,
+ "Camera device not available");
default:
- break;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Camera HAL encountered error %d: %s",
+ err, strerror(-err));
}
- return -ENODEV;
}
bool CameraService::setUpVendorTags() {
@@ -699,20 +740,12 @@
return true;
}
-status_t CameraService::makeClient(const sp<CameraService>& cameraService,
- const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+Status CameraService::makeClient(const sp<CameraService>& cameraService,
+ const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client) {
- // TODO: Update CameraClients + HAL interface to use strings for Camera IDs
- int id = cameraIdToInt(cameraId);
- if (id == -1) {
- ALOGE("%s: Invalid camera ID %s, cannot convert to integer.", __FUNCTION__,
- cameraId.string());
- return BAD_VALUE;
- }
-
if (halVersion < 0 || halVersion == deviceVersion) {
// Default path: HAL version is unspecified by caller, create CameraClient
// based on device version reported by the HAL.
@@ -720,11 +753,13 @@
case CAMERA_DEVICE_API_VERSION_1_0:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+ *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
clientPid, clientUid, getpid(), legacyMode);
} else { // Camera2 API route
ALOGW("Camera using old HAL version: %d", deviceVersion);
- return -EOPNOTSUPP;
+ return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
+ "Camera device \"%d\" HAL version %d does not support camera2 API",
+ cameraId, deviceVersion);
}
break;
case CAMERA_DEVICE_API_VERSION_3_0:
@@ -733,19 +768,21 @@
case CAMERA_DEVICE_API_VERSION_3_3:
if (effectiveApiLevel == API_1) { // Camera1 API route
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new Camera2Client(cameraService, tmp, packageName, id, facing,
+ *client = new Camera2Client(cameraService, tmp, packageName, cameraId, facing,
clientPid, clientUid, servicePid, legacyMode);
} else { // Camera2 API route
- sp<ICameraDeviceCallbacks> tmp =
- static_cast<ICameraDeviceCallbacks*>(cameraCb.get());
- *client = new CameraDeviceClient(cameraService, tmp, packageName, id,
+ sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
+ static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
+ *client = new CameraDeviceClient(cameraService, tmp, packageName, cameraId,
facing, clientPid, clientUid, servicePid);
}
break;
default:
// Should not be reachable
ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return INVALID_OPERATION;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Camera device \"%d\" has unknown HAL version %d",
+ cameraId, deviceVersion);
}
} else {
// A particular HAL version is requested by caller. Create CameraClient
@@ -754,17 +791,19 @@
halVersion == CAMERA_DEVICE_API_VERSION_1_0) {
// Only support higher HAL version device opened as HAL1.0 device.
sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new CameraClient(cameraService, tmp, packageName, id, facing,
+ *client = new CameraClient(cameraService, tmp, packageName, cameraId, facing,
clientPid, clientUid, servicePid, legacyMode);
} else {
// Other combinations (e.g. HAL3.x open as HAL2.x) are not supported yet.
ALOGE("Invalid camera HAL version %x: HAL %x device can only be"
" opened as HAL %x device", halVersion, deviceVersion,
CAMERA_DEVICE_API_VERSION_1_0);
- return INVALID_OPERATION;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera device \"%d\" (HAL version %d) cannot be opened as HAL version %d",
+ cameraId, deviceVersion, halVersion);
}
}
- return NO_ERROR;
+ return Status::ok();
}
String8 CameraService::toString(std::set<userid_t> intSet) {
@@ -781,33 +820,35 @@
return s;
}
-status_t CameraService::initializeShimMetadata(int cameraId) {
+Status CameraService::initializeShimMetadata(int cameraId) {
int uid = getCallingUid();
String16 internalPackageName("cameraserver");
String8 id = String8::format("%d", cameraId);
- status_t ret = NO_ERROR;
+ Status ret = Status::ok();
sp<Client> tmp = nullptr;
- if ((ret = connectHelper<ICameraClient,Client>(sp<ICameraClient>{nullptr}, id,
- static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED), internalPackageName, uid,
- USE_CALLING_PID, API_1, false, true, tmp)) != NO_ERROR) {
- ALOGE("%s: Error %d (%s) initializing shim metadata.", __FUNCTION__, ret, strerror(ret));
- return ret;
+ if (!(ret = connectHelper<ICameraClient,Client>(
+ sp<ICameraClient>{nullptr}, id, static_cast<int>(CAMERA_HAL_API_VERSION_UNSPECIFIED),
+ internalPackageName, uid, USE_CALLING_PID,
+ API_1, /*legacyMode*/ false, /*shimUpdateOnly*/ true,
+ /*out*/ tmp)
+ ).isOk()) {
+ ALOGE("%s: Error initializing shim metadata: %s", __FUNCTION__, ret.toString8().string());
}
- return NO_ERROR;
+ return ret;
}
-status_t CameraService::getLegacyParametersLazy(int cameraId,
+Status CameraService::getLegacyParametersLazy(int cameraId,
/*out*/
CameraParameters* parameters) {
ALOGV("%s: for cameraId: %d", __FUNCTION__, cameraId);
- status_t ret = 0;
+ Status ret = Status::ok();
if (parameters == NULL) {
ALOGE("%s: parameters must not be null", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Parameters must not be null");
}
String8 id = String8::format("%d", cameraId);
@@ -819,19 +860,20 @@
auto cameraState = getCameraState(id);
if (cameraState == nullptr) {
ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
- return BAD_VALUE;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Invalid camera ID: %s", id.string());
}
CameraParameters p = cameraState->getShimParams();
if (!p.isEmpty()) {
*parameters = p;
- return NO_ERROR;
+ return ret;
}
}
int64_t token = IPCThreadState::self()->clearCallingIdentity();
ret = initializeShimMetadata(cameraId);
IPCThreadState::self()->restoreCallingIdentity(token);
- if (ret != NO_ERROR) {
+ if (!ret.isOk()) {
// Error already logged by callee
return ret;
}
@@ -843,18 +885,19 @@
auto cameraState = getCameraState(id);
if (cameraState == nullptr) {
ALOGE("%s: Invalid camera ID: %s", __FUNCTION__, id.string());
- return BAD_VALUE;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Invalid camera ID: %s", id.string());
}
CameraParameters p = cameraState->getShimParams();
if (!p.isEmpty()) {
*parameters = p;
- return NO_ERROR;
+ return ret;
}
}
ALOGE("%s: Parameters were not initialized, or were empty. Device may not be present.",
__FUNCTION__);
- return INVALID_OPERATION;
+ return STATUS_ERROR(ERROR_INVALID_OPERATION, "Unable to initialize legacy parameters");
}
// Can camera service trust the caller based on the calling UID?
@@ -868,8 +911,9 @@
}
}
-status_t CameraService::validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid,
- /*inout*/int& clientPid) const {
+Status CameraService::validateConnectLocked(const String8& cameraId,
+ const String8& clientName8, /*inout*/int& clientUid, /*inout*/int& clientPid,
+ /*out*/int& originalClientPid) const {
int callingPid = getCallingPid();
int callingUid = getCallingUid();
@@ -880,7 +924,11 @@
} else if (!isTrustedCallingUid(callingUid)) {
ALOGE("CameraService::connect X (calling PID %d, calling UID %d) rejected "
"(don't trust clientUid %d)", callingPid, callingUid, clientUid);
- return PERMISSION_DENIED;
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "Untrusted caller (calling PID %d, UID %d) trying to "
+ "forward camera access to camera %s for client %s (PID %d, UID %d)",
+ callingPid, callingUid, cameraId.string(),
+ clientName8.string(), clientUid, clientPid);
}
// Check if we can trust clientPid
@@ -889,30 +937,39 @@
} else if (!isTrustedCallingUid(callingUid)) {
ALOGE("CameraService::connect X (calling PID %d, calling UID %d) rejected "
"(don't trust clientPid %d)", callingPid, callingUid, clientPid);
- return PERMISSION_DENIED;
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "Untrusted caller (calling PID %d, UID %d) trying to "
+ "forward camera access to camera %s for client %s (PID %d, UID %d)",
+ callingPid, callingUid, cameraId.string(),
+ clientName8.string(), clientUid, clientPid);
}
// If it's not calling from cameraserver, check the permission.
if (callingPid != getpid() &&
!checkPermission(String16("android.permission.CAMERA"), clientPid, clientUid)) {
ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
- return PERMISSION_DENIED;
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" without camera permission",
+ clientName8.string(), clientUid, clientPid, cameraId.string());
}
// Only use passed in clientPid to check permission. Use calling PID as the client PID that's
// connected to camera service directly.
+ originalClientPid = clientPid;
clientPid = callingPid;
if (!mModule) {
ALOGE("CameraService::connect X (PID %d) rejected (camera HAL module not loaded)",
callingPid);
- return -ENODEV;
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera HAL module available to open camera device \"%s\"", cameraId.string());
}
if (getCameraState(cameraId) == nullptr) {
ALOGE("CameraService::connect X (PID %d) rejected (invalid camera ID %s)", callingPid,
cameraId.string());
- return -ENODEV;
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" available", cameraId.string());
}
userid_t clientUserId = multiuser_get_user_id(clientUid);
@@ -923,10 +980,24 @@
ALOGE("CameraService::connect X (PID %d) rejected (cannot connect from "
"device user %d, currently allowed device users: %s)", callingPid, clientUserId,
toString(mAllowedUsers).string());
- return PERMISSION_DENIED;
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "Callers from device user %d are not currently allowed to connect to camera \"%s\"",
+ clientUserId, cameraId.string());
}
- return checkIfDeviceIsUsable(cameraId);
+ status_t err = checkIfDeviceIsUsable(cameraId);
+ if (err != NO_ERROR) {
+ switch(err) {
+ case -ENODEV:
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available", cameraId.string());
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Unknown error connecting to ID \"%s\"", cameraId.string());
+ }
+ }
+ return Status::ok();
}
status_t CameraService::checkIfDeviceIsUsable(const String8& cameraId) const {
@@ -938,7 +1009,7 @@
return -ENODEV;
}
- ICameraServiceListener::Status currentStatus = cameraState->getStatus();
+ int32_t currentStatus = cameraState->getStatus();
if (currentStatus == ICameraServiceListener::STATUS_NOT_PRESENT) {
ALOGE("CameraService::connect X (PID %d) rejected (camera %s is not connected)",
callingPid, cameraId.string());
@@ -1015,11 +1086,6 @@
}
}
- // Return error if the device was unplugged or removed by the HAL for some reason
- if ((ret = checkIfDeviceIsUsable(cameraId)) != NO_ERROR) {
- return ret;
- }
-
// Get current active client PIDs
std::vector<int> ownerPids(mActiveClientManager.getAllOwners());
ownerPids.push_back(clientPid);
@@ -1046,6 +1112,7 @@
if (state == nullptr) {
ALOGE("CameraService::connect X (PID %d) rejected (no camera device with ID %s)",
clientPid, cameraId.string());
+ // Should never get here because validateConnectLocked should have errored out
return BAD_VALUE;
}
@@ -1118,7 +1185,7 @@
getCameraPriorityFromProcState(priorities[priorities.size() - 1])));
// Notify the client of disconnection
- clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ clientSp->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
CaptureResultExtras());
}
}
@@ -1170,39 +1237,41 @@
return NO_ERROR;
}
-status_t CameraService::connect(
+Status CameraService::connect(
const sp<ICameraClient>& cameraClient,
int cameraId,
const String16& clientPackageName,
int clientUid,
int clientPid,
/*out*/
- sp<ICamera>& device) {
+ sp<ICamera>* device) {
ATRACE_CALL();
- status_t ret = NO_ERROR;
+ Status ret = Status::ok();
String8 id = String8::format("%d", cameraId);
sp<Client> client = nullptr;
- ret = connectHelper<ICameraClient,Client>(cameraClient, id, CAMERA_HAL_API_VERSION_UNSPECIFIED,
- clientPackageName, clientUid, clientPid, API_1, false, false, /*out*/client);
+ ret = connectHelper<ICameraClient,Client>(cameraClient, id,
+ CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, clientPid, API_1,
+ /*legacyMode*/ false, /*shimUpdateOnly*/ false,
+ /*out*/client);
- if(ret != NO_ERROR) {
+ if(!ret.isOk()) {
logRejected(id, getCallingPid(), String8(clientPackageName),
- String8::format("%s (%d)", strerror(-ret), ret));
+ ret.toString8());
return ret;
}
- device = client;
- return NO_ERROR;
+ *device = client;
+ return ret;
}
-status_t CameraService::connectLegacy(
+Status CameraService::connectLegacy(
const sp<ICameraClient>& cameraClient,
int cameraId, int halVersion,
const String16& clientPackageName,
int clientUid,
/*out*/
- sp<ICamera>& device) {
+ sp<ICamera>* device) {
ATRACE_CALL();
String8 id = String8::format("%d", cameraId);
@@ -1215,61 +1284,68 @@
* it's a particular version in which case the HAL must supported
* the open_legacy call
*/
- ALOGE("%s: camera HAL module version %x doesn't support connecting to legacy HAL devices!",
- __FUNCTION__, apiVersion);
+ String8 msg = String8::format("Camera HAL module version %x too old for connectLegacy!",
+ apiVersion);
+ ALOGE("%s: %s",
+ __FUNCTION__, msg.string());
logRejected(id, getCallingPid(), String8(clientPackageName),
- String8("HAL module version doesn't support legacy HAL connections"));
- return INVALID_OPERATION;
+ msg);
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- status_t ret = NO_ERROR;
+ Status ret = Status::ok();
sp<Client> client = nullptr;
- ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion, clientPackageName,
- clientUid, USE_CALLING_PID, API_1, true, false, /*out*/client);
+ ret = connectHelper<ICameraClient,Client>(cameraClient, id, halVersion,
+ clientPackageName, clientUid, USE_CALLING_PID, API_1,
+ /*legacyMode*/ true, /*shimUpdateOnly*/ false,
+ /*out*/client);
- if(ret != NO_ERROR) {
+ if(!ret.isOk()) {
logRejected(id, getCallingPid(), String8(clientPackageName),
- String8::format("%s (%d)", strerror(-ret), ret));
+ ret.toString8());
return ret;
}
- device = client;
- return NO_ERROR;
+ *device = client;
+ return ret;
}
-status_t CameraService::connectDevice(
- const sp<ICameraDeviceCallbacks>& cameraCb,
+Status CameraService::connectDevice(
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
int cameraId,
const String16& clientPackageName,
int clientUid,
/*out*/
- sp<ICameraDeviceUser>& device) {
+ sp<hardware::camera2::ICameraDeviceUser>* device) {
ATRACE_CALL();
- status_t ret = NO_ERROR;
+ Status ret = Status::ok();
String8 id = String8::format("%d", cameraId);
sp<CameraDeviceClient> client = nullptr;
- ret = connectHelper<ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
- CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName, clientUid, USE_CALLING_PID,
- API_2, false, false, /*out*/client);
+ ret = connectHelper<hardware::camera2::ICameraDeviceCallbacks,CameraDeviceClient>(cameraCb, id,
+ CAMERA_HAL_API_VERSION_UNSPECIFIED, clientPackageName,
+ clientUid, USE_CALLING_PID, API_2,
+ /*legacyMode*/ false, /*shimUpdateOnly*/ false,
+ /*out*/client);
- if(ret != NO_ERROR) {
+ if(!ret.isOk()) {
logRejected(id, getCallingPid(), String8(clientPackageName),
- String8::format("%s (%d)", strerror(-ret), ret));
+ ret.toString8());
return ret;
}
- device = client;
- return NO_ERROR;
+ *device = client;
+ return ret;
}
-status_t CameraService::setTorchMode(const String16& cameraId, bool enabled,
+Status CameraService::setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder) {
ATRACE_CALL();
if (enabled && clientBinder == nullptr) {
ALOGE("%s: torch client binder is NULL", __FUNCTION__);
- return -EINVAL;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT,
+ "Torch client Binder is null");
}
String8 id = String8(cameraId.string());
@@ -1279,35 +1355,47 @@
auto state = getCameraState(id);
if (state == nullptr) {
ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
- return -EINVAL;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera ID \"%s\" is a not valid camera ID", id.string());
}
- ICameraServiceListener::Status cameraStatus = state->getStatus();
+ int32_t cameraStatus = state->getStatus();
if (cameraStatus != ICameraServiceListener::STATUS_PRESENT &&
cameraStatus != ICameraServiceListener::STATUS_NOT_AVAILABLE) {
ALOGE("%s: camera id is invalid %s", __FUNCTION__, id.string());
- return -EINVAL;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera ID \"%s\" is a not valid camera ID", id.string());
}
{
Mutex::Autolock al(mTorchStatusMutex);
- ICameraServiceListener::TorchStatus status;
- status_t res = getTorchStatusLocked(id, &status);
- if (res) {
+ int32_t status;
+ status_t err = getTorchStatusLocked(id, &status);
+ if (err != OK) {
+ if (err == NAME_NOT_FOUND) {
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Camera \"%s\" does not have a flash unit", id.string());
+ }
ALOGE("%s: getting current torch status failed for camera %s",
__FUNCTION__, id.string());
- return -EINVAL;
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Error updating torch status for camera \"%s\": %s (%d)", id.string(),
+ strerror(-err), err);
}
if (status == ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE) {
if (cameraStatus == ICameraServiceListener::STATUS_NOT_AVAILABLE) {
ALOGE("%s: torch mode of camera %s is not available because "
"camera is in use", __FUNCTION__, id.string());
- return -EBUSY;
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Torch for camera \"%s\" is not available due to an existing camera user",
+ id.string());
} else {
ALOGE("%s: torch mode of camera %s is not available due to "
"insufficient resources", __FUNCTION__, id.string());
- return -EUSERS;
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Torch for camera \"%s\" is not available due to insufficient resources",
+ id.string());
}
}
}
@@ -1325,12 +1413,25 @@
}
}
- status_t res = mFlashlight->setTorchMode(id, enabled);
+ status_t err = mFlashlight->setTorchMode(id, enabled);
- if (res) {
- ALOGE("%s: setting torch mode of camera %s to %d failed. %s (%d)",
- __FUNCTION__, id.string(), enabled, strerror(-res), res);
- return res;
+ if (err != OK) {
+ int32_t errorCode;
+ String8 msg;
+ switch (err) {
+ case -ENOSYS:
+ msg = String8::format("Camera \"%s\" has no flashlight",
+ id.string());
+ errorCode = ERROR_ILLEGAL_ARGUMENT;
+ break;
+ default:
+ msg = String8::format(
+ "Setting torch mode of camera \"%s\" to %d failed: %s (%d)",
+ id.string(), enabled, strerror(-err), err);
+ errorCode = ERROR_INVALID_OPERATION;
+ }
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(errorCode, msg.string());
}
{
@@ -1350,34 +1451,36 @@
}
}
- return OK;
+ return Status::ok();
}
-void CameraService::notifySystemEvent(int32_t eventId, const int32_t* args, size_t length) {
+Status CameraService::notifySystemEvent(int32_t eventId,
+ const std::vector<int32_t>& args) {
ATRACE_CALL();
switch(eventId) {
- case ICameraService::USER_SWITCHED: {
- doUserSwitch(/*newUserIds*/args, /*length*/length);
+ case ICameraService::EVENT_USER_SWITCHED: {
+ doUserSwitch(/*newUserIds*/ args);
break;
}
- case ICameraService::NO_EVENT:
+ case ICameraService::EVENT_NONE:
default: {
ALOGW("%s: Received invalid system event from system_server: %d", __FUNCTION__,
eventId);
break;
}
}
+ return Status::ok();
}
-status_t CameraService::addListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::addListener(const sp<ICameraServiceListener>& listener) {
ATRACE_CALL();
ALOGV("%s: Add listener %p", __FUNCTION__, listener.get());
if (listener == nullptr) {
ALOGE("%s: Listener must not be null", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to addListener");
}
Mutex::Autolock lock(mServiceLock);
@@ -1388,7 +1491,7 @@
if (IInterface::asBinder(it) == IInterface::asBinder(listener)) {
ALOGW("%s: Tried to add listener %p which was already subscribed",
__FUNCTION__, listener.get());
- return ALREADY_EXISTS;
+ return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
}
}
@@ -1417,17 +1520,17 @@
}
}
- return OK;
+ return Status::ok();
}
-status_t CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
+Status CameraService::removeListener(const sp<ICameraServiceListener>& listener) {
ATRACE_CALL();
ALOGV("%s: Remove listener %p", __FUNCTION__, listener.get());
if (listener == 0) {
ALOGE("%s: Listener must not be null", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Null listener given to removeListener");
}
Mutex::Autolock lock(mServiceLock);
@@ -1437,7 +1540,7 @@
for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
if (IInterface::asBinder(*it) == IInterface::asBinder(listener)) {
mListenerList.erase(it);
- return OK;
+ return Status::ok();
}
}
}
@@ -1445,23 +1548,23 @@
ALOGW("%s: Tried to remove a listener %p which was not subscribed",
__FUNCTION__, listener.get());
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Unregistered listener given to removeListener");
}
-status_t CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
+Status CameraService::getLegacyParameters(int cameraId, /*out*/String16* parameters) {
ATRACE_CALL();
ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
if (parameters == NULL) {
ALOGE("%s: parameters must not be null", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, "Parameters must not be null");
}
- status_t ret = 0;
+ Status ret = Status::ok();
CameraParameters shimParams;
- if ((ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)) != OK) {
+ if (!(ret = getLegacyParametersLazy(cameraId, /*out*/&shimParams)).isOk()) {
// Error logged by caller
return ret;
}
@@ -1471,10 +1574,10 @@
*parameters = shimParamsString16;
- return OK;
+ return ret;
}
-status_t CameraService::supportsCameraApi(int cameraId, int apiVersion) {
+Status CameraService::supportsCameraApi(int cameraId, int apiVersion, bool *isSupported) {
ATRACE_CALL();
ALOGV("%s: for camera ID = %d", __FUNCTION__, cameraId);
@@ -1484,40 +1587,48 @@
case API_VERSION_2:
break;
default:
- ALOGE("%s: Bad API version %d", __FUNCTION__, apiVersion);
- return BAD_VALUE;
+ String8 msg = String8::format("Unknown API version %d", apiVersion);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
int facing = -1;
int deviceVersion = getDeviceVersion(cameraId, &facing);
switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- if (apiVersion == API_VERSION_2) {
- ALOGV("%s: Camera id %d uses HAL prior to HAL3.2, doesn't support api2 without shim",
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ if (apiVersion == API_VERSION_2) {
+ ALOGV("%s: Camera id %d uses HAL version %d <3.2, doesn't support api2 without shim",
+ __FUNCTION__, cameraId, deviceVersion);
+ *isSupported = false;
+ } else { // if (apiVersion == API_VERSION_1) {
+ ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
+ __FUNCTION__, cameraId);
+ *isSupported = true;
+ }
+ break;
+ case CAMERA_DEVICE_API_VERSION_3_2:
+ case CAMERA_DEVICE_API_VERSION_3_3:
+ ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
__FUNCTION__, cameraId);
- return -EOPNOTSUPP;
- } else { // if (apiVersion == API_VERSION_1) {
- ALOGV("%s: Camera id %d uses older HAL before 3.2, but api1 is always supported",
- __FUNCTION__, cameraId);
- return OK;
+ *isSupported = true;
+ break;
+ case -1: {
+ String8 msg = String8::format("Unknown camera ID %d", cameraId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- ALOGV("%s: Camera id %d uses HAL3.2 or newer, supports api1/api2 directly",
- __FUNCTION__, cameraId);
- return OK;
- case -1:
- ALOGE("%s: Invalid camera id %d", __FUNCTION__, cameraId);
- return BAD_VALUE;
- default:
- ALOGE("%s: Unknown camera device HAL version: %d", __FUNCTION__, deviceVersion);
- return INVALID_OPERATION;
+ default: {
+ String8 msg = String8::format("Unknown device version %d for device %d",
+ deviceVersion, cameraId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
+ }
}
- return OK;
+ return Status::ok();
}
void CameraService::removeByClient(const BasicClient* client) {
@@ -1554,7 +1665,8 @@
evicted.push_back(clientSp);
// Notify the client of disconnection
- clientSp->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
+ clientSp->notifyError(
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED,
CaptureResultExtras());
}
}
@@ -1679,19 +1791,19 @@
return clientDescriptorPtr->getValue();
}
-void CameraService::doUserSwitch(const int32_t* newUserId, size_t length) {
+void CameraService::doUserSwitch(const std::vector<int32_t>& newUserIds) {
// Acquire mServiceLock and prevent other clients from connecting
std::unique_ptr<AutoConditionLock> lock =
AutoConditionLock::waitAndAcquire(mServiceLockWrapper);
std::set<userid_t> newAllowedUsers;
- for (size_t i = 0; i < length; i++) {
- if (newUserId[i] < 0) {
+ for (size_t i = 0; i < newUserIds.size(); i++) {
+ if (newUserIds[i] < 0) {
ALOGE("%s: Bad user ID %d given during user switch, ignoring.",
- __FUNCTION__, newUserId[i]);
+ __FUNCTION__, newUserIds[i]);
return;
}
- newAllowedUsers.insert(static_cast<userid_t>(newUserId[i]));
+ newAllowedUsers.insert(static_cast<userid_t>(newUserIds[i]));
}
@@ -1817,7 +1929,7 @@
// Permission checks
switch (code) {
- case BnCameraService::NOTIFY_SYSTEM_EVENT: {
+ case BnCameraService::NOTIFYSYSTEMEVENT: {
if (pid != selfPid) {
// Ensure we're being called by system_server, or similar process with
// permissions to notify the camera service about system events
@@ -1979,9 +2091,10 @@
mDestructionStarted = true;
}
-void CameraService::BasicClient::disconnect() {
+binder::Status CameraService::BasicClient::disconnect() {
+ binder::Status res = Status::ok();
if (mDisconnected) {
- return;
+ return res;
}
mDisconnected = true;
@@ -1999,6 +2112,8 @@
// client shouldn't be able to call into us anymore
mClientPid = 0;
+
+ return res;
}
status_t CameraService::BasicClient::dump(int, const Vector<String16>&) {
@@ -2080,7 +2195,7 @@
mClientPackageName);
mOpsActive = false;
- auto rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
+ std::initializer_list<int32_t> rejected = {ICameraServiceListener::STATUS_NOT_PRESENT,
ICameraServiceListener::STATUS_ENUMERATING};
// Transition to PRESENT if the camera is not in either of the rejected states
@@ -2131,7 +2246,7 @@
// and to prevent further calls by client.
mClientPid = getCallingPid();
CaptureResultExtras resultExtras; // a dummy result (invalid)
- notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
+ notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE, resultExtras);
disconnect();
}
}
@@ -2149,7 +2264,7 @@
return sp<Client>{nullptr};
}
-void CameraService::Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void CameraService::Client::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
(void) errorCode;
(void) resultExtras;
@@ -2161,9 +2276,9 @@
}
// NOTE: function is idempotent
-void CameraService::Client::disconnect() {
+binder::Status CameraService::Client::disconnect() {
ALOGV("Client::disconnect");
- BasicClient::disconnect();
+ return BasicClient::disconnect();
}
bool CameraService::Client::canCastToApiClient(apiLevel level) const {
@@ -2192,7 +2307,7 @@
CameraService::CameraState::~CameraState() {}
-ICameraServiceListener::Status CameraService::CameraState::getStatus() const {
+int32_t CameraService::CameraState::getStatus() const {
Mutex::Autolock lock(mStatusLock);
return mStatus;
}
@@ -2554,12 +2669,12 @@
__FUNCTION__);
}
-void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId) {
+void CameraService::updateStatus(int32_t status, const String8& cameraId) {
updateStatus(status, cameraId, {});
}
-void CameraService::updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
- std::initializer_list<ICameraServiceListener::Status> rejectSourceStates) {
+void CameraService::updateStatus(int32_t status, const String8& cameraId,
+ std::initializer_list<int32_t> rejectSourceStates) {
// Do not lock mServiceLock here or can get into a deadlock from
// connect() -> disconnect -> updateStatus
@@ -2574,15 +2689,15 @@
// Update the status for this camera state, then send the onStatusChangedCallbacks to each
// of the listeners with both the mStatusStatus and mStatusListenerLock held
state->updateStatus(status, cameraId, rejectSourceStates, [this]
- (const String8& cameraId, ICameraServiceListener::Status status) {
+ (const String8& cameraId, int32_t status) {
if (status != ICameraServiceListener::STATUS_ENUMERATING) {
// Update torch status if it has a flash unit.
Mutex::Autolock al(mTorchStatusMutex);
- ICameraServiceListener::TorchStatus torchStatus;
+ int32_t torchStatus;
if (getTorchStatusLocked(cameraId, &torchStatus) !=
NAME_NOT_FOUND) {
- ICameraServiceListener::TorchStatus newTorchStatus =
+ int32_t newTorchStatus =
status == ICameraServiceListener::STATUS_PRESENT ?
ICameraServiceListener::TORCH_STATUS_AVAILABLE_OFF :
ICameraServiceListener::TORCH_STATUS_NOT_AVAILABLE;
@@ -2612,7 +2727,7 @@
status_t CameraService::getTorchStatusLocked(
const String8& cameraId,
- ICameraServiceListener::TorchStatus *status) const {
+ int32_t *status) const {
if (!status) {
return BAD_VALUE;
}
@@ -2627,12 +2742,12 @@
}
status_t CameraService::setTorchStatusLocked(const String8& cameraId,
- ICameraServiceListener::TorchStatus status) {
+ int32_t status) {
ssize_t index = mTorchStatusMap.indexOfKey(cameraId);
if (index == NAME_NOT_FOUND) {
return BAD_VALUE;
}
- ICameraServiceListener::TorchStatus& item =
+ int32_t& item =
mTorchStatusMap.editValueAt(index);
item = status;
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 66de77f..11b1351 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -17,25 +17,22 @@
#ifndef ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
#define ANDROID_SERVERS_CAMERA_CAMERASERVICE_H
+#include <android/hardware/BnCameraService.h>
+#include <android/hardware/ICameraServiceListener.h>
+
#include <cutils/multiuser.h>
#include <utils/Vector.h>
#include <utils/KeyedVector.h>
#include <binder/AppOpsManager.h>
#include <binder/BinderService.h>
#include <binder/IAppOpsCallback.h>
-#include <camera/ICameraService.h>
#include <camera/ICameraServiceProxy.h>
#include <hardware/camera.h>
-#include <camera/ICamera.h>
-#include <camera/ICameraClient.h>
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
#include <camera/VendorTagDescriptor.h>
#include <camera/CaptureResult.h>
#include <camera/CameraParameters.h>
-#include <camera/ICameraServiceListener.h>
#include "CameraFlashlight.h"
#include "common/CameraModule.h"
@@ -58,7 +55,7 @@
class CameraService :
public BinderService<CameraService>,
- public BnCameraService,
+ public ::android::hardware::BnCameraService,
public IBinder::DeathRecipient,
public camera_module_callbacks_t
{
@@ -101,55 +98,58 @@
virtual void onDeviceStatusChanged(camera_device_status_t cameraId,
camera_device_status_t newStatus);
virtual void onTorchStatusChanged(const String8& cameraId,
- ICameraServiceListener::TorchStatus
- newStatus);
+ int32_t newStatus);
/////////////////////////////////////////////////////////////////////
// ICameraService
- virtual int32_t getNumberOfCameras(int type);
- virtual int32_t getNumberOfCameras();
+ virtual binder::Status getNumberOfCameras(int32_t type, int32_t* numCameras);
- virtual status_t getCameraInfo(int cameraId,
- struct CameraInfo* cameraInfo);
- virtual status_t getCameraCharacteristics(int cameraId,
- CameraMetadata* cameraInfo);
- virtual status_t getCameraVendorTagDescriptor(/*out*/ sp<VendorTagDescriptor>& desc);
-
- virtual status_t connect(const sp<ICameraClient>& cameraClient, int cameraId,
- const String16& clientPackageName, int clientUid, int clientPid,
+ virtual binder::Status getCameraInfo(int cameraId,
+ hardware::CameraInfo* cameraInfo);
+ virtual binder::Status getCameraCharacteristics(int cameraId,
+ CameraMetadata* cameraInfo);
+ virtual binder::Status getCameraVendorTagDescriptor(
/*out*/
- sp<ICamera>& device);
+ hardware::camera2::params::VendorTagDescriptor* desc);
- virtual status_t connectLegacy(const sp<ICameraClient>& cameraClient, int cameraId,
- int halVersion, const String16& clientPackageName, int clientUid,
+ virtual binder::Status connect(const sp<hardware::ICameraClient>& cameraClient,
+ int32_t cameraId, const String16& clientPackageName,
+ int32_t clientUid, int clientPid,
/*out*/
- sp<ICamera>& device);
+ sp<hardware::ICamera>* device);
- virtual status_t connectDevice(
- const sp<ICameraDeviceCallbacks>& cameraCb,
- int cameraId,
- const String16& clientPackageName,
- int clientUid,
+ virtual binder::Status connectLegacy(const sp<hardware::ICameraClient>& cameraClient,
+ int32_t cameraId, int32_t halVersion,
+ const String16& clientPackageName, int32_t clientUid,
/*out*/
- sp<ICameraDeviceUser>& device);
+ sp<hardware::ICamera>* device);
- virtual status_t addListener(const sp<ICameraServiceListener>& listener);
- virtual status_t removeListener(
- const sp<ICameraServiceListener>& listener);
+ virtual binder::Status connectDevice(
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb, int32_t cameraId,
+ const String16& clientPackageName, int32_t clientUid,
+ /*out*/
+ sp<hardware::camera2::ICameraDeviceUser>* device);
- virtual status_t getLegacyParameters(
- int cameraId,
+ virtual binder::Status addListener(const sp<hardware::ICameraServiceListener>& listener);
+ virtual binder::Status removeListener(
+ const sp<hardware::ICameraServiceListener>& listener);
+
+ virtual binder::Status getLegacyParameters(
+ int32_t cameraId,
/*out*/
String16* parameters);
- virtual status_t setTorchMode(const String16& cameraId, bool enabled,
+ virtual binder::Status setTorchMode(const String16& cameraId, bool enabled,
const sp<IBinder>& clientBinder);
- virtual void notifySystemEvent(int32_t eventId, const int32_t* args, size_t length);
+ virtual binder::Status notifySystemEvent(int32_t eventId,
+ const std::vector<int32_t>& args);
// OK = supports api of that version, -EOPNOTSUPP = does not support
- virtual status_t supportsCameraApi(
- int cameraId, int apiVersion);
+ virtual binder::Status supportsCameraApi(
+ int32_t cameraId, int32_t apiVersion,
+ /*out*/
+ bool *isSupported);
// Extra permissions checks
virtual status_t onTransact(uint32_t code, const Parcel& data,
@@ -185,35 +185,35 @@
/////////////////////////////////////////////////////////////////////
// Shared utilities
- static status_t filterGetInfoErrorCode(status_t err);
+ static binder::Status filterGetInfoErrorCode(status_t err);
/////////////////////////////////////////////////////////////////////
// CameraClient functionality
class BasicClient : public virtual RefBase {
public:
- virtual status_t initialize(CameraModule *module) = 0;
- virtual void disconnect();
+ virtual status_t initialize(CameraModule *module) = 0;
+ virtual binder::Status disconnect();
// because we can't virtually inherit IInterface, which breaks
// virtual inheritance
- virtual sp<IBinder> asBinderWrapper() = 0;
+ virtual sp<IBinder> asBinderWrapper() = 0;
// Return the remote callback binder object (e.g. ICameraDeviceCallbacks)
- sp<IBinder> getRemote() {
+ sp<IBinder> getRemote() {
return mRemoteBinder;
}
// Disallows dumping over binder interface
- virtual status_t dump(int fd, const Vector<String16>& args);
+ virtual status_t dump(int fd, const Vector<String16>& args);
// Internal dump method to be called by CameraService
- virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
+ virtual status_t dumpClient(int fd, const Vector<String16>& args) = 0;
// Return the package name for this client
virtual String16 getPackageName() const;
// Notify client about a fatal error
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) = 0;
// Get the UID of the application client using this
@@ -282,14 +282,14 @@
virtual void opChanged(int32_t op, const String16& packageName);
}; // class BasicClient
- class Client : public BnCamera, public BasicClient
+ class Client : public hardware::BnCamera, public BasicClient
{
public:
- typedef ICameraClient TCamCallbacks;
+ typedef hardware::ICameraClient TCamCallbacks;
// ICamera interface (see ICamera for details)
- virtual void disconnect();
- virtual status_t connect(const sp<ICameraClient>& client) = 0;
+ virtual binder::Status disconnect();
+ virtual status_t connect(const sp<hardware::ICameraClient>& client) = 0;
virtual status_t lock() = 0;
virtual status_t unlock() = 0;
virtual status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer)=0;
@@ -314,7 +314,7 @@
// Interface used by CameraService
Client(const sp<CameraService>& cameraService,
- const sp<ICameraClient>& cameraClient,
+ const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -324,7 +324,7 @@
~Client();
// return our camera client
- const sp<ICameraClient>& getRemoteCallback() {
+ const sp<hardware::ICameraClient>& getRemoteCallback() {
return mRemoteCallback;
}
@@ -332,7 +332,7 @@
return asBinder(this);
}
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
// Check what API level is used for this client. This is used to determine which
@@ -345,7 +345,7 @@
// Initialized in constructor
// - The app-side Binder interface to receive callbacks from us
- sp<ICameraClient> mRemoteCallback;
+ sp<hardware::ICameraClient> mRemoteCallback;
}; // class Client
@@ -432,12 +432,12 @@
*
* This method acquires mStatusLock.
*/
- ICameraServiceListener::Status getStatus() const;
+ int32_t getStatus() const;
/**
* This function updates the status for this camera device, unless the given status
* is in the given list of rejected status states, and execute the function passed in
- * with a signature onStatusUpdateLocked(const String8&, ICameraServiceListener::Status)
+ * with a signature onStatusUpdateLocked(const String8&, int32_t)
* if the status has changed.
*
* This method is idempotent, and will not result in the function passed to
@@ -445,8 +445,8 @@
* This method aquires mStatusLock.
*/
template<class Func>
- void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
- std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+ void updateStatus(int32_t status, const String8& cameraId,
+ std::initializer_list<int32_t> rejectSourceStates,
Func onStatusUpdatedLocked);
/**
@@ -477,7 +477,7 @@
private:
const String8 mId;
- ICameraServiceListener::Status mStatus; // protected by mStatusLock
+ int32_t mStatus; // protected by mStatusLock
const int mCost;
std::set<String8> mConflicting;
mutable Mutex mStatusLock;
@@ -488,8 +488,14 @@
virtual void onFirstRef();
// Check if we can connect, before we acquire the service lock.
- status_t validateConnectLocked(const String8& cameraId, /*inout*/int& clientUid,
- /*inout*/int& clientPid) const;
+ // The returned originalClientPid is the PID of the original process that wants to connect to
+ // camera.
+ // The returned clientPid is the PID of the client that directly connects to camera.
+ // originalClientPid and clientPid are usually the same except when the application uses
+ // mediaserver to connect to camera (using MediaRecorder to connect to camera). In that case,
+ // clientPid is the PID of mediaserver and originalClientPid is the PID of the application.
+ binder::Status validateConnectLocked(const String8& cameraId, const String8& clientName8,
+ /*inout*/int& clientUid, /*inout*/int& clientPid, /*out*/int& originalClientPid) const;
// Handle active client evictions, and update service state.
// Only call with with mServiceLock held.
@@ -501,8 +507,9 @@
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
- status_t connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId, int halVersion,
- const String16& clientPackageName, int clientUid, int clientPid,
+ binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+ int halVersion, const String16& clientPackageName,
+ int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device);
@@ -583,7 +590,7 @@
/**
* Handle a notification that the current device user has changed.
*/
- void doUserSwitch(const int32_t* newUserId, size_t length);
+ void doUserSwitch(const std::vector<int32_t>& newUserIds);
/**
* Add an event log message.
@@ -651,7 +658,7 @@
CameraModule* mModule;
// Guarded by mStatusListenerMutex
- std::vector<sp<ICameraServiceListener>> mListenerList;
+ std::vector<sp<hardware::ICameraServiceListener>> mListenerList;
Mutex mStatusListenerLock;
/**
@@ -662,9 +669,9 @@
* This method must be idempotent.
* This method acquires mStatusLock and mStatusListenerLock.
*/
- void updateStatus(ICameraServiceListener::Status status, const String8& cameraId,
- std::initializer_list<ICameraServiceListener::Status> rejectedSourceStates);
- void updateStatus(ICameraServiceListener::Status status, const String8& cameraId);
+ void updateStatus(int32_t status, const String8& cameraId,
+ std::initializer_list<int32_t> rejectedSourceStates);
+ void updateStatus(int32_t status, const String8& cameraId);
// flashlight control
sp<CameraFlashlight> mFlashlight;
@@ -675,7 +682,7 @@
// guard mTorchUidMap
Mutex mTorchUidMapMutex;
// camera id -> torch status
- KeyedVector<String8, ICameraServiceListener::TorchStatus> mTorchStatusMap;
+ KeyedVector<String8, int32_t> mTorchStatusMap;
// camera id -> torch client binder
// only store the last client that turns on each camera's torch mode
KeyedVector<String8, sp<IBinder>> mTorchClientMap;
@@ -688,15 +695,15 @@
// handle torch mode status change and invoke callbacks. mTorchStatusMutex
// should be locked.
void onTorchStatusChangedLocked(const String8& cameraId,
- ICameraServiceListener::TorchStatus newStatus);
+ int32_t newStatus);
// get a camera's torch status. mTorchStatusMutex should be locked.
status_t getTorchStatusLocked(const String8 &cameraId,
- ICameraServiceListener::TorchStatus *status) const;
+ int32_t *status) const;
// set a camera's torch status. mTorchStatusMutex should be locked.
status_t setTorchStatusLocked(const String8 &cameraId,
- ICameraServiceListener::TorchStatus status);
+ int32_t status);
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
@@ -708,25 +715,25 @@
/**
* Initialize and cache the metadata used by the HAL1 shim for a given cameraId.
*
- * Returns OK on success, or a negative error code.
+ * Sets Status to a service-specific error on failure
*/
- status_t initializeShimMetadata(int cameraId);
+ binder::Status initializeShimMetadata(int cameraId);
/**
* Get the cached CameraParameters for the camera. If they haven't been
* cached yet, then initialize them for the first time.
*
- * Returns OK on success, or a negative error code.
+ * Sets Status to a service-specific error on failure
*/
- status_t getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
+ binder::Status getLegacyParametersLazy(int cameraId, /*out*/CameraParameters* parameters);
/**
* Generate the CameraCharacteristics metadata required by the Camera2 API
* from the available HAL1 CameraParameters and CameraInfo.
*
- * Returns OK on success, or a negative error code.
+ * Sets Status to a service-specific error on failure
*/
- status_t generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
+ binder::Status generateShimMetadata(int cameraId, /*out*/CameraMetadata* cameraInfo);
static int getCallingPid();
@@ -742,8 +749,8 @@
*/
static int getCameraPriorityFromProcState(int procState);
- static status_t makeClient(const sp<CameraService>& cameraService,
- const sp<IInterface>& cameraCb, const String16& packageName, const String8& cameraId,
+ static binder::Status makeClient(const sp<CameraService>& cameraService,
+ const sp<IInterface>& cameraCb, const String16& packageName, int cameraId,
int facing, int clientPid, uid_t clientUid, int servicePid, bool legacyMode,
int halVersion, int deviceVersion, apiLevel effectiveApiLevel,
/*out*/sp<BasicClient>* client);
@@ -758,12 +765,12 @@
};
template<class Func>
-void CameraService::CameraState::updateStatus(ICameraServiceListener::Status status,
+void CameraService::CameraState::updateStatus(int32_t status,
const String8& cameraId,
- std::initializer_list<ICameraServiceListener::Status> rejectSourceStates,
+ std::initializer_list<int32_t> rejectSourceStates,
Func onStatusUpdatedLocked) {
Mutex::Autolock lock(mStatusLock);
- ICameraServiceListener::Status oldStatus = mStatus;
+ int32_t oldStatus = mStatus;
mStatus = status;
if (oldStatus == status) {
@@ -773,9 +780,9 @@
ALOGV("%s: Status has changed for camera ID %s from %#x to %#x", __FUNCTION__,
cameraId.string(), oldStatus, status);
- if (oldStatus == ICameraServiceListener::STATUS_NOT_PRESENT &&
- (status != ICameraServiceListener::STATUS_PRESENT &&
- status != ICameraServiceListener::STATUS_ENUMERATING)) {
+ if (oldStatus == hardware::ICameraServiceListener::STATUS_NOT_PRESENT &&
+ (status != hardware::ICameraServiceListener::STATUS_PRESENT &&
+ status != hardware::ICameraServiceListener::STATUS_ENUMERATING)) {
ALOGW("%s: From NOT_PRESENT can only transition into PRESENT or ENUMERATING",
__FUNCTION__);
@@ -800,15 +807,26 @@
onStatusUpdatedLocked(cameraId, status);
}
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, __VA_ARGS__))
+
template<class CALLBACK, class CLIENT>
-status_t CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
+binder::Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
int halVersion, const String16& clientPackageName, int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool legacyMode, bool shimUpdateOnly,
/*out*/sp<CLIENT>& device) {
- status_t ret = NO_ERROR;
+ binder::Status ret = binder::Status::ok();
+
String8 clientName8(clientPackageName);
+ int originalClientPid = 0;
+
ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) for HAL version %s and "
"Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
(halVersion == -1) ? "default" : std::to_string(halVersion).c_str(),
@@ -821,14 +839,16 @@
AutoConditionLock::waitAndAcquire(mServiceLockWrapper, DEFAULT_CONNECT_TIMEOUT_NS);
if (lock == nullptr) {
- ALOGE("CameraService::connect X (PID %d) rejected (too many other clients connecting)."
+ ALOGE("CameraService::connect (PID %d) rejected (too many other clients connecting)."
, clientPid);
- return -EBUSY;
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Cannot open camera %s for \"%s\" (PID %d): Too many other clients connecting",
+ cameraId.string(), clientName8.string(), clientPid);
}
// Enforce client permissions and do basic sanity checks
- if((ret = validateConnectLocked(cameraId, /*inout*/clientUid, /*inout*/clientPid)) !=
- NO_ERROR) {
+ if(!(ret = validateConnectLocked(cameraId, clientName8,
+ /*inout*/clientUid, /*inout*/clientPid, /*out*/originalClientPid)).isOk()) {
return ret;
}
@@ -837,22 +857,37 @@
if (shimUpdateOnly) {
auto cameraState = getCameraState(cameraId);
if (cameraState != nullptr) {
- if (!cameraState->getShimParams().isEmpty()) return NO_ERROR;
+ if (!cameraState->getShimParams().isEmpty()) return ret;
}
}
+ status_t err;
+
sp<BasicClient> clientTmp = nullptr;
std::shared_ptr<resource_policy::ClientDescriptor<String8, sp<BasicClient>>> partial;
- if ((ret = handleEvictionsLocked(cameraId, clientPid, effectiveApiLevel,
+ if ((err = handleEvictionsLocked(cameraId, originalClientPid, effectiveApiLevel,
IInterface::asBinder(cameraCb), clientName8, /*out*/&clientTmp,
/*out*/&partial)) != NO_ERROR) {
- return ret;
+ switch (err) {
+ case -ENODEV:
+ return STATUS_ERROR_FMT(ERROR_DISCONNECTED,
+ "No camera device with ID \"%s\" currently available",
+ cameraId.string());
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Higher-priority client using camera, ID \"%s\" currently unavailable",
+ cameraId.string());
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Unexpected error %s (%d) opening camera \"%s\"",
+ strerror(-err), err, cameraId.string());
+ }
}
if (clientTmp.get() != nullptr) {
// Handle special case for API1 MediaRecorder where the existing client is returned
device = static_cast<CLIENT*>(clientTmp.get());
- return NO_ERROR;
+ return ret;
}
// give flashlight a chance to close devices if necessary.
@@ -863,15 +898,16 @@
if (id == -1) {
ALOGE("%s: Invalid camera ID %s, cannot get device version from HAL.", __FUNCTION__,
cameraId.string());
- return BAD_VALUE;
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Bad camera ID \"%s\" passed to camera open", cameraId.string());
}
int facing = -1;
int deviceVersion = getDeviceVersion(id, /*out*/&facing);
sp<BasicClient> tmp = nullptr;
- if((ret = makeClient(this, cameraCb, clientPackageName, cameraId, facing, clientPid,
+ if(!(ret = makeClient(this, cameraCb, clientPackageName, id, facing, clientPid,
clientUid, getpid(), legacyMode, halVersion, deviceVersion, effectiveApiLevel,
- /*out*/&tmp)) != NO_ERROR) {
+ /*out*/&tmp)).isOk()) {
return ret;
}
client = static_cast<CLIENT*>(tmp.get());
@@ -879,9 +915,32 @@
LOG_ALWAYS_FATAL_IF(client.get() == nullptr, "%s: CameraService in invalid state",
__FUNCTION__);
- if ((ret = client->initialize(mModule)) != OK) {
+ if ((err = client->initialize(mModule)) != OK) {
ALOGE("%s: Could not initialize client from HAL module.", __FUNCTION__);
- return ret;
+ // Errors could be from the HAL module open call or from AppOpsManager
+ switch(err) {
+ case BAD_VALUE:
+ return STATUS_ERROR_FMT(ERROR_ILLEGAL_ARGUMENT,
+ "Illegal argument to HAL module for camera \"%s\"", cameraId.string());
+ case -EBUSY:
+ return STATUS_ERROR_FMT(ERROR_CAMERA_IN_USE,
+ "Camera \"%s\" is already open", cameraId.string());
+ case -EUSERS:
+ return STATUS_ERROR_FMT(ERROR_MAX_CAMERAS_IN_USE,
+ "Too many cameras already open, cannot open camera \"%s\"",
+ cameraId.string());
+ case PERMISSION_DENIED:
+ return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
+ "No permission to open camera \"%s\"", cameraId.string());
+ case -EACCES:
+ return STATUS_ERROR_FMT(ERROR_DISABLED,
+ "Camera \"%s\" disabled by policy", cameraId.string());
+ case -ENODEV:
+ default:
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Failed to initialize camera \"%s\": %s (%d)", cameraId.string(),
+ strerror(-err), err);
+ }
}
// Update shim paremeters for legacy clients
@@ -914,9 +973,12 @@
// Important: release the mutex here so the client can call back into the service from its
// destructor (can be at the end of the call)
device = client;
- return NO_ERROR;
+ return ret;
}
+#undef STATUS_ERROR_FMT
+#undef STATUS_ERROR
+
} // namespace android
#endif
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 5ac5743..4eb7b03 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -24,6 +24,7 @@
#include <cutils/properties.h>
#include <gui/Surface.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
#include "api1/Camera2Client.h"
@@ -46,7 +47,7 @@
// Interface used by CameraService
Camera2Client::Camera2Client(const sp<CameraService>& cameraService,
- const sp<ICameraClient>& cameraClient,
+ const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -367,15 +368,16 @@
// ICamera interface
-void Camera2Client::disconnect() {
+binder::Status Camera2Client::disconnect() {
ATRACE_CALL();
Mutex::Autolock icl(mBinderSerializationLock);
+ binder::Status res = binder::Status::ok();
// Allow both client and the cameraserver to disconnect at all times
int callingPid = getCallingPid();
- if (callingPid != mClientPid && callingPid != mServicePid) return;
+ if (callingPid != mClientPid && callingPid != mServicePid) return res;
- if (mDevice == 0) return;
+ if (mDevice == 0) return res;
ALOGV("Camera %d: Shutting down", mCameraId);
@@ -389,7 +391,7 @@
{
SharedParameters::Lock l(mParameters);
- if (l.mParameters.state == Parameters::DISCONNECTED) return;
+ if (l.mParameters.state == Parameters::DISCONNECTED) return res;
l.mParameters.state = Parameters::DISCONNECTED;
}
@@ -430,9 +432,11 @@
mDevice.clear();
CameraService::Client::disconnect();
+
+ return res;
}
-status_t Camera2Client::connect(const sp<ICameraClient>& client) {
+status_t Camera2Client::connect(const sp<hardware::ICameraClient>& client) {
ATRACE_CALL();
ALOGV("%s: E", __FUNCTION__);
Mutex::Autolock icl(mBinderSerializationLock);
@@ -1682,22 +1686,22 @@
}
}
-void Camera2Client::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void Camera2Client::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
int32_t err = CAMERA_ERROR_UNKNOWN;
switch(errorCode) {
- case ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DISCONNECTED:
err = CAMERA_ERROR_RELEASED;
break;
- case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
err = CAMERA_ERROR_UNKNOWN;
break;
- case ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_SERVICE:
err = CAMERA_ERROR_SERVER_DIED;
break;
- case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
- case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
- case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
ALOGW("%s: Received recoverable error %d from HAL - ignoring, requestId %" PRId32,
__FUNCTION__, errorCode, resultExtras.requestId);
return;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index 9155e43..12ee157 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -53,8 +53,8 @@
* ICamera interface (see ICamera for details)
*/
- virtual void disconnect();
- virtual status_t connect(const sp<ICameraClient>& client);
+ virtual binder::Status disconnect();
+ virtual status_t connect(const sp<hardware::ICameraClient>& client);
virtual status_t lock();
virtual status_t unlock();
virtual status_t setPreviewTarget(
@@ -77,7 +77,7 @@
virtual status_t setParameters(const String8& params);
virtual String8 getParameters() const;
virtual status_t sendCommand(int32_t cmd, int32_t arg1, int32_t arg2);
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
virtual status_t setVideoTarget(const sp<IGraphicBufferProducer>& bufferProducer);
@@ -86,7 +86,7 @@
*/
Camera2Client(const sp<CameraService>& cameraService,
- const sp<ICameraClient>& cameraClient,
+ const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
diff --git a/services/camera/libcameraservice/api1/CameraClient.cpp b/services/camera/libcameraservice/api1/CameraClient.cpp
index 8ab9a65..1086340 100644
--- a/services/camera/libcameraservice/api1/CameraClient.cpp
+++ b/services/camera/libcameraservice/api1/CameraClient.cpp
@@ -34,7 +34,7 @@
}
CameraClient::CameraClient(const sp<CameraService>& cameraService,
- const sp<ICameraClient>& cameraClient,
+ const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
int cameraId, int cameraFacing,
int clientPid, int clientUid,
@@ -193,7 +193,7 @@
}
// connect a new client to the camera
-status_t CameraClient::connect(const sp<ICameraClient>& client) {
+status_t CameraClient::connect(const sp<hardware::ICameraClient>& client) {
int callingPid = getCallingPid();
LOG1("connect E (pid %d)", callingPid);
Mutex::Autolock lock(mLock);
@@ -229,20 +229,21 @@
}
}
-void CameraClient::disconnect() {
+binder::Status CameraClient::disconnect() {
int callingPid = getCallingPid();
LOG1("disconnect E (pid %d)", callingPid);
Mutex::Autolock lock(mLock);
+ binder::Status res = binder::Status::ok();
// Allow both client and the cameraserver to disconnect at all times
if (callingPid != mClientPid && callingPid != mServicePid) {
ALOGW("different client - don't disconnect");
- return;
+ return res;
}
// Make sure disconnect() is done once and once only, whether it is called
// from the user directly, or called by the destructor.
- if (mHardware == 0) return;
+ if (mHardware == 0) return res;
LOG1("hardware teardown");
// Before destroying mHardware, we must make sure it's in the
@@ -268,6 +269,8 @@
CameraService::Client::disconnect();
LOG1("disconnect X (pid %d)", callingPid);
+
+ return res;
}
// ----------------------------------------------------------------------------
@@ -476,6 +479,12 @@
void CameraClient::releaseRecordingFrame(const sp<IMemory>& mem) {
Mutex::Autolock lock(mLock);
if (checkPidAndHardware() != NO_ERROR) return;
+ if (mem == nullptr) {
+ android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26164272",
+ IPCThreadState::self()->getCallingUid(), nullptr, 0);
+ return;
+ }
+
mHardware->releaseRecordingFrame(mem);
}
@@ -797,7 +806,7 @@
mCameraService->playSound(CameraService::SOUND_SHUTTER);
}
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
if (c != 0) {
mLock.unlock();
c->notifyCallback(CAMERA_MSG_SHUTTER, 0, 0);
@@ -834,7 +843,7 @@
}
// hold a strong pointer to the client
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
// clear callback flags if no client or one-shot mode
if (c == 0 || (mPreviewCallbackFlag & CAMERA_FRAME_CALLBACK_FLAG_ONE_SHOT_MASK)) {
@@ -864,7 +873,7 @@
void CameraClient::handlePostview(const sp<IMemory>& mem) {
disableMsgType(CAMERA_MSG_POSTVIEW_FRAME);
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_POSTVIEW_FRAME, mem, NULL);
@@ -879,7 +888,7 @@
size_t size;
sp<IMemoryHeap> heap = mem->getMemory(&offset, &size);
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_RAW_IMAGE, mem, NULL);
@@ -890,7 +899,7 @@
void CameraClient::handleCompressedPicture(const sp<IMemory>& mem) {
disableMsgType(CAMERA_MSG_COMPRESSED_IMAGE);
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(CAMERA_MSG_COMPRESSED_IMAGE, mem, NULL);
@@ -900,7 +909,7 @@
void CameraClient::handleGenericNotify(int32_t msgType,
int32_t ext1, int32_t ext2) {
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->notifyCallback(msgType, ext1, ext2);
@@ -909,7 +918,7 @@
void CameraClient::handleGenericData(int32_t msgType,
const sp<IMemory>& dataPtr, camera_frame_metadata_t *metadata) {
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallback(msgType, dataPtr, metadata);
@@ -918,7 +927,7 @@
void CameraClient::handleGenericDataTimestamp(nsecs_t timestamp,
int32_t msgType, const sp<IMemory>& dataPtr) {
- sp<ICameraClient> c = mRemoteCallback;
+ sp<hardware::ICameraClient> c = mRemoteCallback;
mLock.unlock();
if (c != 0) {
c->dataCallbackTimestamp(timestamp, msgType, dataPtr);
@@ -926,7 +935,7 @@
}
void CameraClient::copyFrameAndPostCopiedFrame(
- int32_t msgType, const sp<ICameraClient>& client,
+ int32_t msgType, const sp<hardware::ICameraClient>& client,
const sp<IMemoryHeap>& heap, size_t offset, size_t size,
camera_frame_metadata_t *metadata) {
LOG2("copyFrameAndPostCopiedFrame");
diff --git a/services/camera/libcameraservice/api1/CameraClient.h b/services/camera/libcameraservice/api1/CameraClient.h
index 9b32774..603fd17 100644
--- a/services/camera/libcameraservice/api1/CameraClient.h
+++ b/services/camera/libcameraservice/api1/CameraClient.h
@@ -33,8 +33,8 @@
{
public:
// ICamera interface (see ICamera for details)
- virtual void disconnect();
- virtual status_t connect(const sp<ICameraClient>& client);
+ virtual binder::Status disconnect();
+ virtual status_t connect(const sp<hardware::ICameraClient>& client);
virtual status_t lock();
virtual status_t unlock();
virtual status_t setPreviewTarget(const sp<IGraphicBufferProducer>& bufferProducer);
@@ -59,7 +59,7 @@
// Interface used by CameraService
CameraClient(const sp<CameraService>& cameraService,
- const sp<ICameraClient>& cameraClient,
+ const sp<hardware::ICameraClient>& cameraClient,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -116,7 +116,7 @@
void copyFrameAndPostCopiedFrame(
int32_t msgType,
- const sp<ICameraClient>& client,
+ const sp<hardware::ICameraClient>& client,
const sp<IMemoryHeap>& heap,
size_t offset, size_t size,
camera_frame_metadata_t *metadata);
diff --git a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
index 5f4fb22..b4b269a 100644
--- a/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/CallbackProcessor.cpp
@@ -155,7 +155,7 @@
callbackFormat, params.previewFormat);
res = device->createStream(mCallbackWindow,
params.previewWidth, params.previewHeight, callbackFormat,
- HAL_DATASPACE_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
+ HAL_DATASPACE_V0_JFIF, CAMERA3_STREAM_ROTATION_0, &mCallbackStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for callbacks: "
"%s (%d)", __FUNCTION__, mId,
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
index 61e1442..e3d6906 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.cpp
@@ -41,6 +41,7 @@
mNewAEState(false),
mNewFrameReceived(false),
mNewCaptureReceived(false),
+ mNewCaptureErrorCnt(0),
mShutterNotified(false),
mHalNotifiedShutter(false),
mShutterCaptureId(-1),
@@ -131,7 +132,7 @@
}
void CaptureSequencer::onCaptureAvailable(nsecs_t timestamp,
- sp<MemoryBase> captureBuffer) {
+ sp<MemoryBase> captureBuffer, bool captureError) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
Mutex::Autolock l(mInputMutex);
@@ -139,6 +140,11 @@
mCaptureBuffer = captureBuffer;
if (!mNewCaptureReceived) {
mNewCaptureReceived = true;
+ if (captureError) {
+ mNewCaptureErrorCnt++;
+ } else {
+ mNewCaptureErrorCnt = 0;
+ }
mNewCaptureSignal.signal();
}
}
@@ -623,6 +629,17 @@
break;
}
}
+ if (mNewCaptureReceived) {
+ if (mNewCaptureErrorCnt > kMaxRetryCount) {
+ ALOGW("Exceeding multiple retry limit of %d due to buffer drop", kMaxRetryCount);
+ return DONE;
+ } else if (mNewCaptureErrorCnt > 0) {
+ ALOGW("Capture error happened, retry %d...", mNewCaptureErrorCnt);
+ mNewCaptureReceived = false;
+ return STANDARD_CAPTURE;
+ }
+ }
+
if (mTimeoutCount <= 0) {
ALOGW("Timed out waiting for capture to complete");
return DONE;
diff --git a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
index b05207e..a7c61d2 100644
--- a/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
+++ b/services/camera/libcameraservice/api1/client2/CaptureSequencer.h
@@ -69,7 +69,7 @@
virtual void onResultAvailable(const CaptureResult &result);
// Notifications from the JPEG processor
- void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer);
+ void onCaptureAvailable(nsecs_t timestamp, sp<MemoryBase> captureBuffer, bool captureError);
void dump(int fd, const Vector<String16>& args);
@@ -94,6 +94,7 @@
Condition mNewFrameSignal;
bool mNewCaptureReceived;
+ int32_t mNewCaptureErrorCnt;
nsecs_t mCaptureTimestamp;
sp<MemoryBase> mCaptureBuffer;
Condition mNewCaptureSignal;
@@ -110,6 +111,7 @@
static const int kMaxTimeoutsForPrecaptureStart = 10; // 1 sec
static const int kMaxTimeoutsForPrecaptureEnd = 20; // 2 sec
static const int kMaxTimeoutsForCaptureEnd = 40; // 4 sec
+ static const int kMaxRetryCount = 3; // 3 retries in case of buffer drop
wp<Camera2Client> mClient;
wp<ZslProcessor> mZslProcessor;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
index 6490682..4d12015 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.cpp
@@ -53,7 +53,13 @@
// Check if lens is fixed-focus
if (l.mParameters.focusMode == Parameters::FOCUS_MODE_FIXED) {
m3aState.afMode = ANDROID_CONTROL_AF_MODE_OFF;
+ } else {
+ m3aState.afMode = ANDROID_CONTROL_AF_MODE_AUTO;
}
+ m3aState.awbMode = ANDROID_CONTROL_AWB_MODE_AUTO;
+ m3aState.aeState = ANDROID_CONTROL_AE_STATE_INACTIVE;
+ m3aState.afState = ANDROID_CONTROL_AF_STATE_INACTIVE;
+ m3aState.awbState = ANDROID_CONTROL_AWB_STATE_INACTIVE;
}
}
@@ -253,80 +259,99 @@
if (frameNumber <= mLast3AFrameNumber) {
ALOGV("%s: Already sent 3A for frame number %d, skipping",
__FUNCTION__, frameNumber);
+
+ // Remove the entry if there is one for this frame number in mPending3AStates.
+ mPending3AStates.removeItem(frameNumber);
return OK;
}
- mLast3AFrameNumber = frameNumber;
+ AlgState pendingState;
- // Get 3A states from result metadata
+ ssize_t index = mPending3AStates.indexOfKey(frameNumber);
+ if (index != NAME_NOT_FOUND) {
+ pendingState = mPending3AStates.valueAt(index);
+ }
+
+ // Update 3A states from the result.
bool gotAllStates = true;
- AlgState new3aState;
-
// TODO: Also use AE mode, AE trigger ID
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
+ &pendingState.afMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_MODE,
- &new3aState.afMode, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
+ &pendingState.awbMode, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_MODE,
- &new3aState.awbMode, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
+ &pendingState.aeState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AE_STATE,
- &new3aState.aeState, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
+ &pendingState.afState, frameNumber, cameraId);
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AF_STATE,
- &new3aState.afState, frameNumber, cameraId);
-
- gotAllStates &= get3aResult<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
- &new3aState.awbState, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<uint8_t>(metadata, ANDROID_CONTROL_AWB_STATE,
+ &pendingState.awbState, frameNumber, cameraId);
if (client->getCameraDeviceVersion() >= CAMERA_DEVICE_API_VERSION_3_2) {
- new3aState.afTriggerId = frame.mResultExtras.afTriggerId;
- new3aState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
+ pendingState.afTriggerId = frame.mResultExtras.afTriggerId;
+ pendingState.aeTriggerId = frame.mResultExtras.precaptureTriggerId;
} else {
- gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AF_TRIGGER_ID,
- &new3aState.afTriggerId, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<int32_t>(metadata,
+ ANDROID_CONTROL_AF_TRIGGER_ID, &pendingState.afTriggerId, frameNumber, cameraId);
- gotAllStates &= get3aResult<int32_t>(metadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &new3aState.aeTriggerId, frameNumber, cameraId);
+ gotAllStates &= updatePendingState<int32_t>(metadata,
+ ANDROID_CONTROL_AE_PRECAPTURE_ID, &pendingState.aeTriggerId, frameNumber, cameraId);
}
- if (!gotAllStates) return BAD_VALUE;
+ if (!gotAllStates) {
+ // If not all states are received, put the pending state to mPending3AStates.
+ if (index == NAME_NOT_FOUND) {
+ mPending3AStates.add(frameNumber, pendingState);
+ } else {
+ mPending3AStates.replaceValueAt(index, pendingState);
+ }
+ return NOT_ENOUGH_DATA;
+ }
- if (new3aState.aeState != m3aState.aeState) {
+ // Once all 3A states are received, notify the client about 3A changes.
+ if (pendingState.aeState != m3aState.aeState) {
ALOGV("%s: Camera %d: AE state %d->%d",
__FUNCTION__, cameraId,
- m3aState.aeState, new3aState.aeState);
- client->notifyAutoExposure(new3aState.aeState, new3aState.aeTriggerId);
+ m3aState.aeState, pendingState.aeState);
+ client->notifyAutoExposure(pendingState.aeState, pendingState.aeTriggerId);
}
- if (new3aState.afState != m3aState.afState ||
- new3aState.afMode != m3aState.afMode ||
- new3aState.afTriggerId != m3aState.afTriggerId) {
+ if (pendingState.afState != m3aState.afState ||
+ pendingState.afMode != m3aState.afMode ||
+ pendingState.afTriggerId != m3aState.afTriggerId) {
ALOGV("%s: Camera %d: AF state %d->%d. AF mode %d->%d. Trigger %d->%d",
__FUNCTION__, cameraId,
- m3aState.afState, new3aState.afState,
- m3aState.afMode, new3aState.afMode,
- m3aState.afTriggerId, new3aState.afTriggerId);
- client->notifyAutoFocus(new3aState.afState, new3aState.afTriggerId);
+ m3aState.afState, pendingState.afState,
+ m3aState.afMode, pendingState.afMode,
+ m3aState.afTriggerId, pendingState.afTriggerId);
+ client->notifyAutoFocus(pendingState.afState, pendingState.afTriggerId);
}
- if (new3aState.awbState != m3aState.awbState ||
- new3aState.awbMode != m3aState.awbMode) {
+ if (pendingState.awbState != m3aState.awbState ||
+ pendingState.awbMode != m3aState.awbMode) {
ALOGV("%s: Camera %d: AWB state %d->%d. AWB mode %d->%d",
__FUNCTION__, cameraId,
- m3aState.awbState, new3aState.awbState,
- m3aState.awbMode, new3aState.awbMode);
- client->notifyAutoWhitebalance(new3aState.awbState,
- new3aState.aeTriggerId);
+ m3aState.awbState, pendingState.awbState,
+ m3aState.awbMode, pendingState.awbMode);
+ client->notifyAutoWhitebalance(pendingState.awbState,
+ pendingState.aeTriggerId);
}
- m3aState = new3aState;
+ if (index != NAME_NOT_FOUND) {
+ mPending3AStates.removeItemsAt(index);
+ }
+
+ m3aState = pendingState;
+ mLast3AFrameNumber = frameNumber;
return OK;
}
template<typename Src, typename T>
-bool FrameProcessor::get3aResult(const CameraMetadata& result, int32_t tag,
+bool FrameProcessor::updatePendingState(const CameraMetadata& result, int32_t tag,
T* value, int32_t frameNumber, int cameraId) {
camera_metadata_ro_entry_t entry;
if (value == NULL) {
@@ -335,9 +360,14 @@
return false;
}
+ // Already got the value for this tag.
+ if (*value != static_cast<T>(NOT_SET)) {
+ return true;
+ }
+
entry = result.find(tag);
if (entry.count == 0) {
- ALOGE("%s: Camera %d: No %s provided by HAL for frame %d!",
+ ALOGV("%s: Camera %d: No %s provided by HAL for frame %d in this result!",
__FUNCTION__, cameraId,
get_camera_metadata_tag_name(tag), frameNumber);
return false;
diff --git a/services/camera/libcameraservice/api1/client2/FrameProcessor.h b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
index 68cf55b..a5b81a7 100644
--- a/services/camera/libcameraservice/api1/client2/FrameProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/FrameProcessor.h
@@ -43,6 +43,8 @@
~FrameProcessor();
private:
+ static const int32_t NOT_SET = -1;
+
wp<Camera2Client> mClient;
bool mSynthesize3ANotify;
@@ -63,7 +65,7 @@
// Helper for process3aState
template<typename Src, typename T>
- bool get3aResult(const CameraMetadata& result, int32_t tag, T* value,
+ bool updatePendingState(const CameraMetadata& result, int32_t tag, T* value,
int32_t frameNumber, int cameraId);
@@ -81,15 +83,20 @@
// These defaults need to match those in Parameters.cpp
AlgState() :
- afMode(ANDROID_CONTROL_AF_MODE_AUTO),
- awbMode(ANDROID_CONTROL_AWB_MODE_AUTO),
- aeState(ANDROID_CONTROL_AE_STATE_INACTIVE),
- afState(ANDROID_CONTROL_AF_STATE_INACTIVE),
- awbState(ANDROID_CONTROL_AWB_STATE_INACTIVE),
- afTriggerId(0),
- aeTriggerId(0) {
+ afMode((camera_metadata_enum_android_control_af_mode)NOT_SET),
+ awbMode((camera_metadata_enum_android_control_awb_mode)NOT_SET),
+ aeState((camera_metadata_enum_android_control_ae_state)NOT_SET),
+ afState((camera_metadata_enum_android_control_af_state)NOT_SET),
+ awbState((camera_metadata_enum_android_control_awb_state)NOT_SET),
+ afTriggerId(NOT_SET),
+ aeTriggerId(NOT_SET) {
}
- } m3aState;
+ };
+
+ AlgState m3aState;
+
+ // frame number -> pending 3A states for which not all data have been received yet.
+ KeyedVector<int32_t, AlgState> mPending3AStates;
// Whether the partial result is enabled for this device
bool mUsePartialResult;
diff --git a/services/camera/libcameraservice/api1/client2/JpegCompressor.h b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
index 945b1de..df5da54 100644
--- a/services/camera/libcameraservice/api1/client2/JpegCompressor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegCompressor.h
@@ -71,7 +71,6 @@
Vector<CpuConsumer::LockedBuffer*> mBuffers;
CpuConsumer::LockedBuffer *mJpegBuffer;
CpuConsumer::LockedBuffer *mAuxBuffer;
- bool mFoundJpeg, mFoundAux;
jpeg_compress_struct mCInfo;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index 3923853..ffe96fc 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -42,7 +42,8 @@
mDevice(client->getCameraDevice()),
mSequencer(sequencer),
mId(client->getCameraId()),
- mCaptureAvailable(false),
+ mCaptureDone(false),
+ mCaptureSuccess(false),
mCaptureStreamId(NO_STREAM) {
}
@@ -53,9 +54,26 @@
void JpegProcessor::onFrameAvailable(const BufferItem& /*item*/) {
Mutex::Autolock l(mInputMutex);
- if (!mCaptureAvailable) {
- mCaptureAvailable = true;
- mCaptureAvailableSignal.signal();
+ ALOGV("%s", __FUNCTION__);
+ if (!mCaptureDone) {
+ mCaptureDone = true;
+ mCaptureSuccess = true;
+ mCaptureDoneSignal.signal();
+ }
+}
+
+void JpegProcessor::onBufferAcquired(const BufferInfo& /*bufferInfo*/) {
+ // Intentionally left empty
+}
+
+void JpegProcessor::onBufferReleased(const BufferInfo& bufferInfo) {
+ Mutex::Autolock l(mInputMutex);
+ ALOGV("%s", __FUNCTION__);
+
+ if (bufferInfo.mError) {
+ mCaptureDone = true;
+ mCaptureSuccess = false;
+ mCaptureDoneSignal.signal();
}
}
@@ -145,7 +163,7 @@
// Create stream for HAL production
res = device->createStream(mCaptureWindow,
params.pictureWidth, params.pictureHeight,
- HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_JFIF,
+ HAL_PIXEL_FORMAT_BLOB, HAL_DATASPACE_V0_JFIF,
CAMERA3_STREAM_ROTATION_0, &mCaptureStreamId);
if (res != OK) {
ALOGE("%s: Camera %d: Can't create output stream for capture: "
@@ -154,6 +172,12 @@
return res;
}
+ res = device->addBufferListenerForStream(mCaptureStreamId, this);
+ if (res != OK) {
+ ALOGE("%s: Camera %d: Can't add buffer listener: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
}
return OK;
}
@@ -192,24 +216,26 @@
bool JpegProcessor::threadLoop() {
status_t res;
+ bool captureSuccess = false;
{
Mutex::Autolock l(mInputMutex);
- while (!mCaptureAvailable) {
- res = mCaptureAvailableSignal.waitRelative(mInputMutex,
+
+ while (!mCaptureDone) {
+ res = mCaptureDoneSignal.waitRelative(mInputMutex,
kWaitDuration);
if (res == TIMED_OUT) return true;
}
- mCaptureAvailable = false;
+
+ captureSuccess = mCaptureSuccess;
+ mCaptureDone = false;
}
- do {
- res = processNewCapture();
- } while (res == OK);
+ res = processNewCapture(captureSuccess);
return true;
}
-status_t JpegProcessor::processNewCapture() {
+status_t JpegProcessor::processNewCapture(bool captureSuccess) {
ATRACE_CALL();
status_t res;
sp<Camera2Heap> captureHeap;
@@ -217,7 +243,7 @@
CpuConsumer::LockedBuffer imgBuffer;
- {
+ if (captureSuccess) {
Mutex::Autolock l(mInputMutex);
if (mCaptureStreamId == NO_STREAM) {
ALOGW("%s: Camera %d: No stream is available", __FUNCTION__, mId);
@@ -269,7 +295,7 @@
sp<CaptureSequencer> sequencer = mSequencer.promote();
if (sequencer != 0) {
- sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer);
+ sequencer->onCaptureAvailable(imgBuffer.timestamp, captureBuffer, !captureSuccess);
}
return OK;
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.h b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
index ac6f5c7..7187ad9 100644
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.h
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.h
@@ -41,7 +41,8 @@
* Still image capture output image processing
*/
class JpegProcessor:
- public Thread, public CpuConsumer::FrameAvailableListener {
+ public Thread, public CpuConsumer::FrameAvailableListener,
+ public camera3::Camera3StreamBufferListener {
public:
JpegProcessor(sp<Camera2Client> client, wp<CaptureSequencer> sequencer);
~JpegProcessor();
@@ -49,6 +50,10 @@
// CpuConsumer listener implementation
void onFrameAvailable(const BufferItem& item);
+ // Camera3StreamBufferListener implementation
+ void onBufferAcquired(const BufferInfo& bufferInfo) override;
+ void onBufferReleased(const BufferInfo& bufferInfo) override;
+
status_t updateStream(const Parameters ¶ms);
status_t deleteStream();
int getStreamId() const;
@@ -61,8 +66,9 @@
int mId;
mutable Mutex mInputMutex;
- bool mCaptureAvailable;
- Condition mCaptureAvailableSignal;
+ bool mCaptureDone;
+ bool mCaptureSuccess;
+ Condition mCaptureDoneSignal;
enum {
NO_STREAM = -1
@@ -75,7 +81,7 @@
virtual bool threadLoop();
- status_t processNewCapture();
+ status_t processNewCapture(bool captureSuccess);
size_t findJpegSize(uint8_t* jpegBuffer, size_t maxSize);
};
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 7a97396..5779176 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -30,7 +30,7 @@
#include "Parameters.h"
#include "system/camera.h"
#include "hardware/camera_common.h"
-#include <camera/ICamera.h>
+#include <android/hardware/ICamera.h>
#include <media/MediaProfiles.h>
#include <media/mediarecorder.h>
@@ -872,8 +872,8 @@
// Set up initial state for non-Camera.Parameters state variables
videoFormat = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- videoDataSpace = HAL_DATASPACE_BT709;
- videoBufferMode = ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
+ videoDataSpace = HAL_DATASPACE_V0_BT709;
+ videoBufferMode = hardware::ICamera::VIDEO_BUFFER_MODE_DATA_CALLBACK_YUV;
playShutterSound = true;
enableFaceDetect = false;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 7be5696..51c8148 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -28,14 +28,23 @@
#include "common/CameraDeviceBase.h"
#include "api2/CameraDeviceClient.h"
+// Convenience methods for constructing binder::Status objects for error returns
+#define STATUS_ERROR(errorCode, errorString) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: %s", __FUNCTION__, __LINE__, errorString))
+
+#define STATUS_ERROR_FMT(errorCode, errorString, ...) \
+ binder::Status::fromServiceSpecificError(errorCode, \
+ String8::format("%s:%d: " errorString, __FUNCTION__, __LINE__, \
+ __VA_ARGS__))
namespace android {
using namespace camera2;
CameraDeviceClientBase::CameraDeviceClientBase(
const sp<CameraService>& cameraService,
- const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -56,13 +65,13 @@
// Interface used by CameraService
CameraDeviceClient::CameraDeviceClient(const sp<CameraService>& cameraService,
- const sp<ICameraDeviceCallbacks>& remoteCallback,
- const String16& clientPackageName,
- int cameraId,
- int cameraFacing,
- int clientPid,
- uid_t clientUid,
- int servicePid) :
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
+ const String16& clientPackageName,
+ int cameraId,
+ int cameraFacing,
+ int clientPid,
+ uid_t clientUid,
+ int servicePid) :
Camera2ClientBase(cameraService, remoteCallback, clientPackageName,
cameraId, cameraFacing, clientPid, clientUid, servicePid),
mInputStream(),
@@ -98,68 +107,77 @@
CameraDeviceClient::~CameraDeviceClient() {
}
-status_t CameraDeviceClient::submitRequest(sp<CaptureRequest> request,
- bool streaming,
- /*out*/
- int64_t* lastFrameNumber) {
- List<sp<CaptureRequest> > requestList;
- requestList.push_back(request);
- return submitRequestList(requestList, streaming, lastFrameNumber);
+binder::Status CameraDeviceClient::submitRequest(
+ const hardware::camera2::CaptureRequest& request,
+ bool streaming,
+ /*out*/
+ hardware::camera2::utils::SubmitInfo *submitInfo) {
+ std::vector<hardware::camera2::CaptureRequest> requestList = { request };
+ return submitRequestList(requestList, streaming, submitInfo);
}
-status_t CameraDeviceClient::submitRequestList(List<sp<CaptureRequest> > requests,
- bool streaming, int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::submitRequestList(
+ const std::vector<hardware::camera2::CaptureRequest>& requests,
+ bool streaming,
+ /*out*/
+ hardware::camera2::utils::SubmitInfo *submitInfo) {
ATRACE_CALL();
ALOGV("%s-start of function. Request list size %zu", __FUNCTION__, requests.size());
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res = binder::Status::ok();
+ status_t err;
+ if ( !(res = checkPidStatus(__FUNCTION__) ).isOk()) {
+ return res;
+ }
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
if (requests.empty()) {
ALOGE("%s: Camera %d: Sent null request. Rejecting request.",
__FUNCTION__, mCameraId);
- return BAD_VALUE;
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Empty request list");
}
List<const CameraMetadata> metadataRequestList;
- int32_t requestId = mRequestIdCounter;
+ submitInfo->mRequestId = mRequestIdCounter;
uint32_t loopCounter = 0;
- for (List<sp<CaptureRequest> >::iterator it = requests.begin(); it != requests.end(); ++it) {
- sp<CaptureRequest> request = *it;
- if (request == 0) {
- ALOGE("%s: Camera %d: Sent null request.",
- __FUNCTION__, mCameraId);
- return BAD_VALUE;
- } else if (request->mIsReprocess) {
+ for (auto&& request: requests) {
+ if (request.mIsReprocess) {
if (!mInputStream.configured) {
ALOGE("%s: Camera %d: no input stream is configured.", __FUNCTION__, mCameraId);
- return BAD_VALUE;
+ return STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "No input configured for camera %d but request is for reprocessing",
+ mCameraId);
} else if (streaming) {
ALOGE("%s: Camera %d: streaming reprocess requests not supported.", __FUNCTION__,
mCameraId);
- return BAD_VALUE;
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Repeating reprocess requests not supported");
}
}
- CameraMetadata metadata(request->mMetadata);
+ CameraMetadata metadata(request.mMetadata);
if (metadata.isEmpty()) {
ALOGE("%s: Camera %d: Sent empty metadata packet. Rejecting request.",
__FUNCTION__, mCameraId);
- return BAD_VALUE;
- } else if (request->mSurfaceList.isEmpty()) {
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request settings are empty");
+ } else if (request.mSurfaceList.isEmpty()) {
ALOGE("%s: Camera %d: Requests must have at least one surface target. "
- "Rejecting request.", __FUNCTION__, mCameraId);
- return BAD_VALUE;
+ "Rejecting request.", __FUNCTION__, mCameraId);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request has no output targets");
}
if (!enforceRequestPermissions(metadata)) {
// Callee logs
- return PERMISSION_DENIED;
+ return STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
+ "Caller does not have permission to change restricted controls");
}
/**
@@ -167,9 +185,8 @@
* the capture request's list of surface targets
*/
Vector<int32_t> outputStreamIds;
- outputStreamIds.setCapacity(request->mSurfaceList.size());
- for (size_t i = 0; i < request->mSurfaceList.size(); ++i) {
- sp<Surface> surface = request->mSurfaceList[i];
+ outputStreamIds.setCapacity(request.mSurfaceList.size());
+ for (sp<Surface> surface : request.mSurfaceList) {
if (surface == 0) continue;
sp<IGraphicBufferProducer> gbp = surface->getIGraphicBufferProducer();
@@ -178,69 +195,80 @@
// Trying to submit request with surface that wasn't created
if (idx == NAME_NOT_FOUND) {
ALOGE("%s: Camera %d: Tried to submit a request with a surface that"
- " we have not called createStream on",
- __FUNCTION__, mCameraId);
- return BAD_VALUE;
+ " we have not called createStream on",
+ __FUNCTION__, mCameraId);
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Request targets Surface that is not part of current capture session");
}
int streamId = mStreamMap.valueAt(idx);
outputStreamIds.push_back(streamId);
ALOGV("%s: Camera %d: Appending output stream %d to request",
- __FUNCTION__, mCameraId, streamId);
+ __FUNCTION__, mCameraId, streamId);
}
metadata.update(ANDROID_REQUEST_OUTPUT_STREAMS, &outputStreamIds[0],
outputStreamIds.size());
- if (request->mIsReprocess) {
+ if (request.mIsReprocess) {
metadata.update(ANDROID_REQUEST_INPUT_STREAMS, &mInputStream.id, 1);
}
- metadata.update(ANDROID_REQUEST_ID, &requestId, /*size*/1);
+ metadata.update(ANDROID_REQUEST_ID, &(submitInfo->mRequestId), /*size*/1);
loopCounter++; // loopCounter starts from 1
ALOGV("%s: Camera %d: Creating request with ID %d (%d of %zu)",
- __FUNCTION__, mCameraId, requestId, loopCounter, requests.size());
+ __FUNCTION__, mCameraId, submitInfo->mRequestId, loopCounter, requests.size());
metadataRequestList.push_back(metadata);
}
mRequestIdCounter++;
if (streaming) {
- res = mDevice->setStreamingRequestList(metadataRequestList, lastFrameNumber);
- if (res != OK) {
- ALOGE("%s: Camera %d: Got error %d after trying to set streaming "
- "request", __FUNCTION__, mCameraId, res);
+ err = mDevice->setStreamingRequestList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ if (err != OK) {
+ String8 msg = String8::format(
+ "Camera %d: Got error %s (%d) after trying to set streaming request",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+ msg.string());
} else {
- mStreamingRequestList.push_back(requestId);
+ mStreamingRequestList.push_back(submitInfo->mRequestId);
}
} else {
- res = mDevice->captureList(metadataRequestList, lastFrameNumber);
- if (res != OK) {
- ALOGE("%s: Camera %d: Got error %d after trying to set capture",
- __FUNCTION__, mCameraId, res);
+ err = mDevice->captureList(metadataRequestList, &(submitInfo->mLastFrameNumber));
+ if (err != OK) {
+ String8 msg = String8::format(
+ "Camera %d: Got error %s (%d) after trying to submit capture request",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+ msg.string());
}
- ALOGV("%s: requestId = %d ", __FUNCTION__, requestId);
+ ALOGV("%s: requestId = %d ", __FUNCTION__, submitInfo->mRequestId);
}
ALOGV("%s: Camera %d: End of function", __FUNCTION__, mCameraId);
- if (res == OK) {
- return requestId;
- }
-
return res;
}
-status_t CameraDeviceClient::cancelRequest(int requestId, int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::cancelRequest(
+ int requestId,
+ /*out*/
+ int64_t* lastFrameNumber) {
ATRACE_CALL();
ALOGV("%s, requestId = %d", __FUNCTION__, requestId);
- status_t res;
+ status_t err;
+ binder::Status res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
Vector<int>::iterator it, end;
for (it = mStreamingRequestList.begin(), end = mStreamingRequestList.end();
@@ -251,32 +279,46 @@
}
if (it == end) {
- ALOGE("%s: Camera%d: Did not find request id %d in list of streaming "
- "requests", __FUNCTION__, mCameraId, requestId);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: Did not find request ID %d in list of "
+ "streaming requests", mCameraId, requestId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- res = mDevice->clearStreamingRequest(lastFrameNumber);
+ err = mDevice->clearStreamingRequest(lastFrameNumber);
- if (res == OK) {
+ if (err == OK) {
ALOGV("%s: Camera %d: Successfully cleared streaming request",
__FUNCTION__, mCameraId);
mStreamingRequestList.erase(it);
+ } else {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error clearing streaming request: %s (%d)",
+ mCameraId, strerror(-err), err);
}
return res;
}
-status_t CameraDeviceClient::beginConfigure() {
+binder::Status CameraDeviceClient::beginConfigure() {
// TODO: Implement this.
ALOGV("%s: Not implemented yet.", __FUNCTION__);
- return OK;
+ return binder::Status::ok();
}
-status_t CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
+binder::Status CameraDeviceClient::endConfigure(bool isConstrainedHighSpeed) {
ALOGV("%s: ending configure (%d input stream, %zu output streams)",
__FUNCTION__, mInputStream.configured ? 1 : 0, mStreamMap.size());
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+ Mutex::Autolock icl(mBinderSerializationLock);
+
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
+
// Sanitize the high speed session against necessary capability bit.
if (isConstrainedHighSpeed) {
CameraMetadata staticInfo = mDevice->info();
@@ -290,33 +332,41 @@
}
}
if (!isConstrainedHighSpeedSupported) {
- ALOGE("%s: Camera %d: Try to create a constrained high speed configuration on a device"
- " that doesn't support it.",
- __FUNCTION__, mCameraId);
- return INVALID_OPERATION;
+ String8 msg = String8::format(
+ "Camera %d: Try to create a constrained high speed configuration on a device"
+ " that doesn't support it.", mCameraId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ msg.string());
}
}
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ status_t err = mDevice->configureStreams(isConstrainedHighSpeed);
+ if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Unsupported set of inputs/outputs provided",
+ mCameraId);
+ } else if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error configuring streams: %s (%d)",
+ mCameraId, strerror(-err), err);
+ }
- Mutex::Autolock icl(mBinderSerializationLock);
-
- if (!mDevice.get()) return DEAD_OBJECT;
-
- return mDevice->configureStreams(isConstrainedHighSpeed);
+ return res;
}
-status_t CameraDeviceClient::deleteStream(int streamId) {
+binder::Status CameraDeviceClient::deleteStream(int streamId) {
ATRACE_CALL();
ALOGV("%s (streamId = 0x%x)", __FUNCTION__, streamId);
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
bool isInput = false;
ssize_t index = NAME_NOT_FOUND;
@@ -333,20 +383,22 @@
}
if (index == NAME_NOT_FOUND) {
- ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
- "created yet", __FUNCTION__, mCameraId, streamId);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no such "
+ "stream created yet", mCameraId, streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
}
// Also returns BAD_VALUE if stream ID was not valid
- res = mDevice->deleteStream(streamId);
+ status_t err = mDevice->deleteStream(streamId);
- if (res == BAD_VALUE) {
- ALOGE("%s: Camera %d: Unexpected BAD_VALUE when deleting stream, but we"
- " already checked and the stream ID (%d) should be valid.",
- __FUNCTION__, mCameraId, streamId);
- } else if (res == OK) {
+ if (err != OK) {
+ String8 msg = String8::format("Camera %d: Unexpected error %s (%d) when deleting stream %d",
+ mCameraId, strerror(-err), err, streamId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ res = STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
+ } else {
if (isInput) {
mInputStream.configured = false;
} else {
@@ -357,44 +409,50 @@
return res;
}
-status_t CameraDeviceClient::createStream(const OutputConfiguration &outputConfiguration)
-{
+binder::Status CameraDeviceClient::createStream(
+ const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+ /*out*/
+ int32_t* newStreamId) {
ATRACE_CALL();
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
-
sp<IGraphicBufferProducer> bufferProducer = outputConfiguration.getGraphicBufferProducer();
if (bufferProducer == NULL) {
ALOGE("%s: bufferProducer must not be null", __FUNCTION__);
- return BAD_VALUE;
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Target Surface is invalid");
}
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
// Don't create multiple streams for the same target surface
{
ssize_t index = mStreamMap.indexOfKey(IInterface::asBinder(bufferProducer));
if (index != NAME_NOT_FOUND) {
- ALOGW("%s: Camera %d: Buffer producer already has a stream for it "
- "(ID %zd)",
- __FUNCTION__, mCameraId, index);
- return ALREADY_EXISTS;
+ String8 msg = String8::format("Camera %d: Surface already has a stream created for it "
+ "(ID %zd)", mCameraId, index);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
}
}
+ status_t err;
+
// HACK b/10949105
// Query consumer usage bits to set async operation mode for
// GLConsumer using controlledByApp parameter.
bool useAsync = false;
int32_t consumerUsage;
- if ((res = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
+ if ((err = bufferProducer->query(NATIVE_WINDOW_CONSUMER_USAGE_BITS,
&consumerUsage)) != OK) {
- ALOGE("%s: Camera %d: Failed to query consumer usage", __FUNCTION__,
- mCameraId);
- return res;
+ String8 msg = String8::format("Camera %d: Failed to query Surface consumer usage: %s (%d)",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
if (consumerUsage & GraphicBuffer::USAGE_HW_TEXTURE) {
ALOGW("%s: Camera %d: Forcing asynchronous mode for stream",
@@ -417,26 +475,30 @@
int width, height, format;
android_dataspace dataSpace;
- if ((res = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
- ALOGE("%s: Camera %d: Failed to query Surface width", __FUNCTION__,
- mCameraId);
- return res;
+ if ((err = anw->query(anw, NATIVE_WINDOW_WIDTH, &width)) != OK) {
+ String8 msg = String8::format("Camera %d: Failed to query Surface width: %s (%d)",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
- if ((res = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
- ALOGE("%s: Camera %d: Failed to query Surface height", __FUNCTION__,
- mCameraId);
- return res;
+ if ((err = anw->query(anw, NATIVE_WINDOW_HEIGHT, &height)) != OK) {
+ String8 msg = String8::format("Camera %d: Failed to query Surface height: %s (%d)",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
- if ((res = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
- ALOGE("%s: Camera %d: Failed to query Surface format", __FUNCTION__,
- mCameraId);
- return res;
+ if ((err = anw->query(anw, NATIVE_WINDOW_FORMAT, &format)) != OK) {
+ String8 msg = String8::format("Camera %d: Failed to query Surface format: %s (%d)",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
- if ((res = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
+ if ((err = anw->query(anw, NATIVE_WINDOW_DEFAULT_DATASPACE,
reinterpret_cast<int*>(&dataSpace))) != OK) {
- ALOGE("%s: Camera %d: Failed to query Surface dataSpace", __FUNCTION__,
- mCameraId);
- return res;
+ String8 msg = String8::format("Camera %d: Failed to query Surface dataspace: %s (%d)",
+ mCameraId, strerror(-err), err);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
// FIXME: remove this override since the default format should be
@@ -451,18 +513,22 @@
// Round dimensions to the nearest dimensions available for this format
if (flexibleConsumer && !CameraDeviceClient::roundBufferDimensionNearest(width, height,
format, dataSpace, mDevice->info(), /*out*/&width, /*out*/&height)) {
- ALOGE("%s: No stream configurations with the format %#x defined, failed to create stream.",
- __FUNCTION__, format);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: No supported stream configurations with "
+ "format %#x defined, failed to create output stream", mCameraId, format);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
int streamId = camera3::CAMERA3_STREAM_ID_INVALID;
- res = mDevice->createStream(surface, width, height, format, dataSpace,
- static_cast<camera3_stream_rotation_t>
- (outputConfiguration.getRotation()),
- &streamId, outputConfiguration.getSurfaceSetID());
+ err = mDevice->createStream(surface, width, height, format, dataSpace,
+ static_cast<camera3_stream_rotation_t>(outputConfiguration.getRotation()),
+ &streamId, outputConfiguration.getSurfaceSetID());
- if (res == OK) {
+ if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error creating output stream (%d x %d, fmt %x, dataSpace %x): %s (%d)",
+ mCameraId, width, height, format, dataSpace, strerror(-err), err);
+ } else {
mStreamMap.add(binder, streamId);
ALOGV("%s: Camera %d: Successfully created a new stream ID %d",
@@ -473,49 +539,56 @@
* rotate the camera stream for preview use cases.
*/
int32_t transform = 0;
- res = getRotationTransformLocked(&transform);
+ err = getRotationTransformLocked(&transform);
- if (res != OK) {
+ if (err != OK) {
// Error logged by getRotationTransformLocked.
- return res;
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION,
+ "Unable to calculate rotation transform for new stream");
}
- res = mDevice->setStreamTransform(streamId, transform);
- if (res != OK) {
- ALOGE("%s: Failed to set stream transform (stream id %d)",
- __FUNCTION__, streamId);
- return res;
+ err = mDevice->setStreamTransform(streamId, transform);
+ if (err != OK) {
+ String8 msg = String8::format("Failed to set stream transform (stream id %d)",
+ streamId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
- return streamId;
+ *newStreamId = streamId;
}
return res;
}
-status_t CameraDeviceClient::createInputStream(int width, int height,
- int format) {
+binder::Status CameraDeviceClient::createInputStream(
+ int width, int height, int format,
+ /*out*/
+ int32_t* newStreamId) {
ATRACE_CALL();
ALOGV("%s (w = %d, h = %d, f = 0x%x)", __FUNCTION__, width, height, format);
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
if (mInputStream.configured) {
- ALOGE("%s: Camera %d: Already has an input stream "
- " configuration. (ID %zd)", __FUNCTION__, mCameraId,
- mInputStream.id);
- return ALREADY_EXISTS;
+ String8 msg = String8::format("Camera %d: Already has an input stream "
+ "configured (ID %zd)", mCameraId, mInputStream.id);
+ ALOGE("%s: %s", __FUNCTION__, msg.string() );
+ return STATUS_ERROR(CameraService::ERROR_ALREADY_EXISTS, msg.string());
}
int streamId = -1;
- res = mDevice->createInputStream(width, height, format, &streamId);
- if (res == OK) {
+ status_t err = mDevice->createInputStream(width, height, format, &streamId);
+ if (err == OK) {
mInputStream.configured = true;
mInputStream.width = width;
mInputStream.height = height;
@@ -523,27 +596,42 @@
mInputStream.id = streamId;
ALOGV("%s: Camera %d: Successfully created a new input stream ID %d",
- __FUNCTION__, mCameraId, streamId);
+ __FUNCTION__, mCameraId, streamId);
- return streamId;
+ *newStreamId = streamId;
+ } else {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error creating new input stream: %s (%d)", mCameraId,
+ strerror(-err), err);
}
return res;
}
-status_t CameraDeviceClient::getInputBufferProducer(
- /*out*/sp<IGraphicBufferProducer> *producer) {
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+binder::Status CameraDeviceClient::getInputSurface(/*out*/ view::Surface *inputSurface) {
- if (producer == NULL) {
- return BAD_VALUE;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
+
+ if (inputSurface == NULL) {
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, "Null input surface");
}
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
-
- return mDevice->getInputBufferProducer(producer);
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
+ sp<IGraphicBufferProducer> producer;
+ status_t err = mDevice->getInputBufferProducer(&producer);
+ if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error getting input Surface: %s (%d)",
+ mCameraId, strerror(-err), err);
+ } else {
+ inputSurface->name = String16("CameraInput");
+ inputSurface->graphicBufferProducer = producer;
+ }
+ return res;
}
bool CameraDeviceClient::roundBufferDimensionNearest(int32_t width, int32_t height,
@@ -604,42 +692,57 @@
}
// Create a request object from a template.
-status_t CameraDeviceClient::createDefaultRequest(int templateId,
- /*out*/
- CameraMetadata* request)
+binder::Status CameraDeviceClient::createDefaultRequest(int templateId,
+ /*out*/
+ hardware::camera2::impl::CameraMetadataNative* request)
{
ATRACE_CALL();
ALOGV("%s (templateId = 0x%x)", __FUNCTION__, templateId);
- status_t res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
CameraMetadata metadata;
- if ( (res = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
+ status_t err;
+ if ( (err = mDevice->createDefaultRequest(templateId, &metadata) ) == OK &&
request != NULL) {
request->swap(metadata);
- }
+ } else if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Template ID %d is invalid or not supported: %s (%d)",
+ mCameraId, templateId, strerror(-err), err);
+ } else {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error creating default request for template %d: %s (%d)",
+ mCameraId, templateId, strerror(-err), err);
+ }
return res;
}
-status_t CameraDeviceClient::getCameraInfo(/*out*/CameraMetadata* info)
+binder::Status CameraDeviceClient::getCameraInfo(
+ /*out*/
+ hardware::camera2::impl::CameraMetadataNative* info)
{
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
+ binder::Status res;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
if (info != NULL) {
*info = mDevice->info(); // static camera metadata
@@ -649,51 +752,68 @@
return res;
}
-status_t CameraDeviceClient::waitUntilIdle()
+binder::Status CameraDeviceClient::waitUntilIdle()
{
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
// FIXME: Also need check repeating burst.
if (!mStreamingRequestList.isEmpty()) {
- ALOGE("%s: Camera %d: Try to waitUntilIdle when there are active streaming requests",
- __FUNCTION__, mCameraId);
- return INVALID_OPERATION;
+ String8 msg = String8::format(
+ "Camera %d: Try to waitUntilIdle when there are active streaming requests",
+ mCameraId);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_INVALID_OPERATION, msg.string());
}
- res = mDevice->waitUntilDrained();
+ status_t err = mDevice->waitUntilDrained();
+ if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error waiting to drain: %s (%d)",
+ mCameraId, strerror(-err), err);
+ }
ALOGV("%s Done", __FUNCTION__);
-
return res;
}
-status_t CameraDeviceClient::flush(int64_t* lastFrameNumber) {
+binder::Status CameraDeviceClient::flush(
+ /*out*/
+ int64_t* lastFrameNumber) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
- if (!mDevice.get()) return DEAD_OBJECT;
+ if (!mDevice.get()) {
+ return STATUS_ERROR(CameraService::ERROR_DISCONNECTED, "Camera device no longer alive");
+ }
mStreamingRequestList.clear();
- return mDevice->flush(lastFrameNumber);
+ status_t err = mDevice->flush(lastFrameNumber);
+ if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error flushing device: %s (%d)", mCameraId, strerror(-err), err);
+ }
+ return res;
}
-status_t CameraDeviceClient::prepare(int streamId) {
+binder::Status CameraDeviceClient::prepare(int streamId) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
@@ -707,24 +827,33 @@
}
if (index == NAME_NOT_FOUND) {
- ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
- "created yet", __FUNCTION__, mCameraId, streamId);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraId, streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
// Also returns BAD_VALUE if stream ID was not valid, or stream already
// has been used
- res = mDevice->prepare(streamId);
-
+ status_t err = mDevice->prepare(streamId);
+ if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Stream %d has already been used, and cannot be prepared",
+ mCameraId, streamId);
+ } else if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+ strerror(-err), err);
+ }
return res;
}
-status_t CameraDeviceClient::prepare2(int maxCount, int streamId) {
+binder::Status CameraDeviceClient::prepare2(int maxCount, int streamId) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
@@ -738,30 +867,41 @@
}
if (index == NAME_NOT_FOUND) {
- ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream created yet",
- __FUNCTION__, mCameraId, streamId);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraId, streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (maxCount <= 0) {
- ALOGE("%s: Camera %d: Invalid maxCount (%d) specified, must be greater than 0.",
- __FUNCTION__, mCameraId, maxCount);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: maxCount (%d) must be greater than 0",
+ mCameraId, maxCount);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
// Also returns BAD_VALUE if stream ID was not valid, or stream already
// has been used
- res = mDevice->prepare(maxCount, streamId);
+ status_t err = mDevice->prepare(maxCount, streamId);
+ if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Stream %d has already been used, and cannot be prepared",
+ mCameraId, streamId);
+ } else if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error preparing stream %d: %s (%d)", mCameraId, streamId,
+ strerror(-err), err);
+ }
return res;
}
-status_t CameraDeviceClient::tearDown(int streamId) {
+binder::Status CameraDeviceClient::tearDown(int streamId) {
ATRACE_CALL();
ALOGV("%s", __FUNCTION__);
- status_t res = OK;
- if ( (res = checkPid(__FUNCTION__) ) != OK) return res;
+ binder::Status res;
+ if (!(res = checkPidStatus(__FUNCTION__)).isOk()) return res;
Mutex::Autolock icl(mBinderSerializationLock);
@@ -775,14 +915,24 @@
}
if (index == NAME_NOT_FOUND) {
- ALOGW("%s: Camera %d: Invalid stream ID (%d) specified, no stream "
- "created yet", __FUNCTION__, mCameraId, streamId);
- return BAD_VALUE;
+ String8 msg = String8::format("Camera %d: Invalid stream ID (%d) specified, no stream "
+ "with that ID exists", mCameraId, streamId);
+ ALOGW("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
// Also returns BAD_VALUE if stream ID was not valid or if the stream is in
// use
- res = mDevice->tearDown(streamId);
+ status_t err = mDevice->tearDown(streamId);
+ if (err == BAD_VALUE) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_ILLEGAL_ARGUMENT,
+ "Camera %d: Stream %d is still in use, cannot be torn down",
+ mCameraId, streamId);
+ } else if (err != OK) {
+ res = STATUS_ERROR_FMT(CameraService::ERROR_INVALID_OPERATION,
+ "Camera %d: Error tearing down stream %d: %s (%d)", mCameraId, streamId,
+ strerror(-err), err);
+ }
return res;
}
@@ -822,10 +972,10 @@
return dumpDevice(fd, args);
}
-void CameraDeviceClient::notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+void CameraDeviceClient::notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras) {
// Thread safe. Don't bother locking.
- sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
if (remoteCb != 0) {
remoteCb->onDeviceError(errorCode, resultExtras);
@@ -834,7 +984,7 @@
void CameraDeviceClient::notifyIdle() {
// Thread safe. Don't bother locking.
- sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
if (remoteCb != 0) {
remoteCb->onDeviceIdle();
@@ -845,7 +995,7 @@
void CameraDeviceClient::notifyShutter(const CaptureResultExtras& resultExtras,
nsecs_t timestamp) {
// Thread safe. Don't bother locking.
- sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
if (remoteCb != 0) {
remoteCb->onCaptureStarted(resultExtras, timestamp);
}
@@ -854,7 +1004,7 @@
void CameraDeviceClient::notifyPrepared(int streamId) {
// Thread safe. Don't bother locking.
- sp<ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = getRemoteCallback();
if (remoteCb != 0) {
remoteCb->onPrepared(streamId);
}
@@ -893,12 +1043,19 @@
ALOGV("%s", __FUNCTION__);
// Thread-safe. No lock necessary.
- sp<ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
+ sp<hardware::camera2::ICameraDeviceCallbacks> remoteCb = mRemoteCallback;
if (remoteCb != NULL) {
remoteCb->onResultReceived(result.mMetadata, result.mResultExtras);
}
}
+binder::Status CameraDeviceClient::checkPidStatus(const char* checkLocation) {
+ status_t res = checkPid(checkLocation);
+ return (res == OK) ? binder::Status::ok() :
+ STATUS_ERROR(CameraService::ERROR_PERMISSION_DENIED,
+ "Attempt to use camera from a different process than original client");
+}
+
// TODO: move to Camera2ClientBase
bool CameraDeviceClient::enforceRequestPermissions(CameraMetadata& metadata) {
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index b1d1762..38137a2 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -17,9 +17,10 @@
#ifndef ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
#define ANDROID_SERVERS_CAMERA_PHOTOGRAPHY_CAMERADEVICECLIENT_H
-#include <camera/camera2/ICameraDeviceUser.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
+#include <android/hardware/camera2/BnCameraDeviceUser.h>
+#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
#include <camera/camera2/OutputConfiguration.h>
+#include <camera/camera2/SubmitInfo.h>
#include "CameraService.h"
#include "common/FrameProcessorBase.h"
@@ -27,17 +28,19 @@
namespace android {
-struct CameraDeviceClientBase : public CameraService::BasicClient, public BnCameraDeviceUser
+struct CameraDeviceClientBase :
+ public CameraService::BasicClient,
+ public hardware::camera2::BnCameraDeviceUser
{
- typedef ICameraDeviceCallbacks TCamCallbacks;
+ typedef hardware::camera2::ICameraDeviceCallbacks TCamCallbacks;
- const sp<ICameraDeviceCallbacks>& getRemoteCallback() {
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& getRemoteCallback() {
return mRemoteCallback;
}
protected:
CameraDeviceClientBase(const sp<CameraService>& cameraService,
- const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -45,7 +48,7 @@
uid_t clientUid,
int servicePid);
- sp<ICameraDeviceCallbacks> mRemoteCallback;
+ sp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
};
/**
@@ -63,66 +66,77 @@
*/
// Note that the callee gets a copy of the metadata.
- virtual status_t submitRequest(sp<CaptureRequest> request,
- bool streaming = false,
- /*out*/
- int64_t* lastFrameNumber = NULL);
+ virtual binder::Status submitRequest(
+ const hardware::camera2::CaptureRequest& request,
+ bool streaming = false,
+ /*out*/
+ hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
// List of requests are copied.
- virtual status_t submitRequestList(List<sp<CaptureRequest> > requests,
- bool streaming = false,
- /*out*/
- int64_t* lastFrameNumber = NULL);
- virtual status_t cancelRequest(int requestId,
- /*out*/
- int64_t* lastFrameNumber = NULL);
+ virtual binder::Status submitRequestList(
+ const std::vector<hardware::camera2::CaptureRequest>& requests,
+ bool streaming = false,
+ /*out*/
+ hardware::camera2::utils::SubmitInfo *submitInfo = nullptr);
+ virtual binder::Status cancelRequest(int requestId,
+ /*out*/
+ int64_t* lastFrameNumber = NULL);
- virtual status_t beginConfigure();
+ virtual binder::Status beginConfigure();
- virtual status_t endConfigure(bool isConstrainedHighSpeed = false);
+ virtual binder::Status endConfigure(bool isConstrainedHighSpeed = false);
// Returns -EBUSY if device is not idle
- virtual status_t deleteStream(int streamId);
+ virtual binder::Status deleteStream(int streamId);
- virtual status_t createStream(const OutputConfiguration &outputConfiguration);
+ virtual binder::Status createStream(
+ const hardware::camera2::params::OutputConfiguration &outputConfiguration,
+ /*out*/
+ int32_t* newStreamId = NULL);
// Create an input stream of width, height, and format.
- virtual status_t createInputStream(int width, int height, int format);
+ virtual binder::Status createInputStream(int width, int height, int format,
+ /*out*/
+ int32_t* newStreamId = NULL);
// Get the buffer producer of the input stream
- virtual status_t getInputBufferProducer(
- /*out*/sp<IGraphicBufferProducer> *producer);
+ virtual binder::Status getInputSurface(
+ /*out*/
+ view::Surface *inputSurface);
// Create a request object from a template.
- virtual status_t createDefaultRequest(int templateId,
- /*out*/
- CameraMetadata* request);
+ virtual binder::Status createDefaultRequest(int templateId,
+ /*out*/
+ hardware::camera2::impl::CameraMetadataNative* request);
// Get the static metadata for the camera
// -- Caller owns the newly allocated metadata
- virtual status_t getCameraInfo(/*out*/CameraMetadata* info);
+ virtual binder::Status getCameraInfo(
+ /*out*/
+ hardware::camera2::impl::CameraMetadataNative* cameraCharacteristics);
// Wait until all the submitted requests have finished processing
- virtual status_t waitUntilIdle();
+ virtual binder::Status waitUntilIdle();
// Flush all active and pending requests as fast as possible
- virtual status_t flush(/*out*/
- int64_t* lastFrameNumber = NULL);
+ virtual binder::Status flush(
+ /*out*/
+ int64_t* lastFrameNumber = NULL);
// Prepare stream by preallocating its buffers
- virtual status_t prepare(int streamId);
+ virtual binder::Status prepare(int32_t streamId);
// Tear down stream resources by freeing its unused buffers
- virtual status_t tearDown(int streamId);
+ virtual binder::Status tearDown(int32_t streamId);
// Prepare stream by preallocating up to maxCount of its buffers
- virtual status_t prepare2(int maxCount, int streamId);
+ virtual binder::Status prepare2(int32_t maxCount, int32_t streamId);
/**
* Interface used by CameraService
*/
CameraDeviceClient(const sp<CameraService>& cameraService,
- const sp<ICameraDeviceCallbacks>& remoteCallback,
+ const sp<hardware::camera2::ICameraDeviceCallbacks>& remoteCallback,
const String16& clientPackageName,
int cameraId,
int cameraFacing,
@@ -142,7 +156,7 @@
*/
virtual void notifyIdle();
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
virtual void notifyShutter(const CaptureResultExtras& resultExtras, nsecs_t timestamp);
virtual void notifyPrepared(int streamId);
@@ -167,6 +181,7 @@
static const int32_t FRAME_PROCESSOR_LISTENER_MAX_ID = 0x7fffffffL;
/** Utility members */
+ binder::Status checkPidStatus(const char* checkLocation);
bool enforceRequestPermissions(CameraMetadata& metadata);
// Find the square of the euclidean distance between two points
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 4a812b4..2cc150d 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -169,14 +169,15 @@
template <typename TClientBase>
-void Camera2ClientBase<TClientBase>::disconnect() {
+binder::Status Camera2ClientBase<TClientBase>::disconnect() {
ATRACE_CALL();
Mutex::Autolock icl(mBinderSerializationLock);
+ binder::Status res = binder::Status::ok();
// Allow both client and the media server to disconnect at all times
int callingPid = getCallingPid();
if (callingPid != TClientBase::mClientPid &&
- callingPid != TClientBase::mServicePid) return;
+ callingPid != TClientBase::mServicePid) return res;
ALOGV("Camera %d: Shutting down", TClientBase::mCameraId);
@@ -185,6 +186,8 @@
CameraService::BasicClient::disconnect();
ALOGV("Camera %d: Shut down complete complete", TClientBase::mCameraId);
+
+ return res;
}
template <typename TClientBase>
@@ -228,7 +231,7 @@
template <typename TClientBase>
void Camera2ClientBase<TClientBase>::notifyError(
- ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ int32_t errorCode,
const CaptureResultExtras& resultExtras) {
ALOGE("Error condition %d reported by HAL, requestId %" PRId32, errorCode,
resultExtras.requestId);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 81bae7b..6eea2f4 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -38,8 +38,8 @@
/**
* Base binder interface (see ICamera/ICameraDeviceUser for details)
*/
- virtual status_t connect(const sp<TCamCallbacks>& callbacks);
- virtual void disconnect();
+ virtual status_t connect(const sp<TCamCallbacks>& callbacks);
+ virtual binder::Status disconnect();
/**
* Interface used by CameraService
@@ -63,7 +63,7 @@
* CameraDeviceBase::NotificationListener implementation
*/
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
virtual void notifyIdle();
virtual void notifyShutter(const CaptureResultExtras& resultExtras,
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 6fd2b39..d570d4b 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -24,7 +24,6 @@
#include <utils/Timers.h>
#include <utils/List.h>
-#include <camera/camera2/ICameraDeviceCallbacks.h>
#include "hardware/camera2.h"
#include "hardware/camera3.h"
#include "camera/CameraMetadata.h"
@@ -32,6 +31,7 @@
#include "common/CameraModule.h"
#include "gui/IGraphicBufferProducer.h"
#include "device3/Camera3StreamInterface.h"
+#include "binder/Status.h"
namespace android {
@@ -195,7 +195,7 @@
// API1 and API2.
// Required for API 1 and 2
- virtual void notifyError(ICameraDeviceCallbacks::CameraErrorCode errorCode,
+ virtual void notifyError(int32_t errorCode,
const CaptureResultExtras &resultExtras) = 0;
// Required only for API2
@@ -296,6 +296,12 @@
virtual status_t tearDown(int streamId) = 0;
/**
+ * Add buffer listener for a particular stream in the device.
+ */
+ virtual status_t addBufferListenerForStream(int streamId,
+ wp<camera3::Camera3StreamBufferListener> listener) = 0;
+
+ /**
* Prepare stream by preallocating up to maxCount buffers for it asynchronously.
* Calls notifyPrepared() once allocation is complete.
*/
diff --git a/services/camera/libcameraservice/device1/CameraHardwareInterface.h b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
index 0fe76e5..bce0762 100644
--- a/services/camera/libcameraservice/device1/CameraHardwareInterface.h
+++ b/services/camera/libcameraservice/device1/CameraHardwareInterface.h
@@ -102,7 +102,9 @@
ALOGI("Opening camera %s", mName.string());
camera_info info;
status_t res = module->getCameraInfo(atoi(mName.string()), &info);
- if (res != OK) return res;
+ if (res != OK) {
+ return res;
+ }
int rc = OK;
if (module->getModuleApiVersion() >= CAMERA_MODULE_API_VERSION_2_3 &&
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 5f990a9..1caf157 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -43,6 +43,8 @@
#include <utils/Trace.h>
#include <utils/Timers.h>
+#include <android/hardware/camera2/ICameraDeviceUser.h>
+
#include "utils/CameraTraces.h"
#include "mediautils/SchedulingPolicyService.h"
#include "device3/Camera3Device.h"
@@ -132,8 +134,7 @@
}
camera_info info;
- res = CameraService::filterGetInfoErrorCode(module->getCameraInfo(
- mId, &info));
+ res = module->getCameraInfo(mId, &info);
if (res != OK) return res;
if (info.device_version != device->common.version) {
@@ -409,6 +410,31 @@
return measured;
}
+/**
+ * Map Android N dataspace definitions back to Android M definitions, for
+ * use with HALv3.3 or older.
+ *
+ * Only map where correspondences exist, and otherwise preserve the value.
+ */
+android_dataspace Camera3Device::mapToLegacyDataspace(android_dataspace dataSpace) {
+ switch (dataSpace) {
+ case HAL_DATASPACE_V0_SRGB_LINEAR:
+ return HAL_DATASPACE_SRGB_LINEAR;
+ case HAL_DATASPACE_V0_SRGB:
+ return HAL_DATASPACE_SRGB;
+ case HAL_DATASPACE_V0_JFIF:
+ return HAL_DATASPACE_JFIF;
+ case HAL_DATASPACE_V0_BT601_625:
+ return HAL_DATASPACE_BT601_625;
+ case HAL_DATASPACE_V0_BT601_525:
+ return HAL_DATASPACE_BT601_525;
+ case HAL_DATASPACE_V0_BT709:
+ return HAL_DATASPACE_BT709;
+ default:
+ return dataSpace;
+ }
+}
+
ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
// Get max jpeg size (area-wise).
Size maxJpegResolution = getMaxJpegResolution();
@@ -453,7 +479,7 @@
return maxBytesForPointCloud;
}
-ssize_t Camera3Device::getRawOpaqueBufferSize(uint32_t width, uint32_t height) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height) const {
const int PER_CONFIGURATION_SIZE = 3;
const int WIDTH_OFFSET = 0;
const int HEIGHT_OFFSET = 1;
@@ -462,7 +488,7 @@
mDeviceInfo.find(ANDROID_SENSOR_OPAQUE_RAW_SIZE);
size_t count = rawOpaqueSizes.count;
if (count == 0 || (count % PER_CONFIGURATION_SIZE)) {
- ALOGE("%s: Camera %d: bad opaque RAW size static metadata length(%d)!",
+ ALOGE("%s: Camera %d: bad opaque RAW size static metadata length(%zu)!",
__FUNCTION__, mId, count);
return BAD_VALUE;
}
@@ -533,9 +559,11 @@
mOutputStreams[i]->dump(fd,args);
}
- lines = String8(" Camera3 Buffer Manager:\n");
- write(fd, lines.string(), lines.size());
- mBufferManager->dump(fd, args);
+ if (mBufferManager != NULL) {
+ lines = String8(" Camera3 Buffer Manager:\n");
+ write(fd, lines.string(), lines.size());
+ mBufferManager->dump(fd, args);
+ }
lines = String8(" In-flight requests:\n");
if (mInFlightMap.size() == 0) {
@@ -1003,6 +1031,10 @@
if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_2) {
streamSetId = CAMERA3_STREAM_SET_ID_INVALID;
}
+ // Use legacy dataspace values for older HALs
+ if (mDeviceVersion <= CAMERA_DEVICE_API_VERSION_3_3) {
+ dataSpace = mapToLegacyDataspace(dataSpace);
+ }
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
if (dataSpace != HAL_DATASPACE_DEPTH) {
@@ -1236,6 +1268,13 @@
CameraMetadata *request) {
ATRACE_CALL();
ALOGV("%s: for template %d", __FUNCTION__, templateId);
+
+ if (templateId <= 0 || templateId >= CAMERA3_TEMPLATE_COUNT) {
+ android_errorWriteWithInfoLog(CameraService::SN_EVENT_LOG_ID, "26866110",
+ IPCThreadState::self()->getCallingUid(), nullptr, 0);
+ return BAD_VALUE;
+ }
+
Mutex::Autolock il(mInterfaceLock);
Mutex::Autolock l(mLock);
@@ -1613,6 +1652,26 @@
return stream->tearDown();
}
+status_t Camera3Device::addBufferListenerForStream(int streamId,
+ wp<Camera3StreamBufferListener> listener) {
+ ATRACE_CALL();
+ ALOGV("%s: Camera %d: Adding buffer listener for stream %d", __FUNCTION__, mId, streamId);
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ sp<Camera3StreamInterface> stream;
+ ssize_t outputStreamIdx = mOutputStreams.indexOfKey(streamId);
+ if (outputStreamIdx == NAME_NOT_FOUND) {
+ CLOGE("Stream %d does not exist", streamId);
+ return BAD_VALUE;
+ }
+
+ stream = mOutputStreams.editValueAt(outputStreamIdx);
+ stream->addBufferListener(listener);
+
+ return OK;
+}
+
uint32_t Camera3Device::getDeviceVersion() {
ATRACE_CALL();
Mutex::Autolock il(mInterfaceLock);
@@ -2021,7 +2080,7 @@
// Notify upstream about a device error
if (mListener != NULL) {
- mListener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+ mListener->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
CaptureResultExtras());
}
@@ -2048,176 +2107,6 @@
return OK;
}
-/**
- * Check if all 3A fields are ready, and send off a partial 3A-only result
- * to the output frame queue
- */
-bool Camera3Device::processPartial3AResult(
- uint32_t frameNumber,
- const CameraMetadata& partial, const CaptureResultExtras& resultExtras) {
-
- // Check if all 3A states are present
- // The full list of fields is
- // android.control.afMode
- // android.control.awbMode
- // android.control.aeState
- // android.control.awbState
- // android.control.afState
- // android.control.afTriggerID
- // android.control.aePrecaptureID
- // TODO: Add android.control.aeMode
-
- bool gotAllStates = true;
-
- uint8_t afMode;
- uint8_t awbMode;
- uint8_t aeState;
- uint8_t afState;
- uint8_t awbState;
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_MODE,
- &afMode, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_MODE,
- &awbMode, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AE_STATE,
- &aeState, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AF_STATE,
- &afState, frameNumber);
-
- gotAllStates &= get3AResult(partial, ANDROID_CONTROL_AWB_STATE,
- &awbState, frameNumber);
-
- if (!gotAllStates) return false;
-
- ALOGVV("%s: Camera %d: Frame %d, Request ID %d: AF mode %d, AWB mode %d, "
- "AF state %d, AE state %d, AWB state %d, "
- "AF trigger %d, AE precapture trigger %d",
- __FUNCTION__, mId, frameNumber, resultExtras.requestId,
- afMode, awbMode,
- afState, aeState, awbState,
- resultExtras.afTriggerId, resultExtras.precaptureTriggerId);
-
- // Got all states, so construct a minimal result to send
- // In addition to the above fields, this means adding in
- // android.request.frameCount
- // android.request.requestId
- // android.quirks.partialResult (for HAL version below HAL3.2)
-
- const size_t kMinimal3AResultEntries = 10;
-
- Mutex::Autolock l(mOutputLock);
-
- CaptureResult captureResult;
- captureResult.mResultExtras = resultExtras;
- captureResult.mMetadata = CameraMetadata(kMinimal3AResultEntries, /*dataCapacity*/ 0);
- // TODO: change this to sp<CaptureResult>. This will need other changes, including,
- // but not limited to CameraDeviceBase::getNextResult
- CaptureResult& min3AResult =
- *mResultQueue.insert(mResultQueue.end(), captureResult);
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_FRAME_COUNT,
- // TODO: This is problematic casting. Need to fix CameraMetadata.
- reinterpret_cast<int32_t*>(&frameNumber), frameNumber)) {
- return false;
- }
-
- int32_t requestId = resultExtras.requestId;
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_REQUEST_ID,
- &requestId, frameNumber)) {
- return false;
- }
-
- if (mDeviceVersion < CAMERA_DEVICE_API_VERSION_3_2) {
- static const uint8_t partialResult = ANDROID_QUIRKS_PARTIAL_RESULT_PARTIAL;
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_QUIRKS_PARTIAL_RESULT,
- &partialResult, frameNumber)) {
- return false;
- }
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_MODE,
- &afMode, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_MODE,
- &awbMode, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_STATE,
- &aeState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_STATE,
- &afState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AWB_STATE,
- &awbState, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AF_TRIGGER_ID,
- &resultExtras.afTriggerId, frameNumber)) {
- return false;
- }
-
- if (!insert3AResult(min3AResult.mMetadata, ANDROID_CONTROL_AE_PRECAPTURE_ID,
- &resultExtras.precaptureTriggerId, frameNumber)) {
- return false;
- }
-
- // We only send the aggregated partial when all 3A related metadata are available
- // For both API1 and API2.
- // TODO: we probably should pass through all partials to API2 unconditionally.
- mResultSignal.signal();
-
- return true;
-}
-
-template<typename T>
-bool Camera3Device::get3AResult(const CameraMetadata& result, int32_t tag,
- T* value, uint32_t frameNumber) {
- (void) frameNumber;
-
- camera_metadata_ro_entry_t entry;
-
- entry = result.find(tag);
- if (entry.count == 0) {
- ALOGVV("%s: Camera %d: Frame %d: No %s provided by HAL!", __FUNCTION__,
- mId, frameNumber, get_camera_metadata_tag_name(tag));
- return false;
- }
-
- if (sizeof(T) == sizeof(uint8_t)) {
- *value = entry.data.u8[0];
- } else if (sizeof(T) == sizeof(int32_t)) {
- *value = entry.data.i32[0];
- } else {
- ALOGE("%s: Unexpected type", __FUNCTION__);
- return false;
- }
- return true;
-}
-
-template<typename T>
-bool Camera3Device::insert3AResult(CameraMetadata& result, int32_t tag,
- const T* value, uint32_t frameNumber) {
- if (result.update(tag, value, 1) != NO_ERROR) {
- mResultQueue.erase(--mResultQueue.end(), mResultQueue.end());
- SET_ERR("Frame %d: Failed to set %s in partial metadata",
- frameNumber, get_camera_metadata_tag_name(tag));
- return false;
- }
- return true;
-}
-
void Camera3Device::returnOutputBuffers(
const camera3_stream_buffer_t *outputBuffers, size_t numBuffers,
nsecs_t timestamp) {
@@ -2285,6 +2174,48 @@
}
}
+void Camera3Device::insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ if (result == nullptr) return;
+
+ if (result->mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
+ (int32_t*)&frameNumber, 1) != OK) {
+ SET_ERR("Failed to set frame number %d in metadata", frameNumber);
+ return;
+ }
+
+ if (result->mMetadata.update(ANDROID_REQUEST_ID, &result->mResultExtras.requestId, 1) != OK) {
+ SET_ERR("Failed to set request ID in metadata for frame %d", frameNumber);
+ return;
+ }
+
+ overrideResultForPrecaptureCancel(&result->mMetadata, aeTriggerCancelOverride);
+
+ // Valid result, insert into queue
+ List<CaptureResult>::iterator queuedResult =
+ mResultQueue.insert(mResultQueue.end(), CaptureResult(*result));
+ ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
+ ", burstId = %" PRId32, __FUNCTION__,
+ queuedResult->mResultExtras.requestId,
+ queuedResult->mResultExtras.frameNumber,
+ queuedResult->mResultExtras.burstId);
+
+ mResultSignal.signal();
+}
+
+
+void Camera3Device::sendPartialCaptureResult(const camera_metadata_t * partialResult,
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride) {
+ Mutex::Autolock l(mOutputLock);
+
+ CaptureResult captureResult;
+ captureResult.mResultExtras = resultExtras;
+ captureResult.mMetadata = partialResult;
+
+ insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
+}
+
void Camera3Device::sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
@@ -2320,16 +2251,6 @@
captureResult.mResultExtras = resultExtras;
captureResult.mMetadata = pendingMetadata;
- if (captureResult.mMetadata.update(ANDROID_REQUEST_FRAME_COUNT,
- (int32_t*)&frameNumber, 1) != OK) {
- SET_ERR("Failed to set frame# in metadata (%d)",
- frameNumber);
- return;
- } else {
- ALOGVV("%s: Camera %d: Set frame# in metadata (%d)",
- __FUNCTION__, mId, frameNumber);
- }
-
// Append any previous partials to form a complete result
if (mUsePartialResult && !collectedPartialResult.isEmpty()) {
captureResult.mMetadata.append(collectedPartialResult);
@@ -2338,26 +2259,14 @@
captureResult.mMetadata.sort();
// Check that there's a timestamp in the result metadata
- camera_metadata_entry entry =
- captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
+ camera_metadata_entry entry = captureResult.mMetadata.find(ANDROID_SENSOR_TIMESTAMP);
if (entry.count == 0) {
SET_ERR("No timestamp provided by HAL for frame %d!",
frameNumber);
return;
}
- overrideResultForPrecaptureCancel(&captureResult.mMetadata, aeTriggerCancelOverride);
-
- // Valid result, insert into queue
- List<CaptureResult>::iterator queuedResult =
- mResultQueue.insert(mResultQueue.end(), CaptureResult(captureResult));
- ALOGVV("%s: result requestId = %" PRId32 ", frameNumber = %" PRId64
- ", burstId = %" PRId32, __FUNCTION__,
- queuedResult->mResultExtras.requestId,
- queuedResult->mResultExtras.frameNumber,
- queuedResult->mResultExtras.burstId);
-
- mResultSignal.signal();
+ insertResultLocked(&captureResult, frameNumber, aeTriggerCancelOverride);
}
/**
@@ -2434,7 +2343,7 @@
}
isPartialResult = (result->partial_result < mNumPartialResults);
if (isPartialResult) {
- request.partialResult.collectedResult.append(result->result);
+ request.collectedPartialResult.append(result->result);
}
} else {
camera_metadata_ro_entry_t partialResultEntry;
@@ -2447,21 +2356,17 @@
// A partial result. Flag this as such, and collect this
// set of metadata into the in-flight entry.
isPartialResult = true;
- request.partialResult.collectedResult.append(
+ request.collectedPartialResult.append(
result->result);
- request.partialResult.collectedResult.erase(
+ request.collectedPartialResult.erase(
ANDROID_QUIRKS_PARTIAL_RESULT);
}
}
if (isPartialResult) {
- // Fire off a 3A-only result if possible
- if (!request.partialResult.haveSent3A) {
- request.partialResult.haveSent3A =
- processPartial3AResult(frameNumber,
- request.partialResult.collectedResult,
- request.resultExtras);
- }
+ // Send partial capture result
+ sendPartialCaptureResult(result->result, request.resultExtras, frameNumber,
+ request.aeTriggerCancelOverride);
}
}
@@ -2476,9 +2381,9 @@
return;
}
if (mUsePartialResult &&
- !request.partialResult.collectedResult.isEmpty()) {
+ !request.collectedPartialResult.isEmpty()) {
collectedPartialResult.acquire(
- request.partialResult.collectedResult);
+ request.collectedPartialResult);
}
request.haveResultMetadata = true;
}
@@ -2521,7 +2426,7 @@
if (result->result != NULL && !isPartialResult) {
if (shutterTimestamp == 0) {
request.pendingMetadata = result->result;
- request.partialResult.collectedResult = collectedPartialResult;
+ request.collectedPartialResult = collectedPartialResult;
} else {
CameraMetadata metadata;
metadata = result->result;
@@ -2587,25 +2492,24 @@
// Map camera HAL error codes to ICameraDeviceCallback error codes
// Index into this with the HAL error code
- static const ICameraDeviceCallbacks::CameraErrorCode
- halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
+ static const int32_t halErrorMap[CAMERA3_MSG_NUM_ERRORS] = {
// 0 = Unused error code
- ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR,
// 1 = CAMERA3_MSG_ERROR_DEVICE
- ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE,
// 2 = CAMERA3_MSG_ERROR_REQUEST
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
// 3 = CAMERA3_MSG_ERROR_RESULT
- ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT,
// 4 = CAMERA3_MSG_ERROR_BUFFER
- ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER
};
- ICameraDeviceCallbacks::CameraErrorCode errorCode =
+ int32_t errorCode =
((msg.error_code >= 0) &&
(msg.error_code < CAMERA3_MSG_NUM_ERRORS)) ?
halErrorMap[msg.error_code] :
- ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_INVALID_ERROR;
int streamId = 0;
if (msg.error_stream != NULL) {
@@ -2619,13 +2523,13 @@
CaptureResultExtras resultExtras;
switch (errorCode) {
- case ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_DEVICE:
// SET_ERR calls notifyError
SET_ERR("Camera HAL reported serious device error");
break;
- case ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
- case ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
- case ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_RESULT:
+ case hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_BUFFER:
{
Mutex::Autolock l(mInFlightLock);
ssize_t idx = mInFlightMap.indexOfKey(msg.frame_number);
@@ -2640,6 +2544,7 @@
resultExtras.frameNumber);
}
}
+ resultExtras.errorStreamId = streamId;
if (listener != NULL) {
listener->notifyError(errorCode, resultExtras);
} else {
@@ -2700,7 +2605,7 @@
// send pending result and buffers
sendCaptureResult(r.pendingMetadata, r.resultExtras,
- r.partialResult.collectedResult, msg.frame_number,
+ r.collectedPartialResult, msg.frame_number,
r.hasInputBuffer, r.aeTriggerCancelOverride);
returnOutputBuffers(r.pendingOutputBuffers.array(),
r.pendingOutputBuffers.size(), r.shutterTimestamp);
@@ -2749,7 +2654,8 @@
mLatestRequestId(NAME_NOT_FOUND),
mCurrentAfTriggerId(0),
mCurrentPreCaptureTriggerId(0),
- mRepeatingLastFrameNumber(NO_IN_FLIGHT_REPEATING_FRAMES),
+ mRepeatingLastFrameNumber(
+ hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES),
mAeLockAvailable(aeLockAvailable) {
mStatusId = statusTracker->addComponent();
}
@@ -2857,7 +2763,7 @@
unpauseForNewRequests();
- mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+ mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
return OK;
}
@@ -2878,7 +2784,7 @@
if (lastFrameNumber != NULL) {
*lastFrameNumber = mRepeatingLastFrameNumber;
}
- mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+ mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
return OK;
}
@@ -2915,7 +2821,7 @@
// The requestId and burstId fields were set when the request was
// submitted originally (in convertMetadataListToRequestListLocked)
(*it)->mResultExtras.frameNumber = mFrameNumber++;
- listener->notifyError(ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ listener->notifyError(hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
(*it)->mResultExtras);
}
}
@@ -2924,7 +2830,7 @@
if (lastFrameNumber != NULL) {
*lastFrameNumber = mRepeatingLastFrameNumber;
}
- mRepeatingLastFrameNumber = NO_IN_FLIGHT_REPEATING_FRAMES;
+ mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
return OK;
}
@@ -3344,7 +3250,7 @@
Mutex::Autolock l(mRequestLock);
if (mListener != NULL) {
mListener->notifyError(
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
captureRequest->mResultExtras);
}
}
@@ -3484,7 +3390,7 @@
" %s (%d)", __FUNCTION__, strerror(-res), res);
if (mListener != NULL) {
mListener->notifyError(
- ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
+ hardware::camera2::ICameraDeviceCallbacks::ERROR_CAMERA_REQUEST,
nextRequest->mResultExtras);
}
return NULL;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 3848200..96ca7b7 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -26,7 +26,6 @@
#include <utils/Timers.h>
#include <hardware/camera3.h>
#include <camera/CaptureResult.h>
-#include <camera/camera2/ICameraDeviceUser.h>
#include "common/CameraDeviceBase.h"
#include "device3/StatusTracker.h"
@@ -147,13 +146,16 @@
virtual status_t tearDown(int streamId);
+ virtual status_t addBufferListenerForStream(int streamId,
+ wp<camera3::Camera3StreamBufferListener> listener);
+
virtual status_t prepare(int maxCount, int streamId);
virtual uint32_t getDeviceVersion();
virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const;
ssize_t getPointCloudBufferSize() const;
- ssize_t getRawOpaqueBufferSize(uint32_t width, uint32_t height) const;
+ ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height) const;
// Methods called by subclasses
void notifyStatus(bool idle); // updates from StatusTracker
@@ -405,6 +407,11 @@
*/
static nsecs_t getMonoToBoottimeOffset();
+ /**
+ * Helper function to map between legacy and new dataspace enums
+ */
+ static android_dataspace mapToLegacyDataspace(android_dataspace dataSpace);
+
struct RequestTrigger {
// Metadata tag number, e.g. android.control.aePrecaptureTrigger
uint32_t metadataTag;
@@ -649,6 +656,10 @@
// receives the shutter event.
CameraMetadata pendingMetadata;
+ // The metadata of the partial results that framework receives from HAL so far
+ // and has sent out.
+ CameraMetadata collectedPartialResult;
+
// Buffers are added by process_capture_result when output buffers
// return from HAL but framework has not yet received the shutter
// event. They will be returned to the streams when framework receives
@@ -659,19 +670,6 @@
// CONTROL_AE_PRECAPTURE_TRIGGER_CANCEL
AeTriggerCancelOverride_t aeTriggerCancelOverride;
-
- // Fields used by the partial result only
- struct PartialResultInFlight {
- // Set by process_capture_result once 3A has been sent to clients
- bool haveSent3A;
- // Result metadata collected so far, when partial results are in use
- CameraMetadata collectedResult;
-
- PartialResultInFlight():
- haveSent3A(false) {
- }
- } partialResult;
-
// Default constructor needed by KeyedVector
InFlightRequest() :
shutterTimestamp(0),
@@ -707,23 +705,6 @@
const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
/**
- * For the partial result, check if all 3A state fields are available
- * and if so, queue up 3A-only result to the client. Returns true if 3A
- * is sent.
- */
- bool processPartial3AResult(uint32_t frameNumber,
- const CameraMetadata& partial, const CaptureResultExtras& resultExtras);
-
- // Helpers for reading and writing 3A metadata into to/from partial results
- template<typename T>
- bool get3AResult(const CameraMetadata& result, int32_t tag,
- T* value, uint32_t frameNumber);
-
- template<typename T>
- bool insert3AResult(CameraMetadata &result, int32_t tag, const T* value,
- uint32_t frameNumber);
-
- /**
* Override result metadata for cancelling AE precapture trigger applied in
* handleAePrecaptureCancelRequest().
*/
@@ -821,13 +802,24 @@
void returnOutputBuffers(const camera3_stream_buffer_t *outputBuffers,
size_t numBuffers, nsecs_t timestamp);
- // Insert the capture result given the pending metadata, result extras,
+ // Send a partial capture result.
+ void sendPartialCaptureResult(const camera_metadata_t * partialResult,
+ const CaptureResultExtras &resultExtras, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
+ // Send a total capture result given the pending metadata and result extras,
// partial results, and the frame number to the result queue.
void sendCaptureResult(CameraMetadata &pendingMetadata,
CaptureResultExtras &resultExtras,
CameraMetadata &collectedPartialResult, uint32_t frameNumber,
bool reprocess, const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+ // Insert the result to the result queue after updating frame number and overriding AE
+ // trigger cancel.
+ // mOutputLock must be held when calling this function.
+ void insertResultLocked(CaptureResult *result, uint32_t frameNumber,
+ const AeTriggerCancelOverride_t &aeTriggerCancelOverride);
+
/**** Scope for mInFlightLock ****/
// Remove the in-flight request of the given index from mInFlightMap
diff --git a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
index fe04eb1..5bf76bd 100644
--- a/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3DummyStream.cpp
@@ -36,26 +36,26 @@
}
-status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *buffer) {
+status_t Camera3DummyStream::getBufferLocked(camera3_stream_buffer *) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Dummy stream cannot produce buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
}
status_t Camera3DummyStream::returnBufferLocked(
- const camera3_stream_buffer &buffer,
- nsecs_t timestamp) {
+ const camera3_stream_buffer &,
+ nsecs_t) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
}
status_t Camera3DummyStream::returnBufferCheckedLocked(
- const camera3_stream_buffer &buffer,
- nsecs_t timestamp,
- bool output,
+ const camera3_stream_buffer &,
+ nsecs_t,
+ bool,
/*out*/
- sp<Fence> *releaseFenceOut) {
+ sp<Fence>*) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Dummy stream cannot return buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
@@ -70,7 +70,7 @@
Camera3IOStreamBase::dump(fd, args);
}
-status_t Camera3DummyStream::setTransform(int transform) {
+status_t Camera3DummyStream::setTransform(int) {
ATRACE_CALL();
// Do nothing
return OK;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 7dab2e3..f781ded 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -116,6 +116,7 @@
bufferFound = true;
bufferItem = tmp;
mBuffersInFlight.erase(it);
+ break;
}
}
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index ed3ab97..a4714a7 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -55,9 +55,9 @@
mMaxSize(maxSize),
mState(STATE_CONSTRUCTED),
mStatusId(StatusTracker::NO_STATUS_ID),
- oldUsage(0),
- oldMaxBuffers(0),
mStreamUnpreparable(false),
+ mOldUsage(0),
+ mOldMaxBuffers(0),
mPrepared(false),
mPreparedBufferIdx(0),
mLastMaxCount(Camera3StreamInterface::ALLOCATE_PIPELINE_MAX) {
@@ -118,7 +118,7 @@
case STATE_IN_CONFIG:
case STATE_IN_RECONFIG:
// Can start config again with no trouble; but don't redo
- // oldUsage/oldMaxBuffers
+ // mOldUsage/mOldMaxBuffers
return this;
case STATE_CONFIGURED:
if (hasOutstandingBuffersLocked()) {
@@ -132,8 +132,8 @@
return NULL;
}
- oldUsage = camera3_stream::usage;
- oldMaxBuffers = camera3_stream::max_buffers;
+ mOldUsage = camera3_stream::usage;
+ mOldMaxBuffers = camera3_stream::max_buffers;
res = getEndpointUsage(&(camera3_stream::usage));
if (res != OK) {
@@ -196,8 +196,8 @@
// Check if the stream configuration is unchanged, and skip reallocation if
// so. As documented in hardware/camera3.h:configure_streams().
if (mState == STATE_IN_RECONFIG &&
- oldUsage == camera3_stream::usage &&
- oldMaxBuffers == camera3_stream::max_buffers) {
+ mOldUsage == camera3_stream::usage &&
+ mOldMaxBuffers == camera3_stream::max_buffers) {
mState = STATE_CONFIGURED;
return OK;
}
@@ -250,8 +250,8 @@
return INVALID_OPERATION;
}
- camera3_stream::usage = oldUsage;
- camera3_stream::max_buffers = oldMaxBuffers;
+ camera3_stream::usage = mOldUsage;
+ camera3_stream::max_buffers = mOldMaxBuffers;
mState = (mState == STATE_IN_RECONFIG) ? STATE_CONFIGURED : STATE_CONSTRUCTED;
return OK;
@@ -268,7 +268,6 @@
ATRACE_CALL();
Mutex::Autolock l(mLock);
- status_t res = OK;
if (maxCount < 0) {
ALOGE("%s: Stream %d: Can't prepare stream if max buffer count (%d) is < 0",
@@ -340,7 +339,7 @@
// Get next buffer - this may allocate, and take a while for large buffers
res = getBufferLocked( &mPreparedBuffers.editItemAt(mPreparedBufferIdx) );
if (res != OK) {
- ALOGE("%s: Stream %d: Unable to allocate buffer %d during preparation",
+ ALOGE("%s: Stream %d: Unable to allocate buffer %zu during preparation",
__FUNCTION__, mId, mPreparedBufferIdx);
return NO_INIT;
}
@@ -561,7 +560,7 @@
}
void Camera3Stream::fireBufferListenersLocked(
- const camera3_stream_buffer& /*buffer*/, bool acquired, bool output) {
+ const camera3_stream_buffer& buffer, bool acquired, bool output) {
List<wp<Camera3StreamBufferListener> >::iterator it, end;
// TODO: finish implementing
@@ -569,6 +568,7 @@
Camera3StreamBufferListener::BufferInfo info =
Camera3StreamBufferListener::BufferInfo();
info.mOutput = output;
+ info.mError = (buffer.status == CAMERA3_BUFFER_STATUS_ERROR);
// TODO: rest of fields
for (it = mBufferListenerList.begin(), end = mBufferListenerList.end();
@@ -719,7 +719,7 @@
ALOGE("%s: This type of stream does not support input", __FUNCTION__);
return INVALID_OPERATION;
}
-status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer> *producer) {
+status_t Camera3Stream::getInputBufferProducerLocked(sp<IGraphicBufferProducer>*) {
ALOGE("%s: This type of stream does not support input", __FUNCTION__);
return INVALID_OPERATION;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index fe51ab5..c932e253 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -442,8 +442,8 @@
bool mStreamUnpreparable;
private:
- uint32_t oldUsage;
- uint32_t oldMaxBuffers;
+ uint32_t mOldUsage;
+ uint32_t mOldMaxBuffers;
Condition mOutputBufferReturnedSignal;
Condition mInputBufferReturnedSignal;
static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
@@ -468,7 +468,7 @@
size_t mPreparedBufferIdx;
// Number of buffers allocated on last prepare call.
- int mLastMaxCount;
+ size_t mLastMaxCount;
}; // class Camera3Stream
diff --git a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
index 62ea6c0..2db333d 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamBufferListener.h
@@ -34,6 +34,7 @@
uint32_t mScalingMode;
int64_t mTimestamp;
uint64_t mFrameNumber;
+ bool mError;
};
// Buffer was acquired by the HAL
diff --git a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
index 65816e0..3d54460 100644
--- a/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
+++ b/services/camera/libcameraservice/gui/RingBufferConsumer.cpp
@@ -256,9 +256,7 @@
BI_LOGV("Buffer timestamp %" PRId64 ", frame %" PRIu64 " evicted",
item.mTimestamp, item.mFrameNumber);
- size_t currentSize = mBufferItemList.size();
mBufferItemList.erase(accIt);
- assert(mBufferItemList.size() == currentSize - 1);
} else {
BI_LOGW("All buffers pinned, could not find any to release");
return NO_BUFFER_AVAILABLE;
diff --git a/services/camera/libcameraservice/utils/CameraTraces.h b/services/camera/libcameraservice/utils/CameraTraces.h
index d10dbc9..13ca16d 100644
--- a/services/camera/libcameraservice/utils/CameraTraces.h
+++ b/services/camera/libcameraservice/utils/CameraTraces.h
@@ -24,7 +24,7 @@
namespace android {
namespace camera3 {
-class CameraTracesImpl;
+struct CameraTracesImpl;
// Collect a list of the process's stack traces
class CameraTraces {
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index 239b4e1..a5f0751 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -14,8 +14,10 @@
# service executable
include $(CLEAR_VARS)
-LOCAL_SRC_FILES := main_codecservice.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils liblog
+LOCAL_REQUIRED_MODULES_arm := mediacodec-seccomp.policy
+LOCAL_SRC_FILES := main_codecservice.cpp minijail/minijail.cpp
+LOCAL_SHARED_LIBRARIES := libmedia libmediacodecservice libbinder libutils \
+ liblog libminijail
LOCAL_C_INCLUDES := \
$(TOP)/frameworks/av/media/libstagefright \
$(TOP)/frameworks/native/include/media/openmax
@@ -24,4 +26,5 @@
LOCAL_INIT_RC := mediacodec.rc
include $(BUILD_EXECUTABLE)
+include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/main_codecservice.cpp b/services/mediacodec/main_codecservice.cpp
index aedf0c3..a2868c1 100644
--- a/services/mediacodec/main_codecservice.cpp
+++ b/services/mediacodec/main_codecservice.cpp
@@ -1,6 +1,6 @@
/*
**
-** Copyright 2015, The Android Open Source Project
+** Copyright 2016, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
@@ -28,6 +28,7 @@
// from LOCAL_C_INCLUDES
#include "MediaCodecService.h"
+#include "minijail/minijail.h"
using namespace android;
@@ -35,6 +36,7 @@
{
ALOGI("@@@ mediacodecservice starting");
signal(SIGPIPE, SIG_IGN);
+ MiniJail();
strcpy(argv[0], "media.codec");
sp<ProcessState> proc(ProcessState::self());
diff --git a/services/mediacodec/minijail/Android.mk b/services/mediacodec/minijail/Android.mk
new file mode 100644
index 0000000..d2becb4
--- /dev/null
+++ b/services/mediacodec/minijail/Android.mk
@@ -0,0 +1,28 @@
+LOCAL_PATH := $(call my-dir)
+
+ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), arm arm64))
+include $(CLEAR_VARS)
+LOCAL_MODULE := mediacodec-seccomp.policy
+LOCAL_MODULE_CLASS := ETC
+LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+
+# mediacodec runs in 32-bit compatibility mode. For 64-bit architectures,
+# use the 32 bit policy
+ifdef TARGET_2ND_ARCH
+ LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_2ND_ARCH).policy
+else
+ LOCAL_SRC_FILES := $(LOCAL_PATH)/seccomp_policy/mediacodec-seccomp-$(TARGET_ARCH).policy
+endif
+
+# allow device specific additions to the syscall whitelist
+ifneq (,$(wildcard $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy))
+ LOCAL_SRC_FILES += $(BOARD_SECCOMP_POLICY)/mediacodec-seccomp.policy
+endif
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_SRC_FILES)
+ @mkdir -p $(dir $@)
+ $(hide) cat > $@ $^
+
+endif
diff --git a/services/mediacodec/minijail/minijail.cpp b/services/mediacodec/minijail/minijail.cpp
new file mode 100644
index 0000000..72bb1af
--- /dev/null
+++ b/services/mediacodec/minijail/minijail.cpp
@@ -0,0 +1,50 @@
+/*
+**
+** Copyright 2016, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+#include <cutils/log.h>
+#include <libminijail.h>
+
+#include "minijail.h"
+
+namespace android {
+
+/* Must match location in Android.mk */
+static const char kSeccompFilePath[] = "/system/etc/seccomp_policy/mediacodec-seccomp.policy";
+
+int MiniJail()
+{
+ /* no seccomp policy for this architecture */
+ if (access(kSeccompFilePath, R_OK) == -1) {
+ ALOGW("No seccomp filter defined for this architecture.");
+ return 0;
+ }
+
+ struct minijail *jail = minijail_new();
+ if (jail == NULL) {
+ ALOGW("Failed to create minijail.");
+ return -1;
+ }
+
+ minijail_no_new_privs(jail);
+ minijail_log_seccomp_filter_failures(jail);
+ minijail_use_seccomp_filter(jail);
+ minijail_parse_seccomp_filters(jail, kSeccompFilePath);
+ minijail_enter(jail);
+ minijail_destroy(jail);
+ return 0;
+}
+}
diff --git a/services/mediacodec/minijail/minijail.h b/services/mediacodec/minijail/minijail.h
new file mode 100644
index 0000000..ae01470
--- /dev/null
+++ b/services/mediacodec/minijail/minijail.h
@@ -0,0 +1,20 @@
+/*
+**
+** Copyright 2016, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+namespace android {
+int MiniJail();
+}
diff --git a/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
new file mode 100644
index 0000000..0afaa15
--- /dev/null
+++ b/services/mediacodec/minijail/seccomp_policy/mediacodec-seccomp-arm.policy
@@ -0,0 +1,51 @@
+# Organized by frequency of system call - in descending order for
+# best performance.
+futex: 1
+ioctl: 1
+write: 1
+prctl: 1
+clock_gettime: 1
+getpriority: 1
+read: 1
+close: 1
+writev: 1
+dup: 1
+ppoll: 1
+mmap2: 1
+munmap: 1
+mprotect: 1
+madvise: 1
+openat: 1
+sigaltstack: 1
+clone: 1
+setpriority: 1
+getuid32: 1
+fstat64: 1
+pread64: 1
+faccessat: 1
+readlinkat: 1
+exit: 1
+rt_sigprocmask: 1
+set_tid_address: 1
+restart_syscall: 1
+exit_group: 1
+rt_sigreturn: 1
+pipe2: 1
+gettimeofday: 1
+sched_yield: 1
+nanosleep: 1
+lseek: 1
+sched_get_priority_max: 1
+sched_get_priority_min: 1
+statfs64: 1
+sched_setscheduler: 1
+fstatat64: 1
+ugetrlimit: 1
+
+# for attaching to debuggerd on process crash
+sigaction: 1
+tgkill: 1
+socket: 1
+connect: 1
+fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediadrm/mediadrmserver.rc b/services/mediadrm/mediadrmserver.rc
index c22ec7c..374d24b 100644
--- a/services/mediadrm/mediadrmserver.rc
+++ b/services/mediadrm/mediadrmserver.rc
@@ -1,5 +1,5 @@
service mediadrm /system/bin/mediadrmserver
class main
- user mediadrm
- group drmrpc
+ user media
+ group mediadrm drmrpc
ioprio rt 4
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index cd20635..a2b35f6 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -18,12 +18,35 @@
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <utils/Vector.h>
+
#include <media/stagefright/DataSource.h>
#include <media/stagefright/MediaExtractor.h>
#include "MediaExtractorService.h"
namespace android {
+typedef struct {
+ String8 mime;
+ String8 name;
+ pid_t owner;
+ wp<MediaExtractor> extractor;
+ String8 toString() {
+ String8 str = name;
+ str.append(" for mime ");
+ str.append(mime);
+ str.append(String8::format(", pid %d: ", owner));
+ if (extractor.promote() == NULL) {
+ str.append("deleted");
+ } else {
+ str.append("active");
+ }
+ return str;
+ }
+} ExtractorInstance;
+
+static Vector<ExtractorInstance> extractors;
+
sp<IMediaExtractor> MediaExtractorService::makeExtractor(
const sp<IDataSource> &remoteSource, const char *mime) {
ALOGV("@@@ MediaExtractorService::makeExtractor for %s", mime);
@@ -36,9 +59,35 @@
ret.get(),
ret == NULL ? "" : ret->name());
+ if (ret != NULL) {
+ ExtractorInstance ex;
+ ex.mime = mime == NULL ? "NULL" : mime;
+ ex.name = ret->name();
+ ex.owner = IPCThreadState::self()->getCallingPid();
+ ex.extractor = ret;
+
+ if (extractors.size() > 10) {
+ extractors.resize(10);
+ }
+ extractors.push_front(ex);
+ }
+
return ret;
}
+status_t MediaExtractorService::dump(int fd, const Vector<String16>& args) {
+ String8 out;
+ out.append("Recent extractors, most recent first:\n");
+ for (size_t i = 0; i < extractors.size(); i++) {
+ ExtractorInstance ex = extractors.itemAt(i);
+ out.append(" ");
+ out.append(ex.toString());
+ out.append("\n");
+ }
+ write(fd, out.string(), out.size());
+ return OK;
+}
+
status_t MediaExtractorService::onTransact(uint32_t code, const Parcel& data, Parcel* reply,
uint32_t flags)
diff --git a/services/mediaextractor/MediaExtractorService.h b/services/mediaextractor/MediaExtractorService.h
index 9c75042..078af0c 100644
--- a/services/mediaextractor/MediaExtractorService.h
+++ b/services/mediaextractor/MediaExtractorService.h
@@ -34,6 +34,7 @@
static const char* getServiceName() { return "media.extractor"; }
virtual sp<IMediaExtractor> makeExtractor(const sp<IDataSource> &source, const char *mime);
+ virtual status_t dump(int fd, const Vector<String16>& args);
virtual status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply,
uint32_t flags);
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
index 5bbd4e3..cc9a580 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-arm.policy
@@ -32,6 +32,7 @@
gettid: 1
rt_sigprocmask: 1
sched_yield: 1
+ugetrlimit: 1
# for attaching to debuggerd on process crash
sigaction: 1
@@ -39,3 +40,4 @@
socket: 1
connect: 1
fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
index 6c44886..516ca60 100644
--- a/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
+++ b/services/mediaextractor/minijail/seccomp_policy/mediaextractor-seccomp-x86.policy
@@ -28,6 +28,9 @@
exit_group: 1
rt_sigreturn: 1
faccessat: 1
+sched_setscheduler: 1
+ugetrlimit: 1
+getrlimit: 1
# for attaching to debuggerd on process crash
socketcall: 1
@@ -35,3 +38,4 @@
tgkill: 1
rt_sigprocmask: 1
fcntl64: 1
+rt_tgsigqueueinfo: 1
diff --git a/services/medialog/Android.mk b/services/medialog/Android.mk
index 03438bf..88f98cf 100644
--- a/services/medialog/Android.mk
+++ b/services/medialog/Android.mk
@@ -6,10 +6,12 @@
LOCAL_SHARED_LIBRARIES := libmedia libbinder libutils liblog libnbaio
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libmedialogservice
-LOCAL_32_BIT_ONLY := true
-
LOCAL_C_INCLUDES := $(call include-path-for, audio-utils)
+LOCAL_CFLAGS := -Werror -Wall
+
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/medialog/MediaLogService.cpp b/services/medialog/MediaLogService.cpp
index 98a71bb..f85aa13 100644
--- a/services/medialog/MediaLogService.cpp
+++ b/services/medialog/MediaLogService.cpp
@@ -26,6 +26,8 @@
namespace android {
+static const char kDeadlockedString[] = "MediaLogService may be deadlocked\n";
+
void MediaLogService::registerWriter(const sp<IMemory>& shared, size_t size, const char *name)
{
if (IPCThreadState::self()->getCallingUid() != AID_AUDIOSERVER || shared == 0 ||
@@ -54,6 +56,19 @@
}
}
+bool MediaLogService::dumpTryLock(Mutex& mutex)
+{
+ bool locked = false;
+ for (int i = 0; i < kDumpLockRetries; ++i) {
+ if (mutex.tryLock() == NO_ERROR) {
+ locked = true;
+ break;
+ }
+ usleep(kDumpLockSleepUs);
+ }
+ return locked;
+}
+
status_t MediaLogService::dump(int fd, const Vector<String16>& args __unused)
{
// FIXME merge with similar but not identical code at services/audioflinger/ServiceUtilities.cpp
@@ -68,9 +83,22 @@
Vector<NamedReader> namedReaders;
{
- Mutex::Autolock _l(mLock);
+ bool locked = dumpTryLock(mLock);
+
+ // failed to lock - MediaLogService is probably deadlocked
+ if (!locked) {
+ String8 result(kDeadlockedString);
+ if (fd >= 0) {
+ write(fd, result.string(), result.size());
+ } else {
+ ALOGW("%s:", result.string());
+ }
+ return NO_ERROR;
+ }
namedReaders = mNamedReaders;
+ mLock.unlock();
}
+
for (size_t i = 0; i < namedReaders.size(); i++) {
const NamedReader& namedReader = namedReaders[i];
if (fd >= 0) {
diff --git a/services/medialog/MediaLogService.h b/services/medialog/MediaLogService.h
index 2d89a41..c9bf2eb 100644
--- a/services/medialog/MediaLogService.h
+++ b/services/medialog/MediaLogService.h
@@ -43,6 +43,12 @@
uint32_t flags);
private:
+
+ // Internal dump
+ static const int kDumpLockRetries = 50;
+ static const int kDumpLockSleepUs = 20000;
+ static bool dumpTryLock(Mutex& mutex);
+
Mutex mLock;
class NamedReader {
public:
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 3d4e0b5..64534bf 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -43,7 +43,7 @@
return itemsStr;
}
-static bool hasResourceType(String8 type, Vector<MediaResource> resources) {
+static bool hasResourceType(MediaResource::Type type, Vector<MediaResource> resources) {
for (size_t i = 0; i < resources.size(); ++i) {
if (resources[i].mType == type) {
return true;
@@ -52,7 +52,7 @@
return false;
}
-static bool hasResourceType(String8 type, ResourceInfos infos) {
+static bool hasResourceType(MediaResource::Type type, ResourceInfos infos) {
for (size_t i = 0; i < infos.size(); ++i) {
if (hasResourceType(type, infos[i].resources)) {
return true;
@@ -96,8 +96,11 @@
if (binder != NULL) {
sp<IMediaResourceMonitor> service = interface_cast<IMediaResourceMonitor>(binder);
for (size_t i = 0; i < resources.size(); ++i) {
- service->notifyResourceGranted(pid, String16(resources[i].mType),
- String16(resources[i].mSubType), resources[i].mValue);
+ if (resources[i].mSubType == MediaResource::kAudioCodec) {
+ service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_AUDIO_CODEC);
+ } else if (resources[i].mSubType == MediaResource::kVideoCodec) {
+ service->notifyResourceGranted(pid, IMediaResourceMonitor::TYPE_VIDEO_CODEC);
+ }
}
}
}
@@ -275,12 +278,12 @@
const MediaResource *nonSecureCodec = NULL;
const MediaResource *graphicMemory = NULL;
for (size_t i = 0; i < resources.size(); ++i) {
- String8 type = resources[i].mType;
- if (resources[i].mType == kResourceSecureCodec) {
+ MediaResource::Type type = resources[i].mType;
+ if (resources[i].mType == MediaResource::kSecureCodec) {
secureCodec = &resources[i];
- } else if (type == kResourceNonSecureCodec) {
+ } else if (type == MediaResource::kNonSecureCodec) {
nonSecureCodec = &resources[i];
- } else if (type == kResourceGraphicMemory) {
+ } else if (type == MediaResource::kGraphicMemory) {
graphicMemory = &resources[i];
}
}
@@ -288,19 +291,19 @@
// first pass to handle secure/non-secure codec conflict
if (secureCodec != NULL) {
if (!mSupportsMultipleSecureCodecs) {
- if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
return false;
}
}
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, String8(kResourceNonSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kNonSecureCodec, &clients)) {
return false;
}
}
}
if (nonSecureCodec != NULL) {
if (!mSupportsSecureWithNonSecureCodec) {
- if (!getAllClients_l(callingPid, String8(kResourceSecureCodec), &clients)) {
+ if (!getAllClients_l(callingPid, MediaResource::kSecureCodec, &clients)) {
return false;
}
}
@@ -320,11 +323,11 @@
if (clients.size() == 0) {
// if we are here, run the fourth pass to free one codec with the different type.
if (secureCodec != NULL) {
- MediaResource temp(String8(kResourceNonSecureCodec), 1);
+ MediaResource temp(MediaResource::kNonSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
if (nonSecureCodec != NULL) {
- MediaResource temp(String8(kResourceSecureCodec), 1);
+ MediaResource temp(MediaResource::kSecureCodec, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
}
@@ -374,7 +377,7 @@
}
bool ResourceManagerService::getAllClients_l(
- int callingPid, const String8 &type, Vector<sp<IResourceManagerClient>> *clients) {
+ int callingPid, MediaResource::Type type, Vector<sp<IResourceManagerClient>> *clients) {
Vector<sp<IResourceManagerClient>> temp;
for (size_t i = 0; i < mMap.size(); ++i) {
ResourceInfos &infos = mMap.editValueAt(i);
@@ -384,7 +387,7 @@
// some higher/equal priority process owns the resource,
// this request can't be fulfilled.
ALOGE("getAllClients_l: can't reclaim resource %s from pid %d",
- type.string(), mMap.keyAt(i));
+ asString(type), mMap.keyAt(i));
return false;
}
temp.push_back(infos[j].client);
@@ -392,7 +395,7 @@
}
}
if (temp.size() == 0) {
- ALOGV("getAllClients_l: didn't find any resource %s", type.string());
+ ALOGV("getAllClients_l: didn't find any resource %s", asString(type));
return true;
}
clients->appendVector(temp);
@@ -400,7 +403,7 @@
}
bool ResourceManagerService::getLowestPriorityBiggestClient_l(
- int callingPid, const String8 &type, sp<IResourceManagerClient> *client) {
+ int callingPid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
int lowestPriorityPid;
int lowestPriority;
int callingPriority;
@@ -425,7 +428,7 @@
}
bool ResourceManagerService::getLowestPriorityPid_l(
- const String8 &type, int *lowestPriorityPid, int *lowestPriority) {
+ MediaResource::Type type, int *lowestPriorityPid, int *lowestPriority) {
int pid = -1;
int priority = -1;
for (size_t i = 0; i < mMap.size(); ++i) {
@@ -472,7 +475,7 @@
}
bool ResourceManagerService::getBiggestClient_l(
- int pid, const String8 &type, sp<IResourceManagerClient> *client) {
+ int pid, MediaResource::Type type, sp<IResourceManagerClient> *client) {
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
ALOGE("getBiggestClient_l: can't find resource info for pid %d", pid);
@@ -495,7 +498,7 @@
}
if (clientTemp == NULL) {
- ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", type.string(), pid);
+ ALOGE("getBiggestClient_l: can't find resource type %s for pid %d", asString(type), pid);
return false;
}
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index 4769373..8f6fe9a 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -79,22 +79,22 @@
// Gets the list of all the clients who own the specified resource type.
// Returns false if any client belongs to a process with higher priority than the
// calling process. The clients will remain unchanged if returns false.
- bool getAllClients_l(int callingPid, const String8 &type,
+ bool getAllClients_l(int callingPid, MediaResource::Type type,
Vector<sp<IResourceManagerClient>> *clients);
// Gets the client who owns specified resource type from lowest possible priority process.
// Returns false if the calling process priority is not higher than the lowest process
// priority. The client will remain unchanged if returns false.
- bool getLowestPriorityBiggestClient_l(int callingPid, const String8 &type,
+ bool getLowestPriorityBiggestClient_l(int callingPid, MediaResource::Type type,
sp<IResourceManagerClient> *client);
// Gets lowest priority process that has the specified resource type.
// Returns false if failed. The output parameters will remain unchanged if failed.
- bool getLowestPriorityPid_l(const String8 &type, int *pid, int *priority);
+ bool getLowestPriorityPid_l(MediaResource::Type type, int *pid, int *priority);
// Gets the client who owns biggest piece of specified resource type from pid.
// Returns false if failed. The client will remain unchanged if failed.
- bool getBiggestClient_l(int pid, const String8 &type, sp<IResourceManagerClient> *client);
+ bool getBiggestClient_l(int pid, MediaResource::Type type, sp<IResourceManagerClient> *client);
bool isCallingPriorityHigher_l(int callingPid, int pid);
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index cffedc6..62b7711 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -152,24 +152,24 @@
void addResource() {
// kTestPid1 mTestClient1
Vector<MediaResource> resources1;
- resources1.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+ resources1.push_back(MediaResource(MediaResource::kSecureCodec, 1));
mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources1);
- resources1.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+ resources1.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
Vector<MediaResource> resources11;
- resources11.push_back(MediaResource(String8(kResourceGraphicMemory), 200));
+ resources11.push_back(MediaResource(MediaResource::kGraphicMemory, 200));
mService->addResource(kTestPid1, getId(mTestClient1), mTestClient1, resources11);
// kTestPid2 mTestClient2
Vector<MediaResource> resources2;
- resources2.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
- resources2.push_back(MediaResource(String8(kResourceGraphicMemory), 300));
+ resources2.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+ resources2.push_back(MediaResource(MediaResource::kGraphicMemory, 300));
mService->addResource(kTestPid2, getId(mTestClient2), mTestClient2, resources2);
// kTestPid2 mTestClient3
Vector<MediaResource> resources3;
mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
- resources3.push_back(MediaResource(String8(kResourceSecureCodec), 1));
- resources3.push_back(MediaResource(String8(kResourceGraphicMemory), 100));
+ resources3.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ resources3.push_back(MediaResource(MediaResource::kGraphicMemory, 100));
mService->addResource(kTestPid2, getId(mTestClient3), mTestClient3, resources3);
const PidResourceInfosMap &map = mService->mMap;
@@ -237,14 +237,12 @@
void testGetAllClients() {
addResource();
- String8 type = String8(kResourceSecureCodec);
- String8 unknowType = String8("unknowType");
+ MediaResource::Type type = MediaResource::kSecureCodec;
Vector<sp<IResourceManagerClient> > clients;
EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, &clients));
// some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
// will fail.
EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, &clients));
- EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, unknowType, &clients));
EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, &clients));
EXPECT_EQ(2u, clients.size());
@@ -254,8 +252,8 @@
void testReclaimResourceSecure() {
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+ resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
// ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
{
@@ -356,7 +354,7 @@
mService->mSupportsSecureWithNonSecureCodec = true;
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceSecureCodec), 1));
+ resources.push_back(MediaResource(MediaResource::kSecureCodec, 1));
EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
// secure codec from lowest process got reclaimed
@@ -374,8 +372,8 @@
void testReclaimResourceNonSecure() {
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
- resources.push_back(MediaResource(String8(kResourceGraphicMemory), 150));
+ resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
+ resources.push_back(MediaResource(MediaResource::kGraphicMemory, 150));
// ### secure codec can't coexist with non-secure codec ###
{
@@ -429,7 +427,7 @@
mService->mSupportsSecureWithNonSecureCodec = true;
Vector<MediaResource> resources;
- resources.push_back(MediaResource(String8(kResourceNonSecureCodec), 1));
+ resources.push_back(MediaResource(MediaResource::kNonSecureCodec, 1));
EXPECT_TRUE(mService->reclaimResource(kHighPriorityPid, resources));
// one non secure codec from lowest process got reclaimed
@@ -445,7 +443,7 @@
}
void testGetLowestPriorityBiggestClient() {
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
@@ -454,8 +452,8 @@
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, &client));
EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, &client));
- // kTestPid1 is the lowest priority process with kResourceGraphicMemory.
- // mTestClient1 has the largest kResourceGraphicMemory within kTestPid1.
+ // kTestPid1 is the lowest priority process with MediaResource::kGraphicMemory.
+ // mTestClient1 has the largest MediaResource::kGraphicMemory within kTestPid1.
EXPECT_EQ(mTestClient1, client);
}
@@ -464,7 +462,7 @@
int priority;
TestProcessInfo processInfo;
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
EXPECT_FALSE(mService->getLowestPriorityPid_l(type, &pid, &priority));
addResource();
@@ -475,7 +473,7 @@
processInfo.getPriority(kTestPid1, &priority1);
EXPECT_EQ(priority1, priority);
- type = String8(kResourceNonSecureCodec);
+ type = MediaResource::kNonSecureCodec;
EXPECT_TRUE(mService->getLowestPriorityPid_l(type, &pid, &priority));
EXPECT_EQ(kTestPid2, pid);
int priority2;
@@ -484,7 +482,7 @@
}
void testGetBiggestClient() {
- String8 type = String8(kResourceGraphicMemory);
+ MediaResource::Type type = MediaResource::kGraphicMemory;
sp<IResourceManagerClient> client;
EXPECT_FALSE(mService->getBiggestClient_l(kTestPid2, type, &client));
diff --git a/services/radio/Android.mk b/services/radio/Android.mk
index 6aae31d..f5d74d3 100644
--- a/services/radio/Android.mk
+++ b/services/radio/Android.mk
@@ -33,6 +33,8 @@
LOCAL_CFLAGS += -Wall -Wextra -Werror
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libradioservice
include $(BUILD_SHARED_LIBRARY)
diff --git a/services/soundtrigger/Android.mk b/services/soundtrigger/Android.mk
index ecc49ae..e8e18b8 100644
--- a/services/soundtrigger/Android.mk
+++ b/services/soundtrigger/Android.mk
@@ -38,6 +38,8 @@
LOCAL_C_INCLUDES += \
$(TOPDIR)frameworks/av/services/audioflinger
+LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
+
LOCAL_MODULE:= libsoundtriggerservice
include $(BUILD_SHARED_LIBRARY)