Merge "StagefrightRecorder:close next filedescriptor"
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 793cbf4..e584ffb 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -81,6 +81,7 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libstagefright_xmlparser@1.0.so)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libstagefright_soft_*)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/vndk/libstagefright_soft_*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib/libaudiopolicyengineconfig*)
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
diff --git a/apex/Android.bp b/apex/Android.bp
index 9455290..0a9551d 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -42,6 +42,9 @@
},
key: "com.android.media.key",
certificate: ":com.android.media.certificate",
+
+ // Use a custom AndroidManifest.xml for API targeting.
+ androidManifest: ":com.android.media-androidManifest",
}
apex {
@@ -50,14 +53,46 @@
defaults: ["com.android.media-defaults"],
}
+filegroup {
+ name: "com.android.media-androidManifest",
+ srcs: ["AndroidManifest-media.xml"],
+}
+
+filegroup {
+ name: "com.android.media.swcodec-androidManifest",
+ srcs: ["AndroidManifest-swcodec.xml"],
+}
+
apex_defaults {
name: "com.android.media.swcodec-defaults",
- native_shared_libs: [
- "libmedia_codecserviceregistrant",
+ binaries: [
+ "mediaswcodec",
+ ],
+ prebuilts: [
+ "com.android.media.swcodec-mediaswcodec.rc",
+ "com.android.media.swcodec-ld.config.txt",
+ "mediaswcodec.policy",
],
use_vendor: true,
key: "com.android.media.swcodec.key",
certificate: ":com.android.media.swcodec.certificate",
+
+ // Use a custom AndroidManifest.xml for API targeting.
+ androidManifest: ":com.android.media.swcodec-androidManifest",
+}
+
+prebuilt_etc {
+ name: "com.android.media.swcodec-mediaswcodec.rc",
+ src: "mediaswcodec.rc",
+ filename: "init.rc",
+ installable: false,
+}
+
+prebuilt_etc {
+ name: "com.android.media.swcodec-ld.config.txt",
+ src: "ld.config.txt",
+ filename: "ld.config.txt",
+ installable: false,
}
apex {
diff --git a/apex/AndroidManifest-media.xml b/apex/AndroidManifest-media.xml
new file mode 100644
index 0000000..17d3f3a
--- /dev/null
+++ b/apex/AndroidManifest-media.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.media">
+ <!-- APEX does not have classes.dex -->
+ <application android:hasCode="false" />
+ <uses-sdk
+ android:minSdkVersion="28"
+ android:targetSdkVersion="28"
+ />
+</manifest>
diff --git a/apex/AndroidManifest-swcodec.xml b/apex/AndroidManifest-swcodec.xml
new file mode 100644
index 0000000..bd20dc0
--- /dev/null
+++ b/apex/AndroidManifest-swcodec.xml
@@ -0,0 +1,25 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!--
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ -->
+<manifest xmlns:android="http://schemas.android.com/apk/res/android"
+ package="com.android.media.swcodec">
+ <!-- APEX does not have classes.dex -->
+ <application android:hasCode="false" />
+ <uses-sdk
+ android:minSdkVersion="28"
+ android:targetSdkVersion="28"
+ />
+</manifest>
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
new file mode 100644
index 0000000..a3e96c4
--- /dev/null
+++ b/apex/ld.config.txt
@@ -0,0 +1,74 @@
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Bionic loader config file for the media swcodec APEX.
+#
+# There are no versioned APEX paths here - this APEX module does not support
+# having several versions mounted.
+
+dir.swcodec = /apex/com.android.media.swcodec/bin/
+
+[swcodec]
+additional.namespaces = platform
+
+###############################################################################
+# "default" namespace
+#
+# This namespace is for the binaries and libraries on the swcodec APEX.
+###############################################################################
+
+namespace.default.isolated = true
+namespace.default.visible = true
+
+namespace.default.search.paths = /apex/com.android.media.swcodec/${LIB}
+namespace.default.asan.search.paths = /apex/com.android.media.swcodec/${LIB}
+
+# Keep the below in sync with "sphal" namespace in system's /etc/ld.config.txt
+# Codec2 has dependencies on some SP-hals (e.g. android.hardware.graphics.mapper@2.0)
+# These are dlopen'ed by libvndksupport.so.
+namespace.default.search.paths += /odm/${LIB}
+namespace.default.search.paths += /vendor/${LIB}
+
+namespace.default.permitted.paths = /odm/${LIB}
+namespace.default.permitted.paths += /vendor/${LIB}
+namespace.default.permitted.paths += /vendor/${LIB}/hw
+namespace.default.permitted.paths += /system/vendor/${LIB}
+
+namespace.default.asan.search.paths += /data/asan/odm/${LIB}
+namespace.default.asan.search.paths += /odm/${LIB}
+namespace.default.asan.search.paths += /data/asan/vendor/${LIB}
+namespace.default.asan.search.paths += /vendor/${LIB}
+
+namespace.default.asan.permitted.paths = /data/asan/odm/${LIB}
+namespace.default.asan.permitted.paths += /odm/${LIB}
+namespace.default.asan.permitted.paths += /data/asan/vendor/${LIB}
+namespace.default.asan.permitted.paths += /vendor/${LIB}
+
+namespace.default.links = platform
+
+# TODO: replace the following when apex has a way to auto-generate this list
+# namespace.default.link.platform.shared_libs = %LLNDK_LIBRARIES%
+# namespace.default.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
+namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so
+
+###############################################################################
+# "platform" namespace
+#
+# This namespace is for linking to LLNDK and ASAN libraries on the system.
+###############################################################################
+
+namespace.platform.isolated = true
+
+namespace.platform.search.paths = /system/${LIB}
+namespace.platform.asan.search.paths = /data/asan/system/${LIB}
+
+# /system/lib/libc.so, etc are symlinks to /bionic/lib/libc.so, etc.
+# Add /bionic/lib to the permitted paths because linker uses realpath(3)
+# to check the accessibility of the lib. We could add this to search.paths
+# instead but that makes the resolution of bionic libs be dependent on
+# the order of /system/lib and /bionic/lib in search.paths. If /bionic/lib
+# is after /system/lib, then /bionic/lib is never tried because libc.so
+# is always found in /system/lib but fails to pass the accessibility test
+# because of its realpath. It's better to not depend on the ordering if
+# possible.
+namespace.platform.permitted.paths = /bionic/${LIB}
+namespace.platform.asan.permitted.paths = /bionic/${LIB}
diff --git a/apex/mediaswcodec.rc b/apex/mediaswcodec.rc
new file mode 100644
index 0000000..d17481b
--- /dev/null
+++ b/apex/mediaswcodec.rc
@@ -0,0 +1,7 @@
+service media.swcodec /apex/com.android.media.swcodec/bin/mediaswcodec
+ class main
+ user mediacodec
+ group camera drmrpc mediadrm
+ override
+ ioprio rt 4
+ writepid /dev/cpuset/foreground/tasks
diff --git a/camera/OWNERS b/camera/OWNERS
index 18acfee..d6b95da 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,6 +1,8 @@
-cychen@google.com
epeev@google.com
etalvala@google.com
+jchowdhary@google.com
shuzhenwang@google.com
yinchiayeh@google.com
+# backup owner
+cychen@google.com
zhijunhe@google.com
diff --git a/camera/aidl/android/hardware/ICameraService.aidl b/camera/aidl/android/hardware/ICameraService.aidl
index 0e969c7..3e8992a 100644
--- a/camera/aidl/android/hardware/ICameraService.aidl
+++ b/camera/aidl/android/hardware/ICameraService.aidl
@@ -108,7 +108,7 @@
*
* Also returns the set of currently-known camera IDs and state of each device.
* Adding a listener will trigger the torch status listener to fire for all
- * devices that have a flash unit
+ * devices that have a flash unit.
*/
CameraStatus[] addListener(ICameraServiceListener listener);
diff --git a/camera/aidl/android/hardware/ICameraServiceListener.aidl b/camera/aidl/android/hardware/ICameraServiceListener.aidl
index f871ce4..e9dcbdb 100644
--- a/camera/aidl/android/hardware/ICameraServiceListener.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceListener.aidl
@@ -76,4 +76,11 @@
const int TORCH_STATUS_UNKNOWN = -1;
oneway void onTorchStatusChanged(int status, String cameraId);
+
+ /**
+ * Notify registered clients about camera access priority changes.
+ * Clients which were previously unable to open a certain camera device
+ * can retry after receiving this callback.
+ */
+ oneway void onCameraAccessPrioritiesChanged();
}
diff --git a/camera/ndk/NdkCameraCaptureSession.cpp b/camera/ndk/NdkCameraCaptureSession.cpp
index ab796fb..1ac8482 100644
--- a/camera/ndk/NdkCameraCaptureSession.cpp
+++ b/camera/ndk/NdkCameraCaptureSession.cpp
@@ -105,7 +105,9 @@
if (session->isClosed()) {
ALOGE("%s: session %p is already closed", __FUNCTION__, session);
- *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ if (captureSequenceId) {
+ *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ }
return ACAMERA_ERROR_SESSION_CLOSED;
}
@@ -127,7 +129,9 @@
if (session->isClosed()) {
ALOGE("%s: session %p is already closed", __FUNCTION__, session);
- *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ if (captureSequenceId) {
+ *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ }
return ACAMERA_ERROR_SESSION_CLOSED;
}
@@ -149,7 +153,9 @@
if (session->isClosed()) {
ALOGE("%s: session %p is already closed", __FUNCTION__, session);
- *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ if (captureSequenceId) {
+ *captureSequenceId = CAPTURE_SEQUENCE_ID_NONE;
+ }
return ACAMERA_ERROR_SESSION_CLOSED;
}
diff --git a/camera/ndk/NdkCameraDevice.cpp b/camera/ndk/NdkCameraDevice.cpp
index 98608da..09b85d5 100644
--- a/camera/ndk/NdkCameraDevice.cpp
+++ b/camera/ndk/NdkCameraDevice.cpp
@@ -78,7 +78,34 @@
ALOGE("%s: unknown template ID %d", __FUNCTION__, templateId);
return ACAMERA_ERROR_INVALID_PARAMETER;
}
- return device->createCaptureRequest(templateId, request);
+ return device->createCaptureRequest(templateId, nullptr /*physicalIdList*/, request);
+}
+
+EXPORT
+camera_status_t ACameraDevice_createCaptureRequest_withPhysicalIds(
+ const ACameraDevice* device,
+ ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalCameraIdList,
+ ACaptureRequest** request) {
+ ATRACE_CALL();
+ if (device == nullptr || request == nullptr || physicalCameraIdList == nullptr) {
+ ALOGE("%s: invalid argument! device %p request %p, physicalCameraIdList %p",
+ __FUNCTION__, device, request, physicalCameraIdList);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ switch (templateId) {
+ case TEMPLATE_PREVIEW:
+ case TEMPLATE_STILL_CAPTURE:
+ case TEMPLATE_RECORD:
+ case TEMPLATE_VIDEO_SNAPSHOT:
+ case TEMPLATE_ZERO_SHUTTER_LAG:
+ case TEMPLATE_MANUAL:
+ break;
+ default:
+ ALOGE("%s: unknown template ID %d", __FUNCTION__, templateId);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ return device->createCaptureRequest(templateId, physicalCameraIdList, request);
}
EXPORT
diff --git a/camera/ndk/NdkCameraManager.cpp b/camera/ndk/NdkCameraManager.cpp
index 8742d9c..23d01ef 100644
--- a/camera/ndk/NdkCameraManager.cpp
+++ b/camera/ndk/NdkCameraManager.cpp
@@ -105,6 +105,60 @@
}
EXPORT
+camera_status_t ACameraManager_registerExtendedAvailabilityCallback(
+ ACameraManager* /*manager*/, const ACameraManager_ExtendedAvailabilityCallbacks *callback) {
+ ATRACE_CALL();
+ if (callback == nullptr) {
+ ALOGE("%s: invalid argument! callback is null!", __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ if ((callback->availabilityCallbacks.onCameraAvailable == nullptr) ||
+ (callback->availabilityCallbacks.onCameraUnavailable == nullptr) ||
+ (callback->onCameraAccessPrioritiesChanged == nullptr)) {
+ ALOGE("%s: invalid argument! callback %p, "
+ "onCameraAvailable %p, onCameraUnavailable %p onCameraAccessPrioritiesChanged %p",
+ __FUNCTION__, callback,
+ callback->availabilityCallbacks.onCameraAvailable,
+ callback->availabilityCallbacks.onCameraUnavailable,
+ callback->onCameraAccessPrioritiesChanged);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ auto reservedEntriesCount = sizeof(callback->reserved) / sizeof(callback->reserved[0]);
+ for (size_t i = 0; i < reservedEntriesCount; i++) {
+ if (callback->reserved[i] != nullptr) {
+ ALOGE("%s: invalid argument! callback reserved entries must be set to NULL",
+ __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ }
+ CameraManagerGlobal::getInstance().registerExtendedAvailabilityCallback(callback);
+ return ACAMERA_OK;
+}
+
+EXPORT
+camera_status_t ACameraManager_unregisterExtendedAvailabilityCallback(
+ ACameraManager* /*manager*/, const ACameraManager_ExtendedAvailabilityCallbacks *callback) {
+ ATRACE_CALL();
+ if (callback == nullptr) {
+ ALOGE("%s: invalid argument! callback is null!", __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ if ((callback->availabilityCallbacks.onCameraAvailable == nullptr) ||
+ (callback->availabilityCallbacks.onCameraUnavailable == nullptr) ||
+ (callback->onCameraAccessPrioritiesChanged == nullptr)) {
+ ALOGE("%s: invalid argument! callback %p, "
+ "onCameraAvailable %p, onCameraUnavailable %p onCameraAccessPrioritiesChanged %p",
+ __FUNCTION__, callback,
+ callback->availabilityCallbacks.onCameraAvailable,
+ callback->availabilityCallbacks.onCameraUnavailable,
+ callback->onCameraAccessPrioritiesChanged);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ CameraManagerGlobal::getInstance().unregisterExtendedAvailabilityCallback(callback);
+ return ACAMERA_OK;
+}
+
+EXPORT
camera_status_t ACameraManager_getCameraCharacteristics(
ACameraManager* mgr, const char* cameraId, ACameraMetadata** chars){
ATRACE_CALL();
diff --git a/camera/ndk/NdkCaptureRequest.cpp b/camera/ndk/NdkCaptureRequest.cpp
index c64de3e..87de4a9 100644
--- a/camera/ndk/NdkCaptureRequest.cpp
+++ b/camera/ndk/NdkCaptureRequest.cpp
@@ -98,6 +98,27 @@
}
EXPORT
+camera_status_t ACaptureRequest_getConstEntry_physicalCamera(
+ const ACaptureRequest* req, const char* physicalId,
+ uint32_t tag, ACameraMetadata_const_entry* entry) {
+ ATRACE_CALL();
+ if (req == nullptr || entry == nullptr || physicalId == nullptr) {
+ ALOGE("%s: invalid argument! req %p, tag 0x%x, entry %p, physicalId %p",
+ __FUNCTION__, req, tag, entry, physicalId);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ const auto& physicalSettings = req->physicalSettings.find(physicalId);
+ if (physicalSettings == req->physicalSettings.end()) {
+ ALOGE("%s: Failed to find metadata for physical camera id %s",
+ __FUNCTION__, physicalId);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+
+ return physicalSettings->second->getConstEntry(tag, entry);
+}
+
+EXPORT
camera_status_t ACaptureRequest_getAllTags(
const ACaptureRequest* req, /*out*/int32_t* numTags, /*out*/const uint32_t** tags) {
ATRACE_CALL();
@@ -131,6 +152,34 @@
#undef SET_ENTRY
+#define SET_PHYSICAL_ENTRY(NAME,NDK_TYPE) \
+EXPORT \
+camera_status_t ACaptureRequest_setEntry_physicalCamera_##NAME( \
+ ACaptureRequest* req, const char* physicalId, uint32_t tag, \
+ uint32_t count, const NDK_TYPE* data) { \
+ ATRACE_CALL(); \
+ if (req == nullptr || (count > 0 && data == nullptr) || physicalId == nullptr) { \
+ ALOGE("%s: invalid argument! req %p, tag 0x%x, count %d, data 0x%p, physicalId %p", \
+ __FUNCTION__, req, tag, count, data, physicalId); \
+ return ACAMERA_ERROR_INVALID_PARAMETER; \
+ } \
+ if (req->physicalSettings.find(physicalId) == req->physicalSettings.end()) { \
+ ALOGE("%s: Failed to find metadata for physical camera id %s", \
+ __FUNCTION__, physicalId); \
+ return ACAMERA_ERROR_INVALID_PARAMETER; \
+ } \
+ return req->physicalSettings[physicalId]->update(tag, count, data); \
+}
+
+SET_PHYSICAL_ENTRY(u8,uint8_t)
+SET_PHYSICAL_ENTRY(i32,int32_t)
+SET_PHYSICAL_ENTRY(float,float)
+SET_PHYSICAL_ENTRY(double,double)
+SET_PHYSICAL_ENTRY(i64,int64_t)
+SET_PHYSICAL_ENTRY(rational,ACameraMetadata_rational)
+
+#undef SET_PHYSICAL_ENTRY
+
EXPORT
void ACaptureRequest_free(ACaptureRequest* request) {
ATRACE_CALL();
@@ -138,6 +187,7 @@
return;
}
request->settings.clear();
+ request->physicalSettings.clear();
delete request->targets;
delete request;
return;
@@ -174,6 +224,9 @@
ACaptureRequest* pRequest = new ACaptureRequest();
pRequest->settings = new ACameraMetadata(*(src->settings));
+ for (const auto& entry : src->physicalSettings) {
+ pRequest->physicalSettings[entry.first] = new ACameraMetadata(*(entry.second));
+ }
pRequest->targets = new ACameraOutputTargets();
*(pRequest->targets) = *(src->targets);
pRequest->context = src->context;
diff --git a/camera/ndk/impl/ACameraDevice.cpp b/camera/ndk/impl/ACameraDevice.cpp
index d8a5765..5e4fcd0 100644
--- a/camera/ndk/impl/ACameraDevice.cpp
+++ b/camera/ndk/impl/ACameraDevice.cpp
@@ -76,7 +76,7 @@
__FUNCTION__, strerror(-err), err);
setCameraDeviceErrorLocked(ACAMERA_ERROR_CAMERA_DEVICE);
}
- mHandler = new CallbackHandler();
+ mHandler = new CallbackHandler(id);
mCbLooper->registerHandler(mHandler);
const CameraMetadata& metadata = mChars->getInternalData();
@@ -97,6 +97,14 @@
mShadingMapSize[0] = entry.data.i32[0];
mShadingMapSize[1] = entry.data.i32[1];
}
+
+ size_t physicalIdCnt = 0;
+ const char*const* physicalCameraIds;
+ if (mChars->isLogicalMultiCamera(&physicalIdCnt, &physicalCameraIds)) {
+ for (size_t i = 0; i < physicalIdCnt; i++) {
+ mPhysicalIds.push_back(physicalCameraIds[i]);
+ }
+ }
}
// Device close implementation
@@ -129,8 +137,29 @@
camera_status_t
CameraDevice::createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalIdList,
ACaptureRequest** request) const {
Mutex::Autolock _l(mDeviceLock);
+
+ if (physicalIdList != nullptr) {
+ if (physicalIdList->numCameras > static_cast<int>(mPhysicalIds.size())) {
+ ALOGE("%s: physicalIdList size %d exceeds number of available physical cameras %zu",
+ __FUNCTION__, physicalIdList->numCameras, mPhysicalIds.size());
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ for (auto i = 0; i < physicalIdList->numCameras; i++) {
+ if (physicalIdList->cameraIds[i] == nullptr) {
+ ALOGE("%s: physicalId is null!", __FUNCTION__);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ if (mPhysicalIds.end() == std::find(
+ mPhysicalIds.begin(), mPhysicalIds.end(), physicalIdList->cameraIds[i])) {
+ ALOGE("%s: Invalid physicalId %s!", __FUNCTION__, physicalIdList->cameraIds[i]);
+ return ACAMERA_ERROR_INVALID_PARAMETER;
+ }
+ }
+ }
+
camera_status_t ret = checkCameraClosedOrErrorLocked();
if (ret != ACAMERA_OK) {
return ret;
@@ -151,6 +180,12 @@
}
ACaptureRequest* outReq = new ACaptureRequest();
outReq->settings = new ACameraMetadata(rawRequest.release(), ACameraMetadata::ACM_REQUEST);
+ if (physicalIdList != nullptr) {
+ for (auto i = 0; i < physicalIdList->numCameras; i++) {
+ outReq->physicalSettings.emplace(physicalIdList->cameraIds[i],
+ new ACameraMetadata(*(outReq->settings)));
+ }
+ }
outReq->targets = new ACameraOutputTargets();
*request = outReq;
return ACAMERA_OK;
@@ -275,8 +310,12 @@
const ACaptureRequest* request, /*out*/sp<CaptureRequest>& outReq) {
camera_status_t ret;
sp<CaptureRequest> req(new CaptureRequest());
- req->mPhysicalCameraSettings.push_back({std::string(mCameraId.string()),
+ req->mPhysicalCameraSettings.push_back({getId(),
request->settings->getInternalData()});
+ for (auto& entry : request->physicalSettings) {
+ req->mPhysicalCameraSettings.push_back({entry.first,
+ entry.second->getInternalData()});
+ }
req->mIsReprocess = false; // NDK does not support reprocessing yet
req->mContext = request->context;
req->mSurfaceConverted = true; // set to true, and fill in stream/surface idx to speed up IPC
@@ -320,10 +359,17 @@
}
ACaptureRequest*
-CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req) {
+CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req, const std::string& deviceId) {
ACaptureRequest* pRequest = new ACaptureRequest();
- CameraMetadata clone = req->mPhysicalCameraSettings.begin()->settings;
- pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
+ for (auto& entry : req->mPhysicalCameraSettings) {
+ CameraMetadata clone = entry.settings;
+ if (entry.id == deviceId) {
+ pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
+ } else {
+ pRequest->physicalSettings.emplace(entry.id,
+ new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST));
+ }
+ }
pRequest->targets = new ACameraOutputTargets();
for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
ANativeWindow* anw = static_cast<ANativeWindow*>(req->mSurfaceList[i].get());
@@ -340,6 +386,7 @@
return;
}
req->settings.clear();
+ req->physicalSettings.clear();
delete req->targets;
delete req;
}
@@ -786,6 +833,9 @@
return;
}
+CameraDevice::CallbackHandler::CallbackHandler(const char* id) : mId(id) {
+}
+
void CameraDevice::CallbackHandler::onMessageReceived(
const sp<AMessage> &msg) {
switch (msg->what()) {
@@ -927,7 +977,7 @@
ALOGE("%s: Cannot find timestamp!", __FUNCTION__);
return;
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
(*onStart)(context, session.get(), request, timestamp);
freeACaptureRequest(request);
break;
@@ -950,7 +1000,7 @@
return;
}
sp<ACameraMetadata> result(static_cast<ACameraMetadata*>(obj.get()));
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
(*onResult)(context, session.get(), request, result.get());
freeACaptureRequest(request);
break;
@@ -1006,7 +1056,7 @@
physicalMetadataCopyPtrs.push_back(physicalMetadataCopy[i].get());
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
(*onResult)(context, session.get(), request, result.get(),
physicalResultInfo.size(), physicalCameraIdPtrs.data(),
physicalMetadataCopyPtrs.data());
@@ -1034,7 +1084,7 @@
static_cast<CameraCaptureFailure*>(obj.get()));
ACameraCaptureFailure* failure =
static_cast<ACameraCaptureFailure*>(failureSp.get());
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
(*onFail)(context, session.get(), request, failure);
freeACaptureRequest(request);
break;
@@ -1111,7 +1161,7 @@
return;
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, mId);
(*onBufferLost)(context, session.get(), request, anw, frameNumber);
freeACaptureRequest(request);
break;
diff --git a/camera/ndk/impl/ACameraDevice.h b/camera/ndk/impl/ACameraDevice.h
index d0f363b..103efd5 100644
--- a/camera/ndk/impl/ACameraDevice.h
+++ b/camera/ndk/impl/ACameraDevice.h
@@ -68,6 +68,7 @@
camera_status_t createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalIdList,
ACaptureRequest** request) const;
camera_status_t createCaptureSession(
@@ -145,7 +146,8 @@
camera_status_t allocateCaptureRequest(
const ACaptureRequest* request, sp<CaptureRequest>& outReq);
- static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
+ static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req,
+ const std::string& deviceId);
static void freeACaptureRequest(ACaptureRequest*);
// only For session to hold device lock
@@ -230,9 +232,11 @@
class CallbackHandler : public AHandler {
public:
+ explicit CallbackHandler(const char* id);
void onMessageReceived(const sp<AMessage> &msg) override;
private:
+ std::string mId;
// This handler will cache all capture session sp until kWhatCleanUpSessions
// is processed. This is used to guarantee the last session reference is always
// being removed in callback thread without holding camera device lock
@@ -327,6 +331,7 @@
// Misc variables
int32_t mShadingMapSize[2]; // const after constructor
int32_t mPartialResultCount; // const after constructor
+ std::vector<std::string> mPhysicalIds; // const after constructor
};
@@ -351,8 +356,9 @@
camera_status_t createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalCameraIdList,
ACaptureRequest** request) const {
- return mDevice->createCaptureRequest(templateId, request);
+ return mDevice->createCaptureRequest(templateId, physicalCameraIdList, request);
}
camera_status_t createCaptureSession(
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 7d6ecac..9d40fd7 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -193,6 +193,20 @@
}
}
+void CameraManagerGlobal::registerExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks *callback) {
+ Mutex::Autolock _l(mLock);
+ Callback cb(callback);
+ mCallbacks.insert(cb);
+}
+
+void CameraManagerGlobal::unregisterExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks *callback) {
+ Mutex::Autolock _l(mLock);
+ Callback cb(callback);
+ mCallbacks.erase(cb);
+}
+
void CameraManagerGlobal::registerAvailabilityCallback(
const ACameraManager_AvailabilityCallbacks *callback) {
Mutex::Autolock _l(mLock);
@@ -289,12 +303,40 @@
(*cb)(context, cameraId.c_str());
break;
}
+ case kWhatSendSingleAccessCallback:
+ {
+ ACameraManager_AccessPrioritiesChangedCallback cb;
+ void* context;
+ AString cameraId;
+ bool found = msg->findPointer(kCallbackFpKey, (void**) &cb);
+ if (!found) {
+ ALOGE("%s: Cannot find camera callback fp!", __FUNCTION__);
+ return;
+ }
+ found = msg->findPointer(kContextKey, &context);
+ if (!found) {
+ ALOGE("%s: Cannot find callback context!", __FUNCTION__);
+ return;
+ }
+ (*cb)(context);
+ break;
+ }
default:
ALOGE("%s: unknown message type %d", __FUNCTION__, msg->what());
break;
}
}
+binder::Status CameraManagerGlobal::CameraServiceListener::onCameraAccessPrioritiesChanged() {
+ sp<CameraManagerGlobal> cm = mCameraManager.promote();
+ if (cm != nullptr) {
+ cm->onCameraAccessPrioritiesChanged();
+ } else {
+ ALOGE("Cannot deliver camera access priority callback. Global camera manager died");
+ }
+ return binder::Status::ok();
+}
+
binder::Status CameraManagerGlobal::CameraServiceListener::onStatusChanged(
int32_t status, const String16& cameraId) {
sp<CameraManagerGlobal> cm = mCameraManager.promote();
@@ -306,6 +348,19 @@
return binder::Status::ok();
}
+void CameraManagerGlobal::onCameraAccessPrioritiesChanged() {
+ Mutex::Autolock _l(mLock);
+ for (auto cb : mCallbacks) {
+ sp<AMessage> msg = new AMessage(kWhatSendSingleAccessCallback, mHandler);
+ ACameraManager_AccessPrioritiesChangedCallback cbFp = cb.mAccessPriorityChanged;
+ if (cbFp != nullptr) {
+ msg->setPointer(kCallbackFpKey, (void *) cbFp);
+ msg->setPointer(kContextKey, cb.mContext);
+ msg->post();
+ }
+ }
+}
+
void CameraManagerGlobal::onStatusChanged(
int32_t status, const String8& cameraId) {
Mutex::Autolock _l(mLock);
diff --git a/camera/ndk/impl/ACameraManager.h b/camera/ndk/impl/ACameraManager.h
index 55bfa7e..8c1da36 100644
--- a/camera/ndk/impl/ACameraManager.h
+++ b/camera/ndk/impl/ACameraManager.h
@@ -54,6 +54,11 @@
void unregisterAvailabilityCallback(
const ACameraManager_AvailabilityCallbacks *callback);
+ void registerExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks* callback);
+ void unregisterExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks* callback);
+
/**
* Return camera IDs that support camera2
*/
@@ -86,6 +91,8 @@
return binder::Status::ok();
}
+ virtual binder::Status onCameraAccessPrioritiesChanged();
+
private:
const wp<CameraManagerGlobal> mCameraManager;
};
@@ -96,11 +103,19 @@
explicit Callback(const ACameraManager_AvailabilityCallbacks *callback) :
mAvailable(callback->onCameraAvailable),
mUnavailable(callback->onCameraUnavailable),
+ mAccessPriorityChanged(nullptr),
mContext(callback->context) {}
+ explicit Callback(const ACameraManager_ExtendedAvailabilityCallbacks *callback) :
+ mAvailable(callback->availabilityCallbacks.onCameraAvailable),
+ mUnavailable(callback->availabilityCallbacks.onCameraUnavailable),
+ mAccessPriorityChanged(callback->onCameraAccessPrioritiesChanged),
+ mContext(callback->availabilityCallbacks.context) {}
+
bool operator == (const Callback& other) const {
return (mAvailable == other.mAvailable &&
mUnavailable == other.mUnavailable &&
+ mAccessPriorityChanged == other.mAccessPriorityChanged &&
mContext == other.mContext);
}
bool operator != (const Callback& other) const {
@@ -109,6 +124,9 @@
bool operator < (const Callback& other) const {
if (*this == other) return false;
if (mContext != other.mContext) return mContext < other.mContext;
+ if (mAccessPriorityChanged != other.mAccessPriorityChanged) {
+ return mAccessPriorityChanged < other.mAccessPriorityChanged;
+ }
if (mAvailable != other.mAvailable) return mAvailable < other.mAvailable;
return mUnavailable < other.mUnavailable;
}
@@ -117,13 +135,15 @@
}
ACameraManager_AvailabilityCallback mAvailable;
ACameraManager_AvailabilityCallback mUnavailable;
+ ACameraManager_AccessPrioritiesChangedCallback mAccessPriorityChanged;
void* mContext;
};
std::set<Callback> mCallbacks;
// definition of handler and message
enum {
- kWhatSendSingleCallback
+ kWhatSendSingleCallback,
+ kWhatSendSingleAccessCallback,
};
static const char* kCameraIdKey;
static const char* kCallbackFpKey;
@@ -136,6 +156,7 @@
sp<CallbackHandler> mHandler;
sp<ALooper> mCbLooper; // Looper thread where callbacks actually happen on
+ void onCameraAccessPrioritiesChanged();
void onStatusChanged(int32_t status, const String8& cameraId);
void onStatusChangedLocked(int32_t status, const String8& cameraId);
// Utils for status
diff --git a/camera/ndk/impl/ACameraMetadata.cpp b/camera/ndk/impl/ACameraMetadata.cpp
index de40990..d832deb 100644
--- a/camera/ndk/impl/ACameraMetadata.cpp
+++ b/camera/ndk/impl/ACameraMetadata.cpp
@@ -38,6 +38,8 @@
filterDurations(ANDROID_DEPTH_AVAILABLE_DEPTH_STALL_DURATIONS);
filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_MIN_FRAME_DURATIONS);
filterDurations(ANDROID_HEIC_AVAILABLE_HEIC_STALL_DURATIONS);
+ filterDurations(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS);
+ filterDurations(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS);
}
// TODO: filter request/result keys
}
@@ -186,6 +188,16 @@
filteredDurations.push_back(duration);
}
break;
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_MIN_FRAME_DURATIONS:
+ case ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STALL_DURATIONS:
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_DEPTH_JPEG;
+ filteredDurations.push_back(format);
+ filteredDurations.push_back(width);
+ filteredDurations.push_back(height);
+ filteredDurations.push_back(duration);
+ }
+ break;
default:
// Should not reach here
ALOGE("%s: Unkown tag 0x%x", __FUNCTION__, tag);
@@ -203,7 +215,7 @@
const int STREAM_HEIGHT_OFFSET = 2;
const int STREAM_IS_INPUT_OFFSET = 3;
camera_metadata_entry entry = mData.find(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS);
- if (entry.count == 0 || entry.count % 4 || entry.type != TYPE_INT32) {
+ if (entry.count > 0 && (entry.count % 4 || entry.type != TYPE_INT32)) {
ALOGE("%s: malformed available stream configuration key! count %zu, type %d",
__FUNCTION__, entry.count, entry.type);
return;
@@ -231,9 +243,17 @@
filteredStreamConfigs.push_back(isInput);
}
- mData.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, filteredStreamConfigs);
+ if (filteredStreamConfigs.size() > 0) {
+ mData.update(ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS, filteredStreamConfigs);
+ }
entry = mData.find(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS);
+ if (entry.count > 0 && (entry.count % 4 || entry.type != TYPE_INT32)) {
+ ALOGE("%s: malformed available depth stream configuration key! count %zu, type %d",
+ __FUNCTION__, entry.count, entry.type);
+ return;
+ }
+
Vector<int32_t> filteredDepthStreamConfigs;
filteredDepthStreamConfigs.setCapacity(entry.count);
@@ -258,7 +278,11 @@
filteredDepthStreamConfigs.push_back(height);
filteredDepthStreamConfigs.push_back(isInput);
}
- mData.update(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS, filteredDepthStreamConfigs);
+
+ if (filteredDepthStreamConfigs.size() > 0) {
+ mData.update(ANDROID_DEPTH_AVAILABLE_DEPTH_STREAM_CONFIGURATIONS,
+ filteredDepthStreamConfigs);
+ }
entry = mData.find(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS);
Vector<int32_t> filteredHeicStreamConfigs;
@@ -284,6 +308,32 @@
filteredHeicStreamConfigs.push_back(isInput);
}
mData.update(ANDROID_HEIC_AVAILABLE_HEIC_STREAM_CONFIGURATIONS, filteredHeicStreamConfigs);
+
+ entry = mData.find(ANDROID_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS);
+ Vector<int32_t> filteredDynamicDepthStreamConfigs;
+ filteredDynamicDepthStreamConfigs.setCapacity(entry.count);
+
+ for (size_t i = 0; i < entry.count; i += STREAM_CONFIGURATION_SIZE) {
+ int32_t format = entry.data.i32[i + STREAM_FORMAT_OFFSET];
+ int32_t width = entry.data.i32[i + STREAM_WIDTH_OFFSET];
+ int32_t height = entry.data.i32[i + STREAM_HEIGHT_OFFSET];
+ int32_t isInput = entry.data.i32[i + STREAM_IS_INPUT_OFFSET];
+ if (isInput == ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS_INPUT) {
+ // Hide input streams
+ continue;
+ }
+ // Translate HAL formats to NDK format
+ if (format == HAL_PIXEL_FORMAT_BLOB) {
+ format = AIMAGE_FORMAT_DEPTH_JPEG;
+ }
+
+ filteredDynamicDepthStreamConfigs.push_back(format);
+ filteredDynamicDepthStreamConfigs.push_back(width);
+ filteredDynamicDepthStreamConfigs.push_back(height);
+ filteredDynamicDepthStreamConfigs.push_back(isInput);
+ }
+ mData.update(ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
+ filteredDynamicDepthStreamConfigs);
}
bool
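
A rough consumer-side sketch of the effect of the filtering above, assuming characteristics obtained from ACameraManager_getCameraCharacteristics; after filtering, dynamic depth entries report AIMAGE_FORMAT_DEPTH_JPEG instead of the HAL blob format and input configurations are hidden. The helper name is a placeholder, not part of this change:

    #include <camera/NdkCameraMetadata.h>
    #include <media/NdkImage.h>

    static void listDynamicDepthSizes(const ACameraMetadata* chars) {
        ACameraMetadata_const_entry entry;
        if (ACameraMetadata_getConstEntry(chars,
                ACAMERA_DEPTH_AVAILABLE_DYNAMIC_DEPTH_STREAM_CONFIGURATIONS,
                &entry) != ACAMERA_OK) {
            return;  // dynamic depth not supported on this device
        }
        // Entries are packed as (format, width, height, isInput) tuples.
        for (uint32_t i = 0; i + 3 < entry.count; i += 4) {
            if (entry.data.i32[i] == AIMAGE_FORMAT_DEPTH_JPEG) {
                int32_t width  = entry.data.i32[i + 1];
                int32_t height = entry.data.i32[i + 2];
                (void)width; (void)height;  // width x height is a supported output size
            }
        }
    }
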
diff --git a/camera/ndk/impl/ACaptureRequest.h b/camera/ndk/impl/ACaptureRequest.h
index 5c82ab7..2ffcafe 100644
--- a/camera/ndk/impl/ACaptureRequest.h
+++ b/camera/ndk/impl/ACaptureRequest.h
@@ -18,6 +18,7 @@
#include <camera/NdkCaptureRequest.h>
#include <set>
+#include <unordered_map>
using namespace android;
@@ -59,7 +60,8 @@
return ACAMERA_OK;
}
- sp<ACameraMetadata> settings;
+ sp<ACameraMetadata> settings;
+ std::unordered_map<std::string, sp<ACameraMetadata>> physicalSettings;
ACameraOutputTargets* targets;
void* context;
};
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index 26af4f8..cedf83a 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -53,6 +53,17 @@
*/
typedef struct ACameraDevice ACameraDevice;
+/**
+ * Struct to hold list of camera device Ids. This can refer to either the Ids
+ * of connected camera devices returned from {@link ACameraManager_getCameraIdList},
+ * or the physical camera Ids passed into
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ */
+typedef struct ACameraIdList {
+ int numCameras; ///< Number of camera device Ids
+ const char** cameraIds; ///< list of camera device Ids
+} ACameraIdList;
+
/// Enum for ACameraDevice_ErrorStateCallback error code
enum {
/**
@@ -793,6 +804,47 @@
ACameraWindowType* anw, const char* physicalId,
/*out*/ACaptureSessionOutput** output) __INTRODUCED_IN(29);
+/**
+ * Create a logical multi-camera ACaptureRequest for capturing images, initialized with template
+ * for a target use case, with the ability to specify physical camera settings.
+ *
+ * <p>The settings are chosen to be the best options for this camera device,
+ * so it is not recommended to reuse the same request for a different camera device.</p>
+ *
+ * <p>Note that for all keys in physical camera settings, only the keys
+ * advertised in ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS are
+ * applicable. All other keys are ignored by the camera device.</p>
+ *
+ * @param device the camera device of interest
+ * @param templateId the type of capture request to be created.
+ * See {@link ACameraDevice_request_template}.
+ * @param physicalIdList The list of physical camera Ids that can be used to
+ * customize the request for a specific physical camera.
+ * @param request the output request will be stored here if the method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds. The created capture request will be
+ * filled in request argument.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if device, physicalIdList, or request is
+ * NULL, templateId is undefined or camera device does not support
+ *             requested template, or if any Id in physicalIdList isn't a
+ * valid physical camera backing the current camera device.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DISCONNECTED} if the camera device is closed.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_DEVICE} if the camera device encounters fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_CAMERA_SERVICE} if the camera service encounters fatal error.</li>
+ * <li>{@link ACAMERA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ *
+ * @see TEMPLATE_PREVIEW
+ * @see TEMPLATE_RECORD
+ * @see TEMPLATE_STILL_CAPTURE
+ * @see TEMPLATE_VIDEO_SNAPSHOT
+ * @see TEMPLATE_MANUAL
+ */
+camera_status_t ACameraDevice_createCaptureRequest_withPhysicalIds(
+ const ACameraDevice* device, ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalIdList,
+ /*out*/ACaptureRequest** request) __INTRODUCED_IN(29);
+
#endif /* __ANDROID_API__ >= 29 */
__END_DECLS
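
A minimal usage sketch for the new entry point, assuming an already-opened logical multi-camera; the helper name and the physical Ids "2"/"3" are placeholders and must come from ACAMERA_LOGICAL_MULTI_CAMERA_PHYSICAL_IDS:

    #include <camera/NdkCameraDevice.h>

    // Sketch only: 'device' is an opened logical multi-camera, Ids are hypothetical.
    static ACaptureRequest* makePreviewRequestWithPhysicalIds(ACameraDevice* device) {
        const char* physicalIds[] = { "2", "3" };
        ACameraIdList idList = { /*numCameras*/ 2, physicalIds };

        ACaptureRequest* request = nullptr;
        camera_status_t status = ACameraDevice_createCaptureRequest_withPhysicalIds(
                device, TEMPLATE_PREVIEW, &idList, &request);
        if (status != ACAMERA_OK) {
            return nullptr;  // invalid physical Id, closed device, etc.
        }
        // The caller adds output targets, submits the capture, and eventually
        // releases the request with ACaptureRequest_free().
        return request;
    }
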
diff --git a/camera/ndk/include/camera/NdkCameraManager.h b/camera/ndk/include/camera/NdkCameraManager.h
index ea76738..136a497 100644
--- a/camera/ndk/include/camera/NdkCameraManager.h
+++ b/camera/ndk/include/camera/NdkCameraManager.h
@@ -74,12 +74,6 @@
*/
void ACameraManager_delete(ACameraManager* manager) __INTRODUCED_IN(24);
-/// Struct to hold list of camera devices
-typedef struct ACameraIdList {
- int numCameras; ///< Number of connected camera devices
- const char** cameraIds; ///< list of identifier of connected camera devices
-} ACameraIdList;
-
/**
* Create a list of currently connected camera devices, including
* cameras that may be in use by other camera API clients.
@@ -278,6 +272,105 @@
#endif /* __ANDROID_API__ >= 24 */
+#if __ANDROID_API__ >= 29
+
+/**
+ * Definition of camera access permission change callback.
+ *
+ * <p>Notification that camera access priorities have changed and the camera may
+ * now be openable. An application that was previously denied camera access due to
+ * a higher-priority user already using the camera, or that was disconnected from an
+ * active camera session due to a higher-priority user trying to open the camera,
+ * should try to open the camera again if it still wants to use it. Note that
+ * multiple applications may receive this callback at the same time, and only one of
+ * them will succeed in opening the camera in practice, depending on exact access
+ * priority levels and timing. This method is useful in cases where multiple
+ * applications may be in the resumed state at the same time, and the user switches
+ * focus between them, or if the current camera-using application moves between
+ * full-screen and Picture-in-Picture (PiP) states. In such cases, the camera
+ * available/unavailable callbacks will not be invoked, but another application may
+ * now have higher priority for camera access than the current camera-using
+ * application.</p>
+ *
+ * @param context The optional application context provided by user in
+ * {@link ACameraManager_AvailabilityListener}.
+ */
+typedef void (*ACameraManager_AccessPrioritiesChangedCallback)(void* context);
+
+/**
+ * A listener for camera devices becoming available/unavailable to open or when
+ * the camera access permissions change.
+ *
+ * <p>Cameras become available when they are no longer in use, or when a new
+ * removable camera is connected. They become unavailable when some
+ * application or service starts using a camera, or when a removable camera
+ * is disconnected.</p>
+ *
+ * @see ACameraManager_registerExtendedAvailabilityCallback
+ */
+typedef struct ACameraManager_ExtendedAvailabilityListener {
+    /// Called when a camera becomes available or unavailable
+ ACameraManager_AvailabilityCallbacks availabilityCallbacks;
+
+ /// Called when there is camera access permission change
+ ACameraManager_AccessPrioritiesChangedCallback onCameraAccessPrioritiesChanged;
+
+ /// Reserved for future use, please ensure that all entries are set to NULL
+ void *reserved[6];
+} ACameraManager_ExtendedAvailabilityCallbacks;
+
+/**
+ * Register camera extended availability callbacks.
+ *
+ * <p>onCameraUnavailable will be called whenever a camera device is opened by any camera API
+ * client. Other camera API clients may still be able to open such a camera device, evicting the
+ * existing client if they have higher priority than the existing client of a camera device.
+ * See {@link ACameraManager_openCamera} for more details.</p>
+ *
+ * <p>The callbacks will be called on a dedicated thread shared among all ACameraManager
+ * instances.</p>
+ *
+ * <p>Since this callback will be registered with the camera service, remember to unregister it
+ * once it is no longer needed; otherwise the callback will continue to receive events
+ * indefinitely and it may prevent other resources from being released. Specifically, the
+ * callbacks will be invoked independently of the general activity lifecycle and independently
+ * of the state of individual ACameraManager instances.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_ExtendedAvailabilityCallbacks} to be registered.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if manager or callback is NULL, or
+ * {ACameraManager_ExtendedAvailabilityCallbacks#onCameraAccessPrioritiesChanged}
+ * or {ACameraManager_AvailabilityCallbacks#onCameraAvailable} or
+ * {ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
+ */
+camera_status_t ACameraManager_registerExtendedAvailabilityCallback(
+ ACameraManager* manager,
+ const ACameraManager_ExtendedAvailabilityCallbacks* callback) __INTRODUCED_IN(29);
+
+/**
+ * Unregister camera extended availability callbacks.
+ *
+ * <p>Removing a callback that isn't registered has no effect.</p>
+ *
+ * @param manager the {@link ACameraManager} of interest.
+ * @param callback the {@link ACameraManager_ExtendedAvailabilityCallbacks} to be unregistered.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if callback,
+ * {ACameraManager_ExtendedAvailabilityCallbacks#onCameraAccessPrioritiesChanged}
+ * or {ACameraManager_AvailabilityCallbacks#onCameraAvailable} or
+ * {ACameraManager_AvailabilityCallbacks#onCameraUnavailable} is NULL.</li></ul>
+ */
+camera_status_t ACameraManager_unregisterExtendedAvailabilityCallback(
+ ACameraManager* manager,
+ const ACameraManager_ExtendedAvailabilityCallbacks* callback) __INTRODUCED_IN(29);
+
+#endif /* __ANDROID_API__ >= 29 */
+
__END_DECLS
#endif /* _NDK_CAMERA_MANAGER_H */
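
A minimal sketch of registering the extended availability callbacks, assuming a valid ACameraManager; the callback and helper names are placeholders:

    #include <camera/NdkCameraManager.h>

    static void onAvailable(void* /*context*/, const char* /*cameraId*/) {}
    static void onUnavailable(void* /*context*/, const char* /*cameraId*/) {}
    static void onAccessPrioritiesChanged(void* /*context*/) {
        // A previously refused ACameraManager_openCamera call may now succeed;
        // retry opening the camera here if it is still needed.
    }

    static camera_status_t registerExtendedCallbacks(ACameraManager* manager, void* context) {
        ACameraManager_ExtendedAvailabilityCallbacks cbs = {};  // reserved[] stays NULL
        cbs.availabilityCallbacks.context = context;
        cbs.availabilityCallbacks.onCameraAvailable = onAvailable;
        cbs.availabilityCallbacks.onCameraUnavailable = onUnavailable;
        cbs.onCameraAccessPrioritiesChanged = onAccessPrioritiesChanged;
        return ACameraManager_registerExtendedAvailabilityCallback(manager, &cbs);
    }

The same function pointers and context must later be passed to ACameraManager_unregisterExtendedAvailabilityCallback, since registered callbacks are matched by those values.
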
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 8c19e1d..c26ca69 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3046,6 +3046,28 @@
*/
ACAMERA_REQUEST_AVAILABLE_SESSION_KEYS = // int32[n]
ACAMERA_REQUEST_START + 16,
+ /**
+ * <p>A subset of the available request keys that can be overridden for
+ * physical devices backing a logical multi-camera.</p>
+ *
+ * <p>Type: int32[n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>This is a subset of ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS which contains a list
+ * of keys that can be overridden using <a href="https://developer.android.com/reference/CaptureRequest/Builder.html#setPhysicalCameraKey">Builder#setPhysicalCameraKey</a>.
+ * The respective value of such request key can be obtained by calling
+ * <a href="https://developer.android.com/reference/CaptureRequest/Builder.html#getPhysicalCameraKey">Builder#getPhysicalCameraKey</a>. Capture requests that contain
+ * individual physical device requests must be built via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraDevice.html#createCaptureRequest(int,">Set)</a>.</p>
+ *
+ * @see ACAMERA_REQUEST_AVAILABLE_REQUEST_KEYS
+ */
+ ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS = // int32[n]
+ ACAMERA_REQUEST_START + 17,
ACAMERA_REQUEST_END,
/**
@@ -5688,13 +5710,17 @@
*
* <p>The ID of the active physical camera that's backing the logical camera. All camera
* streams and metadata that are not physical camera specific will be originating from this
- * physical camera. This must be one of valid physical IDs advertised in the physicalIds
- * static tag.</p>
+ * physical camera.</p>
* <p>For a logical camera made up of physical cameras where each camera's lenses have
* different characteristics, the camera device may choose to switch between the physical
* cameras when application changes FOCAL_LENGTH or SCALER_CROP_REGION.
* At the time of lens switch, this result metadata reflects the new active physical camera
* ID.</p>
+ * <p>This key will be available if the camera device advertises this key via {@link ACAMERA_REQUEST_AVAILABLE_RESULT_KEYS }.
+ * When available, this must be one of valid physical IDs backing this logical multi-camera.
+ * If this key is not available for a logical multi-camera, the camera device implementation
+ * may still switch between different active physical cameras based on use case, but the
+ * current active physical camera information won't be available to the application.</p>
*/
ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID = // byte
ACAMERA_LOGICAL_MULTI_CAMERA_START + 2,
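
A short sketch of consuming the active-physical-Id tag from the NDK, assuming a capture result delivered to an onCaptureCompleted callback; the helper name is a placeholder and, as documented above, the key is only present when the device advertises it among its available result keys:

    #include <camera/NdkCameraMetadata.h>

    static void logActivePhysicalId(const ACameraMetadata* result) {
        ACameraMetadata_const_entry entry;
        camera_status_t status = ACameraMetadata_getConstEntry(
                result, ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
        if (status == ACAMERA_OK && entry.count > 0) {
            // NUL-terminated Id of the physical camera that produced this result.
            const char* activeId = reinterpret_cast<const char*>(entry.data.u8);
            (void)activeId;
        }
    }
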
diff --git a/camera/ndk/include/camera/NdkCaptureRequest.h b/camera/ndk/include/camera/NdkCaptureRequest.h
index 136989a..d3f8826 100644
--- a/camera/ndk/include/camera/NdkCaptureRequest.h
+++ b/camera/ndk/include/camera/NdkCaptureRequest.h
@@ -358,6 +358,219 @@
#endif /* __ANDROID_API__ >= 28 */
+#if __ANDROID_API__ >= 29
+
+/**
+ * Get a metadata entry from input {@link ACaptureRequest} for
+ * a physical camera backing a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_getConstEntry, except that if the key is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * returns the entry set by ACaptureRequest_setEntry_physicalCamera_* class of
+ * functions on the particular physical camera.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag the capture request metadata tag in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}
+ * that is set by ACaptureRequest_setEntry_physicalCamera_* class of functions.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ *         <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request, physicalId, or entry is NULL,
+ * physicalId is not one of the Ids used in creating the request, or if the capture
+ * request is a regular request with no physical Ids at all.</li>
+ * <li>{@link ACAMERA_ERROR_METADATA_NOT_FOUND} if the capture request does not contain an
+ * entry of input tag value.</li></ul>
+ */
+camera_status_t ACaptureRequest_getConstEntry_physicalCamera(
+ const ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ ACameraMetadata_const_entry* entry) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with unsigned 8 bits data type for
+ * a physical camera backing a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_u8, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not unsigned 8 bits,
+ * the tag is not controllable by application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_u8(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const uint8_t* data) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with signed 32 bits data type for
+ * a physical camera of a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_i32, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not signed 32 bits,
+ * the tag is not controllable by application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_i32(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const int32_t* data) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with float data type for
+ * a physical camera of a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_float, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not float,
+ * the tag is not controllable by application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_float(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const float* data) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with signed 64 bits data type for
+ * a physical camera of a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_i64, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not signed 64 bits,
+ * the tag is not controllable by application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_i64(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const int64_t* data) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with double data type for
+ * a physical camera of a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_double, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not double,
+ *             the tag is not controllable by the application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_double(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const double* data) __INTRODUCED_IN(29);
+
+/**
+ * Set/change a camera capture control entry with rational data type for
+ * a physical camera of a logical multi-camera device.
+ *
+ * <p>Same as ACaptureRequest_setEntry_rational, except that if {@link tag} is contained
+ * in {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, this function
+ * sets the entry for a particular physical sub-camera backing the logical multi-camera.
+ * If {@link tag} is not contained in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}, the key will be ignored
+ * by the camera device.</p>
+ *
+ * @param request the {@link ACaptureRequest} of interest created by
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param physicalId one of the physical Ids used when request is created with
+ * {@link ACameraDevice_createCaptureRequest_withPhysicalIds}.
+ * @param tag one of the capture request metadata tags in
+ * {@link ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS}.
+ *
+ * @return <ul>
+ * <li>{@link ACAMERA_OK} if the method call succeeds.</li>
+ * <li>{@link ACAMERA_ERROR_INVALID_PARAMETER} if request or physicalId is NULL, count is
+ * larger than zero while data is NULL, the data type of the tag is not rational,
+ *             the tag is not controllable by the application, physicalId is not one of the Ids used
+ * in creating the request, or if the capture request is a regular request with no
+ * physical Ids at all.</li></ul>
+ */
+camera_status_t ACaptureRequest_setEntry_physicalCamera_rational(
+ ACaptureRequest* request, const char* physicalId, uint32_t tag,
+ uint32_t count, const ACameraMetadata_rational* data) __INTRODUCED_IN(29);
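
For the rational variant, values are passed as ACameraMetadata_rational structs; a brief sketch, where kPhysicalRationalTag is a hypothetical placeholder for whichever rational-typed tag the device lists in ACAMERA_REQUEST_AVAILABLE_PHYSICAL_CAMERA_REQUEST_KEYS:

    ACameraMetadata_rational value = { /*numerator*/ 1, /*denominator*/ 3 };
    camera_status_t status = ACaptureRequest_setEntry_physicalCamera_rational(
            request, physicalId, kPhysicalRationalTag /*hypothetical tag*/, 1 /*count*/, &value);
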
+
+#endif /* __ANDROID_API__ >= 29 */
+
__END_DECLS
#endif /* _NDK_CAPTURE_REQUEST_H */
diff --git a/camera/ndk/libcamera2ndk.map.txt b/camera/ndk/libcamera2ndk.map.txt
index 5a00022..946a98e 100644
--- a/camera/ndk/libcamera2ndk.map.txt
+++ b/camera/ndk/libcamera2ndk.map.txt
@@ -11,6 +11,7 @@
ACameraCaptureSession_updateSharedOutput; # introduced=28
ACameraDevice_close;
ACameraDevice_createCaptureRequest;
+ ACameraDevice_createCaptureRequest_withPhysicalIds; # introduced=29
ACameraDevice_createCaptureSession;
ACameraDevice_createCaptureSessionWithSessionParameters; # introduced=28
ACameraDevice_getId;
@@ -22,6 +23,8 @@
ACameraManager_openCamera;
ACameraManager_registerAvailabilityCallback;
ACameraManager_unregisterAvailabilityCallback;
+ ACameraManager_registerExtendedAvailabilityCallback; # introduced=29
+ ACameraManager_unregisterExtendedAvailabilityCallback; # introduced=29
ACameraMetadata_copy;
ACameraMetadata_free;
ACameraMetadata_getAllTags;
@@ -34,14 +37,21 @@
ACaptureRequest_free;
ACaptureRequest_getAllTags;
ACaptureRequest_getConstEntry;
+ ACaptureRequest_getConstEntry_physicalCamera; # introduced=29
ACaptureRequest_getUserContext; # introduced=28
ACaptureRequest_removeTarget;
ACaptureRequest_setEntry_double;
+ ACaptureRequest_setEntry_physicalCamera_double; # introduced=29
ACaptureRequest_setEntry_float;
+ ACaptureRequest_setEntry_physicalCamera_float; # introduced=29
ACaptureRequest_setEntry_i32;
+ ACaptureRequest_setEntry_physicalCamera_i32; # introduced=29
ACaptureRequest_setEntry_i64;
+ ACaptureRequest_setEntry_physicalCamera_i64; # introduced=29
ACaptureRequest_setEntry_rational;
+ ACaptureRequest_setEntry_physicalCamera_rational; # introduced=29
ACaptureRequest_setEntry_u8;
+ ACaptureRequest_setEntry_physicalCamera_u8; # introduced=29
ACaptureRequest_setUserContext; # introduced=28
ACaptureSessionOutputContainer_add;
ACaptureSessionOutputContainer_create;
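
All of the new entries above are marked introduced=29; code that must also build against older API levels would typically guard these calls the same way the header does, e.g.:

    #if __ANDROID_API__ >= 29
        camera_status_t status = ACaptureRequest_setEntry_physicalCamera_u8(
                request, physicalId, tag, count, data);
    #else
        // Per-physical settings are unavailable before API 29; fall back to the logical request.
        camera_status_t status = ACaptureRequest_setEntry_u8(request, tag, count, data);
    #endif
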
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
index f7863a5..a38a31e 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.cpp
@@ -138,6 +138,7 @@
camera_status_t
CameraDevice::createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalCameraIdList,
ACaptureRequest** request) const {
Mutex::Autolock _l(mDeviceLock);
camera_status_t ret = checkCameraClosedOrErrorLocked();
@@ -168,6 +169,12 @@
}
ACaptureRequest* outReq = new ACaptureRequest();
outReq->settings = new ACameraMetadata(rawRequest.release(), ACameraMetadata::ACM_REQUEST);
+ if (physicalCameraIdList != nullptr) {
+ for (auto i = 0; i < physicalCameraIdList->numCameras; i++) {
+ outReq->physicalSettings.emplace(physicalCameraIdList->cameraIds[i],
+ new ACameraMetadata(*(outReq->settings)));
+ }
+ }
outReq->targets = new ACameraOutputTargets();
*request = outReq;
return ACAMERA_OK;
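
Client-side, the per-physical settings cloned here come from creating the request with an explicit id list, as the vendor test below also does; a condensed sketch:

    // `device` is an opened ACameraDevice for the logical multi-camera; the ids are illustrative.
    std::vector<const char*> physicalIds = {"2", "3"};
    ACameraIdList idList;
    idList.numCameras = static_cast<int>(physicalIds.size());
    idList.cameraIds = physicalIds.data();

    ACaptureRequest* request = nullptr;
    camera_status_t status = ACameraDevice_createCaptureRequest_withPhysicalIds(
            device, TEMPLATE_STILL_CAPTURE, &idList, &request);
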
@@ -289,32 +296,19 @@
}
camera_status_t
-CameraDevice::allocateCaptureRequest(
+CameraDevice::allocateCaptureRequestLocked(
const ACaptureRequest* request, /*out*/sp<CaptureRequest> &outReq) {
sp<CaptureRequest> req(new CaptureRequest());
- req->mCaptureRequest.physicalCameraSettings.resize(1);
- req->mCaptureRequest.physicalCameraSettings[0].id = mCameraId;
- // TODO: Do we really need to copy the metadata here ?
- CameraMetadata metadataCopy = request->settings->getInternalData();
- const camera_metadata_t *cameraMetadata = metadataCopy.getAndLock();
- HCameraMetadata hCameraMetadata;
- utils::convertToHidl(cameraMetadata, &hCameraMetadata);
- metadataCopy.unlock(cameraMetadata);
- if (request->settings != nullptr) {
- if (hCameraMetadata.data() != nullptr &&
- mCaptureRequestMetadataQueue != nullptr &&
- mCaptureRequestMetadataQueue->write(
- reinterpret_cast<const uint8_t *>(hCameraMetadata.data()),
- hCameraMetadata.size())) {
- // The metadata field of the union would've been destructued, so no need
- // to re-size it.
- req->mCaptureRequest.physicalCameraSettings[0].settings.fmqMetadataSize(
- hCameraMetadata.size());
- } else {
- ALOGE("Fmq write capture result failed, falling back to hwbinder");
- req->mCaptureRequest.physicalCameraSettings[0].settings.metadata(
- std::move(hCameraMetadata));
- }
+ req->mCaptureRequest.physicalCameraSettings.resize(1 + request->physicalSettings.size());
+
+ size_t index = 0;
+ allocateOneCaptureRequestMetadata(
+ req->mCaptureRequest.physicalCameraSettings[index++], mCameraId, request->settings);
+
+ for (auto& physicalEntry : request->physicalSettings) {
+ allocateOneCaptureRequestMetadata(
+ req->mCaptureRequest.physicalCameraSettings[index++],
+ physicalEntry.first, physicalEntry.second);
}
std::vector<int32_t> requestStreamIdxList;
@@ -356,13 +350,48 @@
return ACAMERA_OK;
}
+void CameraDevice::allocateOneCaptureRequestMetadata(
+ PhysicalCameraSettings& cameraSettings,
+ const std::string& id, const sp<ACameraMetadata>& metadata) {
+ cameraSettings.id = id;
+ // TODO: Do we really need to copy the metadata here ?
+ CameraMetadata metadataCopy = metadata->getInternalData();
+ const camera_metadata_t *cameraMetadata = metadataCopy.getAndLock();
+ HCameraMetadata hCameraMetadata;
+ utils::convertToHidl(cameraMetadata, &hCameraMetadata);
+ metadataCopy.unlock(cameraMetadata);
+ if (metadata != nullptr) {
+ if (hCameraMetadata.data() != nullptr &&
+ mCaptureRequestMetadataQueue != nullptr &&
+ mCaptureRequestMetadataQueue->write(
+ reinterpret_cast<const uint8_t *>(hCameraMetadata.data()),
+ hCameraMetadata.size())) {
+            // The metadata field of the union would've been destructed, so no need
+ // to re-size it.
+ cameraSettings.settings.fmqMetadataSize(hCameraMetadata.size());
+ } else {
+ ALOGE("Fmq write capture result failed, falling back to hwbinder");
+ cameraSettings.settings.metadata(std::move(hCameraMetadata));
+ }
+ }
+}
+
+
ACaptureRequest*
-CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req) {
+CameraDevice::allocateACaptureRequest(sp<CaptureRequest>& req, const char* deviceId) {
ACaptureRequest* pRequest = new ACaptureRequest();
- CameraMetadata clone;
- utils::convertFromHidlCloned(req->mPhysicalCameraSettings[0].settings.metadata(), &clone);
- pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
- pRequest->targets = new ACameraOutputTargets();
+ for (size_t i = 0; i < req->mPhysicalCameraSettings.size(); i++) {
+ const std::string& id = req->mPhysicalCameraSettings[i].id;
+ CameraMetadata clone;
+ utils::convertFromHidlCloned(req->mPhysicalCameraSettings[i].settings.metadata(), &clone);
+ if (id == deviceId) {
+ pRequest->settings = new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
+ } else {
+ pRequest->physicalSettings[req->mPhysicalCameraSettings[i].id] =
+ new ACameraMetadata(clone.release(), ACameraMetadata::ACM_REQUEST);
+ }
+ }
+ pRequest->targets = new ACameraOutputTargets();
for (size_t i = 0; i < req->mSurfaceList.size(); i++) {
native_handle_t* anw = req->mSurfaceList[i];
ACameraOutputTarget outputTarget(anw);
@@ -930,6 +959,7 @@
return;
}
sp<ACameraCaptureSession> session(static_cast<ACameraCaptureSession*>(obj.get()));
+ ACameraDevice* device = session->getDevice();
mCachedSessions.push(session);
sp<CaptureRequest> requestSp = nullptr;
switch (msg->what()) {
@@ -979,7 +1009,7 @@
ALOGE("%s: Cannot find timestamp!", __FUNCTION__);
return;
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
(*onStart)(context, session.get(), request, timestamp);
freeACaptureRequest(request);
break;
@@ -1002,7 +1032,7 @@
return;
}
sp<ACameraMetadata> result(static_cast<ACameraMetadata*>(obj.get()));
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
(*onResult)(context, session.get(), request, result.get());
freeACaptureRequest(request);
break;
@@ -1055,7 +1085,7 @@
physicalMetadataCopyPtrs.push_back(physicalMetadataCopy[i].get());
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
(*onResult)(context, session.get(), request, result.get(),
physicalResultInfo.size(), physicalCameraIdPtrs.data(),
physicalMetadataCopyPtrs.data());
@@ -1084,7 +1114,7 @@
static_cast<CameraCaptureFailure*>(obj.get()));
ACameraCaptureFailure* failure =
static_cast<ACameraCaptureFailure*>(failureSp.get());
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
(*onFail)(context, session.get(), request, failure);
freeACaptureRequest(request);
break;
@@ -1161,7 +1191,7 @@
return;
}
- ACaptureRequest* request = allocateACaptureRequest(requestSp);
+ ACaptureRequest* request = allocateACaptureRequest(requestSp, device->getId());
(*onBufferLost)(context, session.get(), request, anw, frameNumber);
freeACaptureRequest(request);
break;
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDevice.h b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
index c63b97f..28092fd 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDevice.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraDevice.h
@@ -92,6 +92,7 @@
camera_status_t createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalCameraIdList,
ACaptureRequest** request) const;
camera_status_t createCaptureSession(
@@ -169,10 +170,18 @@
camera_status_t updateOutputConfigurationLocked(ACaptureSessionOutput *output);
- camera_status_t allocateCaptureRequest(
+ // Since this writes to ICameraDeviceUser's fmq, clients must take care that:
+ // a) This function is called serially.
+    //    b) This function is called in accordance with ICameraDeviceUser.submitRequestList;
+    //       otherwise, a capture request might end up with the wrong settings
+    //       metadata associated with it.
+ camera_status_t allocateCaptureRequestLocked(
const ACaptureRequest* request, sp<CaptureRequest>& outReq);
+ void allocateOneCaptureRequestMetadata(
+ PhysicalCameraSettings& cameraSettings,
+ const std::string& id, const sp<ACameraMetadata>& metadata);
- static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req);
+ static ACaptureRequest* allocateACaptureRequest(sp<CaptureRequest>& req, const char* deviceId);
static void freeACaptureRequest(ACaptureRequest*);
// only For session to hold device lock
@@ -375,8 +384,9 @@
camera_status_t createCaptureRequest(
ACameraDevice_request_template templateId,
+ const ACameraIdList* physicalCameraIdList,
ACaptureRequest** request) const {
- return mDevice->createCaptureRequest(templateId, request);
+ return mDevice->createCaptureRequest(templateId, physicalCameraIdList, request);
}
camera_status_t createCaptureSession(
diff --git a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
index 7d2304e..8bd5a52 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
+++ b/camera/ndk/ndk_vendor/impl/ACameraDeviceVendor.inc
@@ -73,7 +73,7 @@
requestsV.setCapacity(numRequests);
for (int i = 0; i < numRequests; i++) {
sp<CaptureRequest> req;
- ret = allocateCaptureRequest(requests[i], req);
+ ret = allocateCaptureRequestLocked(requests[i], req);
// We need to call this method since after submitRequestList is called,
// the request metadata queue might have removed the capture request
// metadata. Therefore we simply add the metadata to its wrapper class,
diff --git a/camera/ndk/ndk_vendor/impl/ACameraManager.h b/camera/ndk/ndk_vendor/impl/ACameraManager.h
index 6b1365a..df69353 100644
--- a/camera/ndk/ndk_vendor/impl/ACameraManager.h
+++ b/camera/ndk/ndk_vendor/impl/ACameraManager.h
@@ -64,6 +64,11 @@
void unregisterAvailabilityCallback(
const ACameraManager_AvailabilityCallbacks *callback);
+ void registerExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks* /*callback*/) {}
+ void unregisterExtendedAvailabilityCallback(
+ const ACameraManager_ExtendedAvailabilityCallbacks* /*callback*/) {}
+
/**
* Return camera IDs that support camera2
*/
diff --git a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
index 93108b0..2398922 100644
--- a/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
+++ b/camera/ndk/ndk_vendor/tests/AImageReaderVendorTest.cpp
@@ -61,7 +61,8 @@
native_handle_t* anw;
};
int initCamera(native_handle_t* imgReaderAnw,
- const std::vector<PhysicalImgReaderInfo>& physicalImgReaders) {
+ const std::vector<PhysicalImgReaderInfo>& physicalImgReaders,
+ bool usePhysicalSettings) {
if (imgReaderAnw == nullptr) {
ALOGE("Cannot initialize camera before image reader get initialized.");
return -1;
@@ -97,6 +98,7 @@
return ret;
}
+ std::vector<const char*> idPointerList;
for (auto& physicalStream : physicalImgReaders) {
ACaptureSessionOutput* sessionOutput = nullptr;
ret = ACaptureSessionPhysicalOutput_create(physicalStream.anw,
@@ -113,7 +115,11 @@
mExtraOutputs.push_back(sessionOutput);
// Assume that at most one physical stream per physical camera.
mPhysicalCameraIds.push_back(physicalStream.physicalCameraId);
+ idPointerList.push_back(physicalStream.physicalCameraId);
}
+ ACameraIdList cameraIdList;
+ cameraIdList.numCameras = idPointerList.size();
+ cameraIdList.cameraIds = idPointerList.data();
ret = ACameraDevice_createCaptureSession(mDevice, mOutputs, &mSessionCb, &mSession);
if (ret != AMEDIA_OK) {
@@ -122,7 +128,13 @@
}
// Create capture request
- ret = ACameraDevice_createCaptureRequest(mDevice, TEMPLATE_STILL_CAPTURE, &mStillRequest);
+ if (usePhysicalSettings) {
+ ret = ACameraDevice_createCaptureRequest_withPhysicalIds(mDevice,
+ TEMPLATE_STILL_CAPTURE, &cameraIdList, &mStillRequest);
+ } else {
+ ret = ACameraDevice_createCaptureRequest(mDevice,
+ TEMPLATE_STILL_CAPTURE, &mStillRequest);
+ }
if (ret != AMEDIA_OK) {
ALOGE("ACameraDevice_createCaptureRequest failed, ret=%d", ret);
return ret;
@@ -557,7 +569,8 @@
}
CameraHelper cameraHelper(id, mCameraManager);
- ret = cameraHelper.initCamera(testCase.getNativeWindow(), {});
+ ret = cameraHelper.initCamera(testCase.getNativeWindow(),
+ {}/*physicalImageReaders*/, false/*usePhysicalSettings*/);
if (ret < 0) {
ALOGE("Unable to initialize camera helper");
return false;
@@ -695,6 +708,69 @@
*cameraId = nullptr;
return;
}
+
+ void testLogicalCameraPhysicalStream(bool usePhysicalSettings) {
+ const char* cameraId = nullptr;
+ ACameraMetadata* staticMetadata = nullptr;
+ std::vector<const char*> physicalCameraIds;
+
+ findCandidateLogicalCamera(&cameraId, &staticMetadata, &physicalCameraIds);
+ if (cameraId == nullptr) {
+ // Couldn't find logical camera to test
+ return;
+ }
+
+ // Test streaming the logical multi-camera
+ uint64_t readerUsage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
+ int32_t readerMaxImages = 8;
+ bool readerAsync = false;
+ const int pictureCount = 6;
+ std::vector<ImageReaderTestCase*> testCases;
+ for (size_t i = 0; i < 3; i++) {
+ ImageReaderTestCase* testCase = new ImageReaderTestCase(
+ kTestImageWidth, kTestImageHeight, kTestImageFormat, readerUsage,
+ readerMaxImages, readerAsync);
+ ASSERT_EQ(testCase->initImageReader(), 0);
+ testCases.push_back(testCase);
+ }
+
+ CameraHelper cameraHelper(cameraId, mCameraManager);
+ std::vector<CameraHelper::PhysicalImgReaderInfo> physicalImgReaderInfo;
+ physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
+ physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
+
+ int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(),
+ physicalImgReaderInfo, usePhysicalSettings);
+ ASSERT_EQ(ret, 0);
+
+ if (!cameraHelper.isCameraReady()) {
+ ALOGW("Camera is not ready after successful initialization. It's either due to camera "
+ "on board lacks BACKWARDS_COMPATIBLE capability or the device does not have "
+ "camera on board.");
+ return;
+ }
+
+ for (int i = 0; i < pictureCount; i++) {
+ ret = cameraHelper.takeLogicalCameraPicture();
+ ASSERT_EQ(ret, 0);
+ }
+
+        // Sleep until all captures have finished
+ for (int i = 0; i < kCaptureWaitRetry * pictureCount; i++) {
+ usleep(kCaptureWaitUs);
+ if (testCases[0]->getAcquiredImageCount() == pictureCount) {
+ ALOGI("Session take ~%d ms to capture %d images", i * kCaptureWaitUs / 1000,
+ pictureCount);
+ break;
+ }
+ }
+ ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
+ ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
+ ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
+ ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
+
+ ACameraMetadata_free(staticMetadata);
+ }
};
TEST_F(AImageReaderVendorTest, CreateWindowNativeHandle) {
@@ -722,65 +798,8 @@
}
TEST_F(AImageReaderVendorTest, LogicalCameraPhysicalStream) {
- const char* cameraId = nullptr;
- ACameraMetadata* staticMetadata = nullptr;
- std::vector<const char*> physicalCameraIds;
-
- findCandidateLogicalCamera(&cameraId, &staticMetadata, &physicalCameraIds);
- if (cameraId == nullptr) {
- // Couldn't find logical camera to test
- return;
- }
-
- // Test streaming the logical multi-camera
- uint64_t readerUsage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN;
- int32_t readerMaxImages = 8;
- bool readerAsync = false;
- const int pictureCount = 6;
- std::vector<ImageReaderTestCase*> testCases;
- for (size_t i = 0; i < 3; i++) {
- ImageReaderTestCase* testCase = new ImageReaderTestCase(
- kTestImageWidth, kTestImageHeight, kTestImageFormat, readerUsage, readerMaxImages,
- readerAsync);
- ASSERT_EQ(testCase->initImageReader(), 0);
- testCases.push_back(testCase);
- }
-
- CameraHelper cameraHelper(cameraId, mCameraManager);
- std::vector<CameraHelper::PhysicalImgReaderInfo> physicalImgReaderInfo;
- physicalImgReaderInfo.push_back({physicalCameraIds[0], testCases[1]->getNativeWindow()});
- physicalImgReaderInfo.push_back({physicalCameraIds[1], testCases[2]->getNativeWindow()});
-
- int ret = cameraHelper.initCamera(testCases[0]->getNativeWindow(), physicalImgReaderInfo);
- ASSERT_EQ(ret, 0);
-
- if (!cameraHelper.isCameraReady()) {
- ALOGW("Camera is not ready after successful initialization. It's either due to camera on "
- "board lacks BACKWARDS_COMPATIBLE capability or the device does not have camera on "
- "board.");
- return;
- }
-
- for (int i = 0; i < pictureCount; i++) {
- ret = cameraHelper.takeLogicalCameraPicture();
- ASSERT_EQ(ret, 0);
- }
-
- // Sleep until all capture finished
- for (int i = 0; i < kCaptureWaitRetry * pictureCount; i++) {
- usleep(kCaptureWaitUs);
- if (testCases[0]->getAcquiredImageCount() == pictureCount) {
- ALOGI("Session take ~%d ms to capture %d images", i * kCaptureWaitUs / 1000,
- pictureCount);
- break;
- }
- }
- ASSERT_EQ(testCases[0]->getAcquiredImageCount(), pictureCount);
- ASSERT_EQ(testCases[1]->getAcquiredImageCount(), pictureCount);
- ASSERT_EQ(testCases[2]->getAcquiredImageCount(), pictureCount);
- ASSERT_TRUE(cameraHelper.checkCallbacks(pictureCount));
-
- ACameraMetadata_free(staticMetadata);
+ testLogicalCameraPhysicalStream(false/*usePhysicalSettings*/);
+ testLogicalCameraPhysicalStream(true/*usePhysicalSettings*/);
}
} // namespace
diff --git a/camera/tests/CameraBinderTests.cpp b/camera/tests/CameraBinderTests.cpp
index 8534b28..8fe029a 100644
--- a/camera/tests/CameraBinderTests.cpp
+++ b/camera/tests/CameraBinderTests.cpp
@@ -90,6 +90,11 @@
return binder::Status::ok();
};
+ virtual binder::Status onCameraAccessPrioritiesChanged() {
+ // No op
+ return binder::Status::ok();
+ }
+
bool waitForNumCameras(size_t num) const {
Mutex::Autolock l(mLock);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index fc0cceb..23a35e5 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -135,7 +135,7 @@
}
if (destBuffer.offset + destBuffer.size > destBase->getSize()) {
- _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "invalid buffer size");
+ _hidl_cb(Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE, 0, "invalid buffer size");
return Void();
}
destPtr = static_cast<void *>(base + destination.nonsecureMemory.offset);
diff --git a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
index badb99e..d7dacb8 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/DrmPlugin.cpp
@@ -527,6 +527,10 @@
mMockError = Status_V1_2::ERROR_DRM_SESSION_LOST_STATE;
} else if (value == kFrameTooLargeValue) {
mMockError = Status_V1_2::ERROR_DRM_FRAME_TOO_LARGE;
+ } else if (value == kInvalidStateValue) {
+ mMockError = Status_V1_2::ERROR_DRM_INVALID_STATE;
+ } else {
+ mMockError = Status_V1_2::ERROR_DRM_UNKNOWN;
}
}
@@ -683,6 +687,10 @@
Return<void> DrmPlugin::getOfflineLicenseKeySetIds(getOfflineLicenseKeySetIds_cb _hidl_cb) {
std::vector<std::string> licenseNames = mFileHandle.ListLicenses();
std::vector<KeySetId> keySetIds;
+ if (mMockError != Status_V1_2::OK) {
+ _hidl_cb(toStatus_1_0(mMockError), keySetIds);
+ return Void();
+ }
for (const auto& name : licenseNames) {
std::vector<uint8_t> keySetId(name.begin(), name.end());
keySetIds.push_back(keySetId);
@@ -693,6 +701,9 @@
Return<Status> DrmPlugin::removeOfflineLicense(const KeySetId& keySetId) {
+ if (mMockError != Status_V1_2::OK) {
+ return toStatus_1_0(mMockError);
+ }
std::string licenseName(keySetId.begin(), keySetId.end());
if (mFileHandle.DeleteLicense(licenseName)) {
return Status::OK;
@@ -706,7 +717,9 @@
DeviceFiles::LicenseState state;
std::string license;
OfflineLicenseState hLicenseState;
- if (mFileHandle.RetrieveLicense(licenseName, &state, &license)) {
+ if (mMockError != Status_V1_2::OK) {
+ _hidl_cb(toStatus_1_0(mMockError), OfflineLicenseState::UNKNOWN);
+ } else if (mFileHandle.RetrieveLicense(licenseName, &state, &license)) {
switch (state) {
case DeviceFiles::kLicenseStateActive:
hLicenseState = OfflineLicenseState::USABLE;
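
A sketch of how a test might exercise the new mock paths; kErrorTestPropertyKey is a hypothetical stand-in for the clearkey property key that selects the mock error (its name is not shown in this change):

    // Select the mocked invalid-state error, then verify the offline-license
    // APIs surface it instead of touching the license store.
    drmPlugin->setPropertyString(kErrorTestPropertyKey /*hypothetical*/, kInvalidStateValue);
    drmPlugin->getOfflineLicenseKeySetIds(
            [](Status status, const hidl_vec<KeySetId>& /*keySetIds*/) {
                // Expect the status mapped from ERROR_DRM_INVALID_STATE, not OK.
            });
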
diff --git a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
index 1bbc822..b83ce69 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
+++ b/drm/mediadrm/plugins/clearkey/hidl/include/ClearKeyDrmProperties.h
@@ -40,6 +40,7 @@
static const std::string kResourceContentionValue("resourceContention");
static const std::string kLostStateValue("lostState");
static const std::string kFrameTooLargeValue("frameTooLarge");
+static const std::string kInvalidStateValue("invalidState");
static const std::string kDeviceIdKey("deviceId");
static const uint8_t kTestDeviceIdData[] =
diff --git a/include/media/AudioVolumeGroup.h b/include/media/AudioVolumeGroup.h
new file mode 120000
index 0000000..d6f1c99
--- /dev/null
+++ b/include/media/AudioVolumeGroup.h
@@ -0,0 +1 @@
+../../media/libaudioclient/include/media/AudioVolumeGroup.h
\ No newline at end of file
diff --git a/media/audioserver/Android.mk b/media/audioserver/Android.mk
index 10d8b13..1f24413 100644
--- a/media/audioserver/Android.mk
+++ b/media/audioserver/Android.mk
@@ -16,6 +16,7 @@
libhwbinder \
libmedia \
libmedialogservice \
+ libmediautils \
libnbaio \
libnblog \
libsoundtriggerservice \
diff --git a/media/bufferpool/2.0/Android.bp b/media/bufferpool/2.0/Android.bp
index cd4e06e..c71ac17 100644
--- a/media/bufferpool/2.0/Android.bp
+++ b/media/bufferpool/2.0/Android.bp
@@ -1,6 +1,9 @@
cc_library {
name: "libstagefright_bufferpool@2.0",
vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
srcs: [
"Accessor.cpp",
"AccessorImpl.cpp",
diff --git a/media/codec2/components/aac/C2SoftAacDec.cpp b/media/codec2/components/aac/C2SoftAacDec.cpp
index 04dda8f..4d00d35 100644
--- a/media/codec2/components/aac/C2SoftAacDec.cpp
+++ b/media/codec2/components/aac/C2SoftAacDec.cpp
@@ -75,7 +75,7 @@
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -84,15 +84,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -103,10 +103,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -191,7 +191,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
(void)me; // TODO: validate
@@ -205,13 +205,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
diff --git a/media/codec2/components/aac/C2SoftAacEnc.cpp b/media/codec2/components/aac/C2SoftAacEnc.cpp
index d1bdf0d..137e775 100644
--- a/media/codec2/components/aac/C2SoftAacEnc.cpp
+++ b/media/codec2/components/aac/C2SoftAacEnc.cpp
@@ -37,29 +37,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -68,15 +68,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 6)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -125,13 +125,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
@@ -323,8 +323,8 @@
return;
}
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(encInfo.confSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(encInfo.confSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
index c591e21..edad75a 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrDec.cpp
@@ -47,18 +47,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef AMRNB
MEDIA_MIMETYPE_AUDIO_AMR_NB
#else
@@ -67,13 +67,13 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
#ifdef AMRNB
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
@@ -85,19 +85,19 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
#ifdef AMRNB
- .withDefault(new C2BitrateTuning::input(0u, 4750))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
#else
- .withDefault(new C2BitrateTuning::input(0u, 6600))
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
#endif
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
@@ -110,13 +110,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index 8c03257..3d3aa7d 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -36,38 +36,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_NB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter(
@@ -75,8 +75,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 4750))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 4750))
.withFields({C2F(mBitrate, value).inRange(4750, 12200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index 074493c..379cb32 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -38,38 +38,38 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_AMR_WB))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 16000))
.withFields({C2F(mSampleRate, value).equalTo(16000)})
.withSetter(
@@ -77,8 +77,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 6600))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 6600))
.withFields({C2F(mBitrate, value).inRange(6600, 23850)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
uint32_t getBitrate() const { return mBitrate->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 6be1807..4bcc2c6 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -141,7 +141,7 @@
static C2R SizeSetter(bool mayBlock,
const C2P<C2StreamPictureSizeInfo::output>& oldMe,
- C2P<C2VideoSizeStreamInfo::output>& me) {
+ C2P<C2StreamPictureSizeInfo::output>& me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -586,7 +586,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcDec.cpp b/media/codec2/components/avc/C2SoftAvcDec.cpp
index 86cd3d8..9290d74 100644
--- a/media/codec2/components/avc/C2SoftAvcDec.cpp
+++ b/media/codec2/components/avc/C2SoftAvcDec.cpp
@@ -198,7 +198,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -845,7 +845,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
diff --git a/media/codec2/components/avc/C2SoftAvcEnc.cpp b/media/codec2/components/avc/C2SoftAvcEnc.cpp
index 6ddb9ff..b851908 100644
--- a/media/codec2/components/avc/C2SoftAvcEnc.cpp
+++ b/media/codec2/components/avc/C2SoftAvcEnc.cpp
@@ -45,36 +45,36 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_VIDEO_AVC))
.build());
addParameter(
- DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2560, 2),
C2F(mSize, height).inRange(2, 2560, 2),
@@ -83,7 +83,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -91,8 +91,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -182,9 +182,9 @@
static C2R ProfileLevelSetter(
bool mayBlock,
C2P<C2StreamProfileLevelInfo::output> &me,
- const C2P<C2VideoSizeStreamTuning::input> &size,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
const C2P<C2StreamFrameRateInfo::output> &frameRate,
- const C2P<C2BitrateTuning::output> &bitrate) {
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
(void)mayBlock;
if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
me.set().profile = PROFILE_AVC_CONSTRAINED_BASELINE;
@@ -325,16 +325,16 @@
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> getRequestSync_l() const { return mRequestSync; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -1332,8 +1332,8 @@
mSpsPpsHeaderReceived = true;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(s_encode_op.s_out_buf.u4_bytes, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -1492,7 +1492,7 @@
if (IV_IDR_FRAME == s_encode_op.u4_encoded_frame_type) {
ALOGV("IDR frame produced");
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index b158f8f..44f1fe0 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -473,7 +473,7 @@
if (!mOutputBlockPool) {
c2_status_t err = [this] {
// TODO: don't use query_vb
- C2StreamFormatConfig::output outputFormat(0u);
+ C2StreamBufferTypeSetting::output outputFormat(0u);
std::vector<std::unique_ptr<C2Param>> params;
c2_status_t err = intf()->query_vb(
{ &outputFormat },
@@ -485,7 +485,7 @@
return err;
}
C2BlockPool::local_id_t poolId =
- outputFormat.value == C2FormatVideo
+ outputFormat.value == C2BufferData::GRAPHIC
? C2BlockPool::BASIC_GRAPHIC
: C2BlockPool::BASIC_LINEAR;
if (params.size()) {
diff --git a/media/codec2/components/flac/C2SoftFlacDec.cpp b/media/codec2/components/flac/C2SoftFlacDec.cpp
index 86b16e8..10b14ce 100644
--- a/media/codec2/components/flac/C2SoftFlacDec.cpp
+++ b/media/codec2/components/flac/C2SoftFlacDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -99,13 +99,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/flac/C2SoftFlacEnc.cpp b/media/codec2/components/flac/C2SoftFlacEnc.cpp
index 4ea35c2..0ce2543 100644
--- a/media/codec2/components/flac/C2SoftFlacEnc.cpp
+++ b/media/codec2/components/flac/C2SoftFlacEnc.cpp
@@ -34,38 +34,38 @@
: C2InterfaceHelper(helper) {
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_FLAC))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(1, 655350)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 768000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 768000))
.withFields({C2F(mBitrate, value).inRange(1, 21000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -92,13 +92,13 @@
int32_t getPcmEncodingInfo() const { return mPcmEncodingInfo->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::input> mPcmEncodingInfo;
};
@@ -223,8 +223,8 @@
}
if (!mWroteHeader) {
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(mHeaderOffset, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(mHeaderOffset, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/g711/C2SoftG711Dec.cpp b/media/codec2/components/g711/C2SoftG711Dec.cpp
index 1c71d45..504ca78 100644
--- a/media/codec2/components/g711/C2SoftG711Dec.cpp
+++ b/media/codec2/components/g711/C2SoftG711Dec.cpp
@@ -41,18 +41,18 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
#ifdef ALAW
MEDIA_MIMETYPE_AUDIO_G711_ALAW
#else
@@ -61,28 +61,28 @@
)).build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).inRange(8000, 48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).equalTo(64000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/gsm/C2SoftGsmDec.cpp b/media/codec2/components/gsm/C2SoftGsmDec.cpp
index 7101c79..69d4885 100644
--- a/media/codec2/components/gsm/C2SoftGsmDec.cpp
+++ b/media/codec2/components/gsm/C2SoftGsmDec.cpp
@@ -36,44 +36,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MSGSM))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 8000))
.withFields({C2F(mSampleRate, value).equalTo(8000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).equalTo(1)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 13200))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 13200))
.withFields({C2F(mBitrate, value).equalTo(13200)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -85,13 +85,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/hevc/C2SoftHevcDec.cpp b/media/codec2/components/hevc/C2SoftHevcDec.cpp
index f0d7d88..bb8dda0 100644
--- a/media/codec2/components/hevc/C2SoftHevcDec.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcDec.cpp
@@ -192,7 +192,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -839,7 +839,7 @@
mHeight = s_decode_op.u4_pic_ht;
CHECK_EQ(0u, s_decode_op.u4_output_present);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index 5e6f44f..7045b6a 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -39,43 +39,44 @@
namespace android {
class C2SoftHevcEnc::IntfImpl : public C2InterfaceHelper {
- public:
+ public:
explicit IntfImpl(const std::shared_ptr<C2ReflectorHelper>& helper)
: C2InterfaceHelper(helper) {
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_VIDEO_HEVC))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
+ // matches size limits in codec library
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(320, 1920, 2),
C2F(mSize, height).inRange(128, 1088, 2),
@@ -84,20 +85,22 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
.withSetter(
Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
.build());
+ // matches limits in codec library
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
+ // matches levels allowed within codec library
addParameter(
DefineParam(mProfileLevel, C2_PARAMKEY_PROFILE_LEVEL)
.withDefault(new C2StreamProfileLevelInfo::output(
@@ -137,7 +140,7 @@
C2P<C2StreamBitrateInfo::output>& me) {
(void)mayBlock;
C2R res = C2R::Ok();
- if (me.v.value <= 4096) {
+ if (me.v.value < 4096) {
me.set().value = 4096;
}
return res;
@@ -162,9 +165,9 @@
static C2R ProfileLevelSetter(
bool mayBlock,
C2P<C2StreamProfileLevelInfo::output> &me,
- const C2P<C2VideoSizeStreamTuning::input> &size,
+ const C2P<C2StreamPictureSizeInfo::input> &size,
const C2P<C2StreamFrameRateInfo::output> &frameRate,
- const C2P<C2BitrateTuning::output> &bitrate) {
+ const C2P<C2StreamBitrateInfo::output> &bitrate) {
(void)mayBlock;
if (!me.F(me.v.profile).supportsAtAll(me.v.profile)) {
me.set().profile = PROFILE_HEVC_MAIN;
@@ -278,7 +281,7 @@
return (uint32_t)c2_max(c2_min(period + 0.5, double(UINT32_MAX)), 1.);
}
- std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
+ std::shared_ptr<C2StreamPictureSizeInfo::input> getSize_l() const {
return mSize;
}
std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const {
@@ -292,30 +295,33 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
+
constexpr char COMPONENT_NAME[] = "c2.android.hevc.encoder";
static size_t GetCPUCoreCount() {
- long cpuCoreCount = 1;
+ long cpuCoreCount = 0;
+
#if defined(_SC_NPROCESSORS_ONLN)
cpuCoreCount = sysconf(_SC_NPROCESSORS_ONLN);
#else
// _SC_NPROC_ONLN must be defined...
cpuCoreCount = sysconf(_SC_NPROC_ONLN);
#endif
- CHECK(cpuCoreCount >= 1);
- ALOGV("Number of CPU cores: %ld", cpuCoreCount);
+
+ if (cpuCoreCount < 1)
+ cpuCoreCount = 1;
return (size_t)cpuCoreCount;
}
@@ -383,7 +389,7 @@
c2_status_t C2SoftHevcEnc::initEncParams() {
mCodecCtx = nullptr;
- mNumCores = MIN(GetCPUCoreCount(), CODEC_MAX_CORES);
+ mNumCores = std::min(GetCPUCoreCount(), (size_t) CODEC_MAX_CORES);
memset(&mEncParams, 0, sizeof(ihevce_static_cfg_params_t));
// default configuration
@@ -397,7 +403,8 @@
mEncParams.s_src_prms.i4_width = mSize->width;
mEncParams.s_src_prms.i4_height = mSize->height;
mEncParams.s_src_prms.i4_frm_rate_denom = 1000;
- mEncParams.s_src_prms.i4_frm_rate_num = mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
+ mEncParams.s_src_prms.i4_frm_rate_num =
+ mFrameRate->value * mEncParams.s_src_prms.i4_frm_rate_denom;
mEncParams.s_tgt_lyr_prms.as_tgt_params[0].i4_quality_preset = IHEVCE_QUALITY_P5;
mEncParams.s_tgt_lyr_prms.as_tgt_params[0].ai4_tgt_bitrate[0] =
mBitrate->value;
@@ -470,7 +477,7 @@
const C2GraphicView* const input,
uint64_t timestamp) {
ihevce_static_cfg_params_t* params = &mEncParams;
- memset(ps_encode_ip, 0, sizeof(ihevce_inp_buf_t));
+ memset(ps_encode_ip, 0, sizeof(*ps_encode_ip));
if (!input) {
return C2_OK;
@@ -495,13 +502,14 @@
int32_t uStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
int32_t vStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
- uint32_t width = mSize->width;
- uint32_t height = mSize->height;
+ const uint32_t width = mSize->width;
+ const uint32_t height = mSize->height;
- // width and height are always even (as block size is 16x16)
- CHECK_EQ((width & 1u), 0u);
- CHECK_EQ((height & 1u), 0u);
+ // width and height must be even
+ if (width & 1u || height & 1u) {
+ ALOGW("height(%u) and width(%u) must both be even", height, width);
+ return C2_BAD_VALUE;
+ }
size_t yPlaneSize = width * height;
@@ -650,6 +658,7 @@
if (view->error() != C2_OK) {
ALOGE("graphic view map err = %d", view->error());
mSignalledError = true;
+ work->result = C2_CORRUPTED;
return;
}
}
@@ -661,8 +670,8 @@
ihevce_out_buf_t s_header_op{};
err = ihevce_encode_header(mCodecCtx, &s_header_op);
if (err == IHEVCE_EOK && s_header_op.i4_bytes_generated) {
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(
s_header_op.i4_bytes_generated, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
@@ -687,8 +696,8 @@
status = setEncodeArgs(&s_encode_ip, view.get(), timestamp);
if (C2_OK != status) {
- mSignalledError = true;
ALOGE("setEncodeArgs failed : 0x%x", status);
+ mSignalledError = true;
work->result = status;
return;
}
@@ -746,7 +755,7 @@
ALOGV("IDR frame produced");
buffer->setInfo(
std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
@@ -761,8 +770,9 @@
: mHelper(std::static_pointer_cast<C2ReflectorHelper>(
GetCodec2PlatformComponentStore()->getParamReflector())) {}
- virtual c2_status_t createComponent(
- c2_node_id_t id, std::shared_ptr<C2Component>* const component,
+ c2_status_t createComponent(
+ c2_node_id_t id,
+ std::shared_ptr<C2Component>* const component,
std::function<void(C2Component*)> deleter) override {
*component = std::shared_ptr<C2Component>(
new C2SoftHevcEnc(
@@ -772,8 +782,9 @@
return C2_OK;
}
- virtual c2_status_t createInterface(
- c2_node_id_t id, std::shared_ptr<C2ComponentInterface>* const interface,
+ c2_status_t createInterface(
+ c2_node_id_t id,
+ std::shared_ptr<C2ComponentInterface>* const interface,
std::function<void(C2ComponentInterface*)> deleter) override {
*interface = std::shared_ptr<C2ComponentInterface>(
new SimpleInterface<C2SoftHevcEnc::IntfImpl>(
@@ -783,7 +794,7 @@
return C2_OK;
}
- virtual ~C2SoftHevcEncFactory() override = default;
+ ~C2SoftHevcEncFactory() override = default;
private:
std::shared_ptr<C2ReflectorHelper> mHelper;
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.h b/media/codec2/components/hevc/C2SoftHevcEnc.h
index c22fea2..9d90b95 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.h
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2018 The Android Open Source Project
+ * Copyright 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,18 +17,18 @@
#ifndef ANDROID_C2_SOFT_HEVC_ENC_H_
#define ANDROID_C2_SOFT_HEVC_ENC_H_
-#include <map>
-#include <utils/Vector.h>
-#include <media/stagefright/foundation/ColorUtils.h>
#include <SimpleC2Component.h>
+#include <algorithm>
+#include <map>
+#include <media/stagefright/foundation/ColorUtils.h>
+#include <utils/Vector.h>
#include "ihevc_typedefs.h"
namespace android {
-#define MIN(a, b) ((a) < (b)) ? (a) : (b)
/** Get time */
-#define GETTIME(a, b) gettimeofday(a, b);
+#define GETTIME(a, b) gettimeofday(a, b)
/** Compute difference between start and end */
#define TIME_DIFF(start, end, diff) \
@@ -55,7 +55,7 @@
const std::shared_ptr<C2BlockPool>& pool) override;
protected:
- virtual ~C2SoftHevcEnc();
+ ~C2SoftHevcEnc() override;
private:
std::shared_ptr<IntfImpl> mIntf;
diff --git a/media/codec2/components/mp3/C2SoftMp3Dec.cpp b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
index c8b8397..9db6d8f 100644
--- a/media/codec2/components/mp3/C2SoftMp3Dec.cpp
+++ b/media/codec2/components/mp3/C2SoftMp3Dec.cpp
@@ -40,29 +40,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_MPEG))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({8000, 11025, 12000, 16000,
22050, 24000, 32000, 44100, 48000})})
@@ -70,15 +70,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 2)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 320000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -90,13 +90,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -555,4 +555,3 @@
ALOGV("in %s", __func__);
delete factory;
}
-
diff --git a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
index da32ec0..290677e 100644
--- a/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
+++ b/media/codec2/components/mpeg2/C2SoftMpeg2Dec.cpp
@@ -180,7 +180,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -892,7 +892,7 @@
ALOGI("Configuring decoder: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
@@ -931,7 +931,7 @@
ALOGI("Configuring decoder out: mWidth %d , mHeight %d ",
mWidth, mHeight);
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err =
mIntf->config({&size}, C2_MAY_BLOCK, &failures);
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 0b89cff..3d4a733 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -186,7 +186,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -574,7 +574,7 @@
PVSetPostProcType(mDecHandle, 0);
if (handleResChange(work)) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
@@ -646,7 +646,7 @@
return;
} else if (resChange) {
ALOGI("Setting width and height");
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == OK) {
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
index c8796f3..89fa59d 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Enc.cpp
@@ -52,26 +52,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef MPEG4
MEDIA_MIMETYPE_VIDEO_MPEG4
#else
@@ -80,14 +80,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 176, 144))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 176, 144))
.withFields({
#ifdef MPEG4
C2F(mSize, width).inRange(16, 176, 16),
@@ -101,7 +101,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 17.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -110,8 +110,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 12000000)})
.withSetter(BitrateSetter)
.build());
@@ -217,14 +217,14 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
};
@@ -446,8 +446,8 @@
}
++mNumInputFrames;
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(outputSize, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(outputSize, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
@@ -595,7 +595,7 @@
work->worklets.front()->output.ordinal.timestamp = inputTimeStamp;
if (hintTrack.CodeType == 0) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
}
diff --git a/media/codec2/components/opus/C2SoftOpusDec.cpp b/media/codec2/components/opus/C2SoftOpusDec.cpp
index 3ce1fd6..7dcd53d 100644
--- a/media/codec2/components/opus/C2SoftOpusDec.cpp
+++ b/media/codec2/components/opus/C2SoftOpusDec.cpp
@@ -40,44 +40,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_OPUS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).equalTo(48000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 6000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 6000))
.withFields({C2F(mBitrate, value).inRange(6000, 510000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -89,13 +89,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -252,20 +252,25 @@
const uint8_t *data = rView.data() + inOffset;
if (mInputBufferCount < 3) {
if (mInputBufferCount == 0) {
- size_t opusHeadSize = inSize;
+ size_t opusHeadSize = 0;
size_t codecDelayBufSize = 0;
size_t seekPreRollBufSize = 0;
- void *opusHeadBuf = (void *)data;
+ void *opusHeadBuf = NULL;
void *codecDelayBuf = NULL;
void *seekPreRollBuf = NULL;
- GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
- &opusHeadSize, &codecDelayBuf,
- &codecDelayBufSize, &seekPreRollBuf,
- &seekPreRollBufSize);
+ if (!GetOpusHeaderBuffers(data, inSize, &opusHeadBuf,
+ &opusHeadSize, &codecDelayBuf,
+ &codecDelayBufSize, &seekPreRollBuf,
+ &seekPreRollBufSize)) {
+ ALOGE("%s encountered error in GetOpusHeaderBuffers", __func__);
+ mSignalledError = true;
+ work->result = C2_CORRUPTED;
+ return;
+ }
if (!ParseOpusHeader((uint8_t *)opusHeadBuf, opusHeadSize, &mHeader)) {
- ALOGE("Encountered error while Parsing Opus Header.");
+ ALOGE("%s Encountered error while Parsing Opus Header.", __func__);
mSignalledError = true;
work->result = C2_CORRUPTED;
return;
@@ -304,16 +309,16 @@
return;
}
- if (codecDelayBuf && codecDelayBufSize == 8) {
+ if (codecDelayBuf && codecDelayBufSize == sizeof(uint64_t)) {
uint64_t value;
memcpy(&value, codecDelayBuf, sizeof(uint64_t));
mCodecDelay = ns_to_samples(value, kRate);
mSamplesToDiscard = mCodecDelay;
++mInputBufferCount;
}
- if (seekPreRollBuf && seekPreRollBufSize == 8) {
+ if (seekPreRollBuf && seekPreRollBufSize == sizeof(uint64_t)) {
uint64_t value;
- memcpy(&value, codecDelayBuf, sizeof(uint64_t));
+ memcpy(&value, seekPreRollBuf, sizeof(uint64_t));
mSeekPreRoll = ns_to_samples(value, kRate);
++mInputBufferCount;
}
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
index 68fcea1..a0b2443 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.cpp
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -42,29 +42,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatAudio))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_OPUS))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::input(0u, 48000))
.withFields({C2F(mSampleRate, value).oneOf({
8000, 12000, 16000, 24000, 48000})})
@@ -72,15 +72,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::input(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter((Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 128000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 128000))
.withFields({C2F(mBitrate, value).inRange(500, 512000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -104,13 +104,13 @@
uint32_t getComplexity() const { return mComplexity->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::input> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::input> mChannelCount;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamComplexityTuning::output> mComplexity;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
@@ -423,8 +423,8 @@
int headerLen = WriteOpusHeaders(opusHeader, mSampleRate, header,
sizeof(header), mCodecDelay, mSeekPreRoll);
- std::unique_ptr<C2StreamCsdInfo::output> csd =
- C2StreamCsdInfo::output::AllocUnique(headerLen, 0u);
+ std::unique_ptr<C2StreamInitDataInfo::output> csd =
+ C2StreamInitDataInfo::output::AllocUnique(headerLen, 0u);
if (!csd) {
ALOGE("CSD allocation failed");
mSignalledError = true;
diff --git a/media/codec2/components/raw/C2SoftRawDec.cpp b/media/codec2/components/raw/C2SoftRawDec.cpp
index 5c83481..802caa4 100644
--- a/media/codec2/components/raw/C2SoftRawDec.cpp
+++ b/media/codec2/components/raw/C2SoftRawDec.cpp
@@ -37,44 +37,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).inRange(8000, 192000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 2))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(1, 10000000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -98,13 +98,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamPcmEncodingInfo::output> mPcmEncodingInfo;
};
diff --git a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
index 48825e4..e7393ee 100644
--- a/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
+++ b/media/codec2/components/vorbis/C2SoftVorbisDec.cpp
@@ -45,44 +45,44 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_VORBIS))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 48000))
.withFields({C2F(mSampleRate, value).inRange(8000, 96000)})
.withSetter((Setter<decltype(*mSampleRate)>::StrictValueWithNoDeps))
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(32000, 500000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -94,13 +94,13 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
};
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 9ba2362..3120f7a 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -215,7 +215,7 @@
}
static C2R SizeSetter(bool mayBlock, const C2P<C2StreamPictureSizeInfo::output> &oldMe,
- C2P<C2VideoSizeStreamInfo::output> &me) {
+ C2P<C2StreamPictureSizeInfo::output> &me) {
(void)mayBlock;
C2R res = C2R::Ok();
if (!me.F(me.v.width).supportsAtAll(me.v.width)) {
@@ -700,7 +700,7 @@
mWidth = img->d_w;
mHeight = img->d_h;
- C2VideoSizeStreamInfo::output size(0u, mWidth, mHeight);
+ C2StreamPictureSizeInfo::output size(0u, mWidth, mHeight);
std::vector<std::unique_ptr<C2SettingResult>> failures;
c2_status_t err = mIntf->config({&size}, C2_MAY_BLOCK, &failures);
if (err == C2_OK) {
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.cpp b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
index 155a84f..6509a88 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.cpp
@@ -633,7 +633,7 @@
std::shared_ptr<C2Buffer> buffer = createLinearBuffer(block);
if (encoded_packet->data.frame.flags & VPX_FRAME_IS_KEY) {
buffer->setInfo(std::make_shared<C2StreamPictureTypeMaskInfo::output>(
- 0u /* stream id */, C2PictureTypeKeyFrame));
+ 0u /* stream id */, C2Config::SYNC_FRAME));
}
work->worklets.front()->output.buffers.push_back(buffer);
work->worklets.front()->output.ordinal = work->input.ordinal;
diff --git a/media/codec2/components/vpx/C2SoftVpxEnc.h b/media/codec2/components/vpx/C2SoftVpxEnc.h
index 87ed1a9..5591a49 100644
--- a/media/codec2/components/vpx/C2SoftVpxEnc.h
+++ b/media/codec2/components/vpx/C2SoftVpxEnc.h
@@ -229,26 +229,26 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::input(0u, C2FormatVideo))
+ new C2StreamBufferTypeSetting::input(0u, C2BufferData::GRAPHIC))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
.withConstValue(
- new C2StreamFormatConfig::output(0u, C2FormatCompressed))
+ new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_VIDEO_RAW))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
#ifdef VP9
MEDIA_MIMETYPE_VIDEO_VP9
#else
@@ -257,14 +257,14 @@
))
.build());
- addParameter(DefineParam(mUsage, C2_NAME_INPUT_STREAM_USAGE_SETTING)
+ addParameter(DefineParam(mUsage, C2_PARAMKEY_INPUT_STREAM_USAGE)
.withConstValue(new C2StreamUsageTuning::input(
0u, (uint64_t)C2MemoryUsage::CPU_READ))
.build());
addParameter(
- DefineParam(mSize, C2_NAME_STREAM_VIDEO_SIZE_SETTING)
- .withDefault(new C2VideoSizeStreamTuning::input(0u, 320, 240))
+ DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
.withFields({
C2F(mSize, width).inRange(2, 2048, 2),
C2F(mSize, height).inRange(2, 2048, 2),
@@ -285,7 +285,7 @@
.build());
addParameter(
- DefineParam(mFrameRate, C2_NAME_STREAM_FRAME_RATE_SETTING)
+ DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
.withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
// TODO: More restriction?
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
@@ -312,8 +312,8 @@
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::output(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::output(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(4096, 40000000)})
.withSetter(BitrateSetter)
.build());
@@ -416,18 +416,18 @@
}
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamUsageTuning::input> mUsage;
- std::shared_ptr<C2VideoSizeStreamTuning::input> mSize;
+ std::shared_ptr<C2StreamPictureSizeInfo::input> mSize;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamTemporalLayeringTuning::output> mLayering;
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
- std::shared_ptr<C2BitrateTuning::output> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
};
diff --git a/media/codec2/components/xaac/C2SoftXaacDec.cpp b/media/codec2/components/xaac/C2SoftXaacDec.cpp
index 1c0e70b..ed730c3 100644
--- a/media/codec2/components/xaac/C2SoftXaacDec.cpp
+++ b/media/codec2/components/xaac/C2SoftXaacDec.cpp
@@ -66,29 +66,29 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mInputFormat, C2_NAME_INPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::input(0u, C2FormatCompressed))
+ DefineParam(mInputFormat, C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::input(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mOutputFormat, C2_NAME_OUTPUT_STREAM_FORMAT_SETTING)
- .withConstValue(new C2StreamFormatConfig::output(0u, C2FormatAudio))
+ DefineParam(mOutputFormat, C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE)
+ .withConstValue(new C2StreamBufferTypeSetting::output(0u, C2BufferData::LINEAR))
.build());
addParameter(
- DefineParam(mInputMediaType, C2_NAME_INPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::input>(
+ DefineParam(mInputMediaType, C2_PARAMKEY_INPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::input>(
MEDIA_MIMETYPE_AUDIO_AAC))
.build());
addParameter(
- DefineParam(mOutputMediaType, C2_NAME_OUTPUT_PORT_MIME_SETTING)
- .withConstValue(AllocSharedString<C2PortMimeConfig::output>(
+ DefineParam(mOutputMediaType, C2_PARAMKEY_OUTPUT_MEDIA_TYPE)
+ .withConstValue(AllocSharedString<C2PortMediaTypeSetting::output>(
MEDIA_MIMETYPE_AUDIO_RAW))
.build());
addParameter(
- DefineParam(mSampleRate, C2_NAME_STREAM_SAMPLE_RATE_SETTING)
+ DefineParam(mSampleRate, C2_PARAMKEY_SAMPLE_RATE)
.withDefault(new C2StreamSampleRateInfo::output(0u, 44100))
.withFields({C2F(mSampleRate, value).oneOf({
7350, 8000, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000
@@ -97,15 +97,15 @@
.build());
addParameter(
- DefineParam(mChannelCount, C2_NAME_STREAM_CHANNEL_COUNT_SETTING)
+ DefineParam(mChannelCount, C2_PARAMKEY_CHANNEL_COUNT)
.withDefault(new C2StreamChannelCountInfo::output(0u, 1))
.withFields({C2F(mChannelCount, value).inRange(1, 8)})
.withSetter(Setter<decltype(*mChannelCount)>::StrictValueWithNoDeps)
.build());
addParameter(
- DefineParam(mBitrate, C2_NAME_STREAM_BITRATE_SETTING)
- .withDefault(new C2BitrateTuning::input(0u, 64000))
+ DefineParam(mBitrate, C2_PARAMKEY_BITRATE)
+ .withDefault(new C2StreamBitrateInfo::input(0u, 64000))
.withFields({C2F(mBitrate, value).inRange(8000, 960000)})
.withSetter(Setter<decltype(*mBitrate)>::NonStrictValueWithNoDeps)
.build());
@@ -116,10 +116,10 @@
.build());
addParameter(
- DefineParam(mAacFormat, C2_NAME_STREAM_AAC_FORMAT_SETTING)
- .withDefault(new C2StreamAacFormatInfo::input(0u, C2AacStreamFormatRaw))
+ DefineParam(mAacFormat, C2_PARAMKEY_AAC_PACKAGING)
+ .withDefault(new C2StreamAacFormatInfo::input(0u, C2Config::AAC_PACKAGING_RAW))
.withFields({C2F(mAacFormat, value).oneOf({
- C2AacStreamFormatRaw, C2AacStreamFormatAdts
+ C2Config::AAC_PACKAGING_RAW, C2Config::AAC_PACKAGING_ADTS
})})
.withSetter(Setter<decltype(*mAacFormat)>::StrictValueWithNoDeps)
.build());
@@ -203,7 +203,7 @@
.build());
}
- bool isAdts() const { return mAacFormat->value == C2AacStreamFormatAdts; }
+ bool isAdts() const { return mAacFormat->value == C2Config::AAC_PACKAGING_ADTS; }
uint32_t getBitrate() const { return mBitrate->value; }
static C2R ProfileLevelSetter(bool mayBlock, C2P<C2StreamProfileLevelInfo::input> &me) {
(void)mayBlock;
@@ -218,13 +218,13 @@
int32_t getDrcEffectType() const { return mDrcEffectType->value; }
private:
- std::shared_ptr<C2StreamFormatConfig::input> mInputFormat;
- std::shared_ptr<C2StreamFormatConfig::output> mOutputFormat;
- std::shared_ptr<C2PortMimeConfig::input> mInputMediaType;
- std::shared_ptr<C2PortMimeConfig::output> mOutputMediaType;
+ std::shared_ptr<C2StreamBufferTypeSetting::input> mInputFormat;
+ std::shared_ptr<C2StreamBufferTypeSetting::output> mOutputFormat;
+ std::shared_ptr<C2PortMediaTypeSetting::input> mInputMediaType;
+ std::shared_ptr<C2PortMediaTypeSetting::output> mOutputMediaType;
std::shared_ptr<C2StreamSampleRateInfo::output> mSampleRate;
std::shared_ptr<C2StreamChannelCountInfo::output> mChannelCount;
- std::shared_ptr<C2BitrateTuning::input> mBitrate;
+ std::shared_ptr<C2StreamBitrateInfo::input> mBitrate;
std::shared_ptr<C2StreamMaxBufferSizeInfo::input> mInputMaxBufSize;
std::shared_ptr<C2StreamAacFormatInfo::input> mAacFormat;
std::shared_ptr<C2StreamProfileLevelInfo::input> mProfileLevel;
@@ -1067,6 +1067,8 @@
int i_loud_norm;
int i_target_loudness;
unsigned int i_sbr_mode;
+ uint32_t ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
/* Sampling Frequency */
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_CONFIG_PARAM,
@@ -1115,6 +1117,24 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+ if (pv_alloc_ptr == NULL) {
+ ALOGE("Cannot allocate requested memory of size %u", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, nullptr);
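The MEMTABS setup added above follows a query-allocate-register pattern: ask the DRC decoder how much table memory it needs, allocate that much 4-byte-aligned and zeroed, hand the pointer back with IA_API_CMD_SET_MEMTABS_PTR, and record the pointer (mMemoryVec) so it is freed on teardown. Below is a minimal sketch of the allocate-and-track half of that pattern, using only standard/POSIX calls and none of the ia_drc_dec_api specifics; the class and member names are illustrative, not taken from the plugin.

    #include <stdlib.h>   // posix_memalign, free
    #include <string.h>   // memset
    #include <vector>

    // Keeps every aligned allocation so all of them can be released together,
    // mirroring the mMemoryVec bookkeeping in the decoder plugin.
    class AlignedPool {
    public:
        void* allocate(size_t size, size_t align) {
            // posix_memalign needs a power-of-two alignment of at least
            // sizeof(void*); memalign(4, ...) in the plugin is looser.
            if (align < sizeof(void*)) align = sizeof(void*);
            void* ptr = nullptr;
            if (posix_memalign(&ptr, align, size) != 0) return nullptr;
            memset(ptr, 0, size);
            mBlocks.push_back(ptr);
            return ptr;
        }
        ~AlignedPool() {
            for (void* p : mBlocks) free(p);
        }
    private:
        std::vector<void*> mBlocks;
    };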
diff --git a/media/codec2/core/Android.bp b/media/codec2/core/Android.bp
index b723755..a7e8997 100644
--- a/media/codec2/core/Android.bp
+++ b/media/codec2/core/Android.bp
@@ -7,6 +7,9 @@
cc_library_shared {
name: "libcodec2",
vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
srcs: ["C2.cpp"],
diff --git a/media/codec2/core/include/C2Buffer.h b/media/codec2/core/include/C2Buffer.h
index c428122..3d3587c 100644
--- a/media/codec2/core/include/C2Buffer.h
+++ b/media/codec2/core/include/C2Buffer.h
@@ -1994,7 +1994,6 @@
GRAPHIC, ///< the buffer contains a single graphic block
GRAPHIC_CHUNKS, ///< the buffer contains one or more graphic blocks
};
- typedef type_t Type; // deprecated
/**
* Gets the type of this buffer (data).
@@ -2042,23 +2041,6 @@
*/
const C2BufferData data() const;
- /**
- * These will still work if used in onDeathNotify.
- */
-#if 0
- inline std::shared_ptr<C2LinearBuffer> asLinearBuffer() const {
- return mType == LINEAR ? std::shared_ptr::reinterpret_cast<C2LinearBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2GraphicBuffer> asGraphicBuffer() const {
- return mType == GRAPHIC ? std::shared_ptr::reinterpret_cast<C2GraphicBuffer>(this) : nullptr;
- }
-
- inline std::shared_ptr<C2CircularBuffer> asCircularBuffer() const {
- return mType == CIRCULAR ? std::shared_ptr::reinterpret_cast<C2CircularBuffer>(this) : nullptr;
- }
-#endif
-
///@name Pre-destroy notification handling
///@{
@@ -2163,8 +2145,6 @@
*/
static std::shared_ptr<C2Buffer> CreateGraphicBuffer(const C2ConstGraphicBlock &block);
-
-
protected:
// no public constructor
explicit C2Buffer(const std::vector<C2ConstLinearBlock> &blocks);
@@ -2173,7 +2153,6 @@
private:
class Impl;
std::shared_ptr<Impl> mImpl;
-// Type _mType;
};
/**
@@ -2200,109 +2179,6 @@
/// @}
-/// \cond INTERNAL
-
-/// \todo These are no longer used
-
-/// \addtogroup linear
-/// @{
-
-/** \deprecated */
-class C2LinearBuffer
- : public C2Buffer, public _C2LinearRangeAspect,
- public std::enable_shared_from_this<C2LinearBuffer> {
-public:
- /** \todo what is this? */
- const C2Handle *handle() const;
-
-protected:
- inline C2LinearBuffer(const C2ConstLinearBlock &block);
-
-private:
- class Impl;
- Impl *mImpl;
-};
-
-class C2ReadCursor;
-
-class C2WriteCursor {
-public:
- uint32_t remaining() const; // remaining data to be read
- void commit(); // commits the current position. discard data before current position
- void reset() const; // resets position to the last committed position
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2ReadCursor slice(uint32_t size) const;
- // slices off at most |size| bytes, and moves cursor ahead by the number of bytes
- // sliced off.
- C2WriteCursor reserve(uint32_t size);
- // bool read(T&);
- // bool write(T&);
- C2Fence waitForSpace(uint32_t size);
-};
-
-/// @}
-
-/// \addtogroup graphic
-/// @{
-
-struct C2ColorSpace {
-//public:
- enum Standard {
- BT601,
- BT709,
- BT2020,
- // TODO
- };
-
- enum Range {
- LIMITED,
- FULL,
- // TODO
- };
-
- enum TransferFunction {
- BT709Transfer,
- BT2020Transfer,
- HybridLogGamma2,
- HybridLogGamma4,
- // TODO
- };
-};
-
-/** \deprecated */
-class C2GraphicBuffer : public C2Buffer {
-public:
- // constant attributes
- inline uint32_t width() const { return mWidth; }
- inline uint32_t height() const { return mHeight; }
- inline uint32_t format() const { return mFormat; }
- inline const C2MemoryUsage usage() const { return mUsage; }
-
- // modifiable attributes
-
-
- virtual const C2ColorSpace colorSpace() const = 0;
- // best effort
- virtual void setColorSpace_be(const C2ColorSpace &colorSpace) = 0;
- virtual bool setColorSpace(const C2ColorSpace &colorSpace) = 0;
-
- const C2Handle *handle() const;
-
-protected:
- uint32_t mWidth;
- uint32_t mHeight;
- uint32_t mFormat;
- C2MemoryUsage mUsage;
-
- class Impl;
- Impl *mImpl;
-};
-
-/// @}
-
-/// \endcond
-
/// @}
#endif // C2BUFFER_H_
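With the deprecated C2LinearBuffer/C2GraphicBuffer classes and the disabled asLinearBuffer()/asGraphicBuffer() helpers removed, callers distinguish buffer kinds by inspecting the buffer's data type. A hedged sketch of that usage, assuming only the data() accessor and type_t enumerators this header keeps:

    #include <memory>

    void describePayload(const std::shared_ptr<C2Buffer> &buffer) {
        switch (buffer->data().type()) {
            case C2BufferData::LINEAR:
                // bitstream/audio payload; blocks are reachable via data().linearBlocks()
                break;
            case C2BufferData::GRAPHIC:
                // video frame payload; blocks are reachable via data().graphicBlocks()
                break;
            default:
                break;
        }
    }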
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index fb6edb6..9545c45 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -240,19 +240,6 @@
kParamIndexTimestampGapAdjustment, // input-surface, struct
kParamIndexSurfaceAllocator, // u32
-
- // deprecated indices due to renaming
- kParamIndexAacStreamFormat = kParamIndexAacPackaging,
- kParamIndexCsd = kParamIndexInitData,
- kParamIndexMaxVideoSizeHint = kParamIndexMaxPictureSize,
- kParamIndexMime = kParamIndexMediaType,
- kParamIndexRequestedInfos = kParamIndexSubscribedParamIndices,
-
-
- // deprecated indices due to removal
- kParamIndexSupportedParams = 0xDEAD0000,
- kParamIndexReadOnlyParams,
- kParamIndexTemporal,
};
}
@@ -337,14 +324,8 @@
// read-only
typedef C2GlobalParam<C2Setting, C2SimpleValueStruct<C2Component::domain_t>, kParamIndexDomain>
C2ComponentDomainSetting;
-typedef C2ComponentDomainSetting C2ComponentDomainInfo; // deprecated
-typedef C2Component::domain_t C2DomainKind; // deprecated
constexpr char C2_PARAMKEY_COMPONENT_DOMAIN[] = "component.domain";
-constexpr C2Component::domain_t C2DomainAudio = C2Component::DOMAIN_AUDIO; // deprecated
-constexpr C2Component::domain_t C2DomainOther = C2Component::DOMAIN_OTHER; // deprecate
-constexpr C2Component::domain_t C2DomainVideo = C2Component::DOMAIN_VIDEO; // deprecate
-
/**
* Component attributes.
*
@@ -359,9 +340,6 @@
C2ComponentAttributesSetting;
constexpr char C2_PARAMKEY_COMPONENT_ATTRIBUTES[] = "component.attributes";
-// deprecated
-typedef C2ComponentAttributesSetting C2ComponentTemporalInfo;
-
/**
* Time stretching.
*
@@ -707,7 +685,6 @@
typedef C2StreamParam<C2Info, C2ProfileLevelStruct, kParamIndexProfileLevel>
C2StreamProfileLevelInfo;
constexpr char C2_PARAMKEY_PROFILE_LEVEL[] = "coded.pl";
-#define C2_PARAMKEY_STREAM_PROFILE_LEVEL C2_PARAMKEY_PROFILE_LEVEL
/**
* Codec-specific initialization data.
@@ -719,9 +696,7 @@
* TODO: define for other codecs.
*/
typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexInitData> C2StreamInitDataInfo;
-typedef C2StreamInitDataInfo C2StreamCsdInfo; // deprecated
constexpr char C2_PARAMKEY_INIT_DATA[] = "coded.init-data";
-#define C2_PARAMKEY_STREAM_INIT_DATA C2_PARAMKEY_INIT_DATA
/**
* Supplemental Data.
@@ -781,11 +756,8 @@
* port media type.
*/
typedef C2PortParam<C2Setting, C2StringValue, kParamIndexMediaType> C2PortMediaTypeSetting;
-typedef C2PortMediaTypeSetting C2PortMimeConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_MEDIA_TYPE[] = "input.media-type";
constexpr char C2_PARAMKEY_OUTPUT_MEDIA_TYPE[] = "output.media-type";
-#define C2_NAME_INPUT_PORT_MIME_SETTING C2_PARAMKEY_INPUT_MEDIA_TYPE
-#define C2_NAME_OUTPUT_PORT_MIME_SETTING C2_PARAMKEY_OUTPUT_MEDIA_TYPE
typedef C2StreamParam<C2Setting, C2StringValue, kParamIndexMediaType> C2StreamMediaTypeSetting;
@@ -808,24 +780,20 @@
*/
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest> C2PortRequestedDelayTuning;
-typedef C2PortRequestedDelayTuning C2PortRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY_REQUEST[] = "input.delay.requested";
constexpr char C2_PARAMKEY_OUTPUT_DELAY_REQUEST[] = "output.delay.requested";
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelayRequest>
C2RequestedPipelineDelayTuning;
-typedef C2RequestedPipelineDelayTuning C2ComponentRequestedLatencyTuning; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY_REQUEST[] = "pipeline-delay.requested";
// read-only
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2PortActualDelayTuning;
-typedef C2PortActualDelayTuning C2PortLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_INPUT_DELAY[] = "input.delay.actual";
constexpr char C2_PARAMKEY_OUTPUT_DELAY[] = "output.delay.actual";
// read-only
typedef C2GlobalParam<C2Tuning, C2Uint32Value, kParamIndexDelay> C2ActualPipelineDelayTuning;
-typedef C2ActualPipelineDelayTuning C2ComponentLatencyInfo; // deprecated
constexpr char C2_PARAMKEY_PIPELINE_DELAY[] = "algo.delay.actual";
/**
@@ -875,7 +843,6 @@
*/
// private
typedef C2PortParam<C2Tuning, C2Uint32Value, kParamIndexStreamCount> C2PortStreamCountTuning;
-typedef C2PortStreamCountTuning C2PortStreamCountConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_COUNT[] = "input.stream-count";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_COUNT[] = "output.stream-count";
@@ -985,20 +952,9 @@
typedef C2StreamParam<C2Setting, C2SimpleValueStruct<C2EasyEnum<C2BufferData::type_t>>,
kParamIndexBufferType>
C2StreamBufferTypeSetting;
-
-constexpr C2BufferData::type_t C2FormatAudio = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatCompressed = C2BufferData::LINEAR; // deprecated
-constexpr C2BufferData::type_t C2FormatVideo = C2BufferData::GRAPHIC; // deprecated
-typedef C2BufferData::type_t C2FormatKind; // deprecated
-
-typedef C2StreamBufferTypeSetting C2StreamFormatConfig; // deprecated
constexpr char C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE[] = "input.buffers.type";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE[] = "output.buffers.type";
-// deprecated
-#define C2_NAME_INPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_INPUT_STREAM_BUFFER_TYPE
-#define C2_NAME_OUTPUT_STREAM_FORMAT_SETTING C2_PARAMKEY_OUTPUT_STREAM_BUFFER_TYPE
-
/**
* Memory usage.
*
@@ -1007,8 +963,6 @@
typedef C2StreamParam<C2Tuning, C2Uint64Value, kParamIndexUsage> C2StreamUsageTuning;
constexpr char C2_PARAMKEY_INPUT_STREAM_USAGE[] = "input.buffers.usage";
constexpr char C2_PARAMKEY_OUTPUT_STREAM_USAGE[] = "output.buffers.usage";
-// deprecated
-#define C2_NAME_INPUT_STREAM_USAGE_SETTING C2_PARAMKEY_INPUT_STREAM_USAGE
/**
* Picture (video or image frame) size.
@@ -1068,8 +1022,6 @@
constexpr char C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE[] = "input.buffers.max-size";
constexpr char C2_PARAMKEY_OUTPUT_MAX_BUFFER_SIZE[] = "output.buffers.max-size";
-#define C2_NAME_STREAM_MAX_BUFFER_SIZE_SETTING C2_PARAMKEY_INPUT_MAX_BUFFER_SIZE
-
/* ---------------------------------------- misc. state ---------------------------------------- */
/**
@@ -1170,9 +1122,7 @@
* Bitrate
*/
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexBitrate> C2StreamBitrateInfo;
-typedef C2StreamBitrateInfo C2BitrateTuning; // deprecated
constexpr char C2_PARAMKEY_BITRATE[] = "coded.bitrate";
-#define C2_NAME_STREAM_BITRATE_SETTING C2_PARAMKEY_BITRATE
/**
* Bitrate mode.
@@ -1261,15 +1211,8 @@
*
* This is used for the output of the video decoder, and the input of the video encoder.
*/
-typedef C2PictureSizeStruct C2VideoSizeStruct; // deprecated
-
typedef C2StreamParam<C2Info, C2PictureSizeStruct, kParamIndexPictureSize> C2StreamPictureSizeInfo;
constexpr char C2_PARAMKEY_PICTURE_SIZE[] = "raw.size";
-#define C2_PARAMKEY_STREAM_PICTURE_SIZE C2_PARAMKEY_PICTURE_SIZE
-#define C2_NAME_STREAM_VIDEO_SIZE_INFO C2_PARAMKEY_PICTURE_SIZE
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamInfo; // deprecated
-typedef C2StreamPictureSizeInfo C2VideoSizeStreamTuning; // deprecated
-#define C2_NAME_STREAM_VIDEO_SIZE_SETTING C2_PARAMKEY_PICTURE_SIZE
/**
* Crop rectangle.
@@ -1344,12 +1287,10 @@
kParamIndexScalingMethod>
C2StreamScalingMethodTuning;
constexpr char C2_PARAMKEY_SCALING_MODE[] = "raw.scaling-method";
-#define C2_PARAMKEY_STREAM_SCALING_MODE C2_PARAMKEY_SCALING_MODE
typedef C2StreamParam<C2Tuning, C2PictureSizeStruct, kParamIndexScaledPictureSize>
C2StreamScaledPictureSizeTuning;
constexpr char C2_PARAMKEY_SCALED_PICTURE_SIZE[] = "raw.scaled-size";
-#define C2_PARAMKEY_STREAM_SCALED_PICTURE_SIZE C2_PARAMKEY_SCALED_PICTURE_SIZE
typedef C2StreamParam<C2Tuning, C2RectStruct, kParamIndexScaledCropRect>
C2StreamScaledCropRectTuning;
@@ -1504,15 +1445,8 @@
MATRIX_BT2020_CONSTANT, ///< Rec.ITU-R BT.2020 constant luminance
MATRIX_VENDOR_START = 0x80, ///< vendor-specific matrix coefficient values start here
MATRIX_OTHER = 0xff, ///< max value, reserved for undefined values
-
- MATRIX_SMPTE240M = MATRIX_240M, // deprecated
- MATRIX_BT2020CONSTANT = MATRIX_BT2020_CONSTANT, // deprecated
)
-constexpr C2Color::matrix_t MATRIX_BT470_6M = MATRIX_FCC47_73_682; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT709_5 = MATRIX_BT709; // deprecated
-constexpr C2Color::matrix_t MATRIX_BT601_6 = MATRIX_BT601; // deprecated
-
struct C2ColorAspectsStruct {
C2Color::range_t range;
C2Color::primaries_t primaries;
@@ -1635,7 +1569,6 @@
*/
typedef C2StreamParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2StreamFrameRateInfo;
constexpr char C2_PARAMKEY_FRAME_RATE[] = "coded.frame-rate";
-#define C2_NAME_STREAM_FRAME_RATE_SETTING C2_PARAMKEY_FRAME_RATE
typedef C2PortParam<C2Info, C2FloatValue, kParamIndexFrameRate> C2PortFrameRateInfo;
constexpr char C2_PARAMKEY_INPUT_FRAME_RATE[] = "input.frame-rate";
@@ -1668,9 +1601,6 @@
B_FRAME = (1 << 3), ///< backward predicted (out-of-order) frame
)
-typedef C2Config::picture_type_t C2PictureTypeMask; // deprecated
-constexpr C2Config::picture_type_t C2PictureTypeKeyFrame = C2Config::SYNC_FRAME; // deprecated
-
/**
* Allowed picture types.
*/
@@ -1750,8 +1680,6 @@
typedef C2StreamParam<C2Tuning, C2Int64Value, kParamIndexSyncFrameInterval>
C2StreamSyncFrameIntervalTuning;
constexpr char C2_PARAMKEY_SYNC_FRAME_INTERVAL[] = "coding.sync-frame-interval";
-// deprecated
-#define C2_PARAMKEY_SYNC_FRAME_PERIOD C2_PARAMKEY_SYNC_FRAME_INTERVAL
/**
* Temporal layering
@@ -1885,8 +1813,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexSampleRate> C2StreamSampleRateInfo;
constexpr char C2_PARAMKEY_SAMPLE_RATE[] = "raw.sample-rate";
constexpr char C2_PARAMKEY_CODED_SAMPLE_RATE[] = "coded.sample-rate";
-// deprecated
-#define C2_NAME_STREAM_SAMPLE_RATE_SETTING C2_PARAMKEY_SAMPLE_RATE
/**
* Channel count.
@@ -1894,8 +1820,6 @@
typedef C2StreamParam<C2Info, C2Uint32Value, kParamIndexChannelCount> C2StreamChannelCountInfo;
constexpr char C2_PARAMKEY_CHANNEL_COUNT[] = "raw.channel-count";
constexpr char C2_PARAMKEY_CODED_CHANNEL_COUNT[] = "coded.channel-count";
-// deprecated
-#define C2_NAME_STREAM_CHANNEL_COUNT_SETTING C2_PARAMKEY_CHANNEL_COUNT
/**
* Max channel count. Used to limit the number of coded or decoded channels.
@@ -2005,16 +1929,10 @@
AAC_PACKAGING_ADTS
)
-typedef C2Config::aac_packaging_t C2AacStreamFormatKind; // deprecated
-// deprecated
-constexpr C2Config::aac_packaging_t C2AacStreamFormatRaw = C2Config::AAC_PACKAGING_RAW;
-constexpr C2Config::aac_packaging_t C2AacStreamFormatAdts = C2Config::AAC_PACKAGING_ADTS;
-
typedef C2StreamParam<C2Info, C2SimpleValueStruct<C2EasyEnum<C2Config::aac_packaging_t>>,
kParamIndexAacPackaging> C2StreamAacPackagingInfo;
typedef C2StreamAacPackagingInfo C2StreamAacFormatInfo;
constexpr char C2_PARAMKEY_AAC_PACKAGING[] = "coded.aac-packaging";
-#define C2_NAME_STREAM_AAC_FORMAT_SETTING C2_PARAMKEY_AAC_PACKAGING
/* ================================ PLATFORM-DEFINED PARAMETERS ================================ */
@@ -2134,7 +2052,6 @@
typedef C2GlobalParam<C2Tuning, C2EasyBoolValue, kParamIndexInputSurfaceEos>
C2InputSurfaceEosTuning;
constexpr char C2_PARAMKEY_INPUT_SURFACE_EOS[] = "input-surface.eos";
-#define C2_NAME_INPUT_SURFACE_EOS_TUNING C2_PARAMKEY_INPUT_SURFACE_EOS
/**
* Start/suspend/resume/stop controls and timestamps for input surface.
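Every removal in this header is an alias with a one-to-one replacement (for example C2StreamCsdInfo -> C2StreamInitDataInfo, C2BitrateTuning -> C2StreamBitrateInfo, C2VideoSizeStreamTuning -> C2StreamPictureSizeInfo, C2AacStreamFormatAdts -> C2Config::AAC_PACKAGING_ADTS), so the rest of this patch is a mechanical rename. Out-of-tree code that still spells the old names can stay buildable during migration with file-local shims like the hedged sketch below; these aliases are illustrative and are not part of C2Config.h:

    // Temporary, file-local shims for not-yet-migrated code.
    using C2StreamCsdInfo = C2StreamInitDataInfo;
    using C2BitrateTuning = C2StreamBitrateInfo;
    constexpr C2Config::aac_packaging_t C2AacStreamFormatAdts =
            C2Config::AAC_PACKAGING_ADTS;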
diff --git a/media/codec2/core/include/C2Param.h b/media/codec2/core/include/C2Param.h
index efc5c89..d264bf3 100644
--- a/media/codec2/core/include/C2Param.h
+++ b/media/codec2/core/include/C2Param.h
@@ -1012,15 +1012,6 @@
_mNamedValues(_NamedValuesGetter<B>::getNamedValues()),
_mFieldId(offset) {}
-/*
- template<typename T, typename B=typename std::remove_extent<T>::type>
- inline C2FieldDescriptor<T, B, false>(T* offset, const char *name)
- : _mType(this->GetType((B*)nullptr)),
- _mExtent(std::is_array<T>::value ? std::extent<T>::value : 1),
- _mName(name),
- _mFieldId(offset) {}
-*/
-
/// \deprecated
template<typename T, typename S, class B=typename std::remove_extent<T>::type>
inline C2FieldDescriptor(S*, T S::* field, const char *name)
diff --git a/media/codec2/hidl/1.0/utils/Android.bp b/media/codec2/hidl/1.0/utils/Android.bp
index d0296a5..f5aa65b 100644
--- a/media/codec2/hidl/1.0/utils/Android.bp
+++ b/media/codec2/hidl/1.0/utils/Android.bp
@@ -3,6 +3,9 @@
cc_library {
name: "libcodec2_hidl@1.0",
vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
defaults: ["hidl_defaults"],
diff --git a/media/codec2/hidl/1.0/utils/Component.cpp b/media/codec2/hidl/1.0/utils/Component.cpp
index 0473b57..f3bf6f7 100644
--- a/media/codec2/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hidl/1.0/utils/Component.cpp
@@ -22,7 +22,6 @@
#include <codec2/hidl/1.0/ComponentStore.h>
#include <codec2/hidl/1.0/InputBufferManager.h>
-#include <android/hardware/media/c2/1.0/IInputSink.h>
#include <hidl/HidlBinderSupport.h>
#include <utils/Timers.h>
@@ -298,19 +297,12 @@
Return<void> Component::connectToInputSurface(
const sp<IInputSurface>& inputSurface,
connectToInputSurface_cb _hidl_cb) {
- sp<Sink> sink;
- {
- std::lock_guard<std::mutex> lock(mSinkMutex);
- if (!mSink) {
- mSink = new Sink(shared_from_this());
- }
- sink = mSink;
- }
Status status;
sp<IInputSurfaceConnection> connection;
- auto transStatus = inputSurface->connect(sink,
- [&status, &connection](Status s,
- const sp<IInputSurfaceConnection>& c) {
+ auto transStatus = inputSurface->connect(
+ asInputSink(),
+ [&status, &connection](
+ Status s, const sp<IInputSurfaceConnection>& c) {
status = s;
connection = c;
}
@@ -454,6 +446,14 @@
return sp<IComponentInterface>(mInterface);
}
+Return<sp<IInputSink>> Component::asInputSink() {
+ std::lock_guard<std::mutex> lock(mSinkMutex);
+ if (!mSink) {
+ mSink = new Sink(shared_from_this());
+ }
+ return {mSink};
+}
+
std::shared_ptr<C2Component> Component::findLocalComponent(
const sp<IInputSink>& sink) {
return Component::Sink::findLocalComponent(sink);
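connectToInputSurface() no longer builds the sink inline; it goes through the new asInputSink() method, which creates the Sink on first use under mSinkMutex and returns the cached instance afterwards, so the surface connection and any other caller share one sink. The same lazy, mutex-guarded initialization in isolation (the types and names below are placeholders, not the HIDL ones):

    #include <memory>
    #include <mutex>

    class SinkHolder {
    public:
        std::shared_ptr<int> get() {
            std::lock_guard<std::mutex> lock(mMutex);
            if (!mSink) {
                // First call creates the object; later calls return the same one.
                mSink = std::make_shared<int>(0);
            }
            return mSink;
        }
    private:
        std::mutex mMutex;
        std::shared_ptr<int> mSink;
    };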
diff --git a/media/codec2/hidl/1.0/utils/InputSurface.cpp b/media/codec2/hidl/1.0/utils/InputSurface.cpp
index 2cbe64b..85c44c3 100644
--- a/media/codec2/hidl/1.0/utils/InputSurface.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurface.cpp
@@ -45,7 +45,7 @@
setDerivedInstance(this);
addParameter(
- DefineParam(mEos, C2_NAME_INPUT_SURFACE_EOS_TUNING)
+ DefineParam(mEos, C2_PARAMKEY_INPUT_SURFACE_EOS)
.withDefault(new C2InputSurfaceEosTuning(false))
.withFields({C2F(mEos, value).oneOf({true, false})})
.withSetter(EosSetter)
diff --git a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
index 1024f50..c9932ef 100644
--- a/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
+++ b/media/codec2/hidl/1.0/utils/InputSurfaceConnection.cpp
@@ -124,7 +124,7 @@
}
// TODO: read settings properly from the interface
- C2VideoSizeStreamTuning::input inputSize;
+ C2StreamPictureSizeInfo::input inputSize;
C2StreamUsageTuning::input usage;
c2_status_t c2Status = queryFromSink({ &inputSize, &usage },
{},
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
index 4ac95c5..e444013 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
@@ -26,6 +26,7 @@
#include <android/hardware/media/c2/1.0/IComponentInterface.h>
#include <android/hardware/media/c2/1.0/IComponentListener.h>
#include <android/hardware/media/c2/1.0/IComponentStore.h>
+#include <android/hardware/media/c2/1.0/IInputSink.h>
#include <hidl/Status.h>
#include <hwbinder/IBinder.h>
@@ -94,6 +95,7 @@
virtual Return<Status> reset() override;
virtual Return<Status> release() override;
virtual Return<sp<IComponentInterface>> getInterface() override;
+ virtual Return<sp<IInputSink>> asInputSink() override;
// Returns a C2Component associated to the given sink if the sink is indeed
// a local component. Returns nullptr otherwise.
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
index d3b37d7..89947d4 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioDecTest.cpp
@@ -506,16 +506,17 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-class Codec2AudioDecDecodeTest : public Codec2AudioDecHidlTest,
- public ::testing::WithParamInterface<int32_t> {
+class Codec2AudioDecDecodeTest
+ : public Codec2AudioDecHidlTest,
+ public ::testing::WithParamInterface<std::pair<int32_t, bool>> {
};
TEST_P(Codec2AudioDecDecodeTest, DecodeTest) {
description("Decodes input file");
if (mDisableTest) return;
- uint32_t streamIndex = GetParam();
- ASSERT_EQ(mComponent->start(), C2_OK);
+ uint32_t streamIndex = GetParam().first;
+ bool signalEOS = GetParam().second;
mTimestampDevTest = true;
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
@@ -567,16 +568,27 @@
ASSERT_EQ(eleStream.is_open(), true);
ASSERT_NO_FATAL_FAILURE(decodeNFrames(
mComponent, mQueueLock, mQueueCondition, mWorkQueue, mFlushedIndices,
- mLinearPool, eleStream, &Info, 0, (int)Info.size()));
+ mLinearPool, eleStream, &Info, 0, (int)Info.size(), signalEOS));
+
+ // If EOS was not signalled above, send an empty input buffer with the EOS flag set
+ size_t infoSize = Info.size();
+ if (!signalEOS) {
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue, 1));
+ ASSERT_NO_FATAL_FAILURE(
+ testInputBuffer(mComponent, mQueueLock, mWorkQueue,
+ C2FrameData::FLAG_END_OF_STREAM, false));
+ infoSize += 1;
+ }
// blocking call to ensure the application waits till all the inputs are
// consumed
ASSERT_NO_FATAL_FAILURE(
waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
eleStream.close();
- if (mFramesReceived != Info.size()) {
+ if (mFramesReceived != infoSize) {
ALOGE("Input buffer count and Output buffer count mismatch");
ALOGE("framesReceived : %d inputFrames : %zu", mFramesReceived,
- Info.size());
+ infoSize);
ASSERT_TRUE(false);
}
ASSERT_EQ(mEos, true);
@@ -608,9 +620,12 @@
}
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-
-INSTANTIATE_TEST_CASE_P(StreamIndexes, Codec2AudioDecDecodeTest,
- ::testing::Values(0, 1));
+// DecodeTest with StreamIndex and EOS / No EOS
+INSTANTIATE_TEST_CASE_P(StreamIndexAndEOS, Codec2AudioDecDecodeTest,
+ ::testing::Values(std::make_pair(0, false),
+ std::make_pair(0, true),
+ std::make_pair(1, false),
+ std::make_pair(1, true)));
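The decode test is now parameterized over (stream index, signalEOS) pairs rather than a bare stream index, so every stream is exercised both with EOS carried on the last input and with a trailing empty EOS-only buffer. The same gtest pattern in a self-contained form (link against gtest_main; this is a sketch outside the VTS harness, not part of it):

    #include <cstdint>
    #include <utility>
    #include <gtest/gtest.h>

    class PairParamTest
        : public ::testing::TestWithParam<std::pair<int32_t, bool>> {};

    TEST_P(PairParamTest, ReadsBothFields) {
        int32_t streamIndex = GetParam().first;
        bool signalEOS = GetParam().second;
        EXPECT_GE(streamIndex, 0);
        (void)signalEOS;  // a real test would branch on this, as DecodeTest does
    }

    INSTANTIATE_TEST_CASE_P(StreamIndexAndEOS, PairParamTest,
                            ::testing::Values(std::make_pair(0, false),
                                              std::make_pair(0, true),
                                              std::make_pair(1, false),
                                              std::make_pair(1, true)));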
// thumbnail test
TEST_F(Codec2AudioDecHidlTest, ThumbnailTest) {
diff --git a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
index a74d43e..0946fa6 100644
--- a/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/audio/VtsHidlC2V1_0TargetAudioEncTest.cpp
@@ -123,6 +123,7 @@
mFramesReceived = 0;
if (mCompName == unknown_comp) mDisableTest = true;
if (mDisableTest) std::cout << "[ WARN ] Test Disabled \n";
+ getInputMaxBufSize();
}
virtual void TearDown() override {
@@ -157,6 +158,7 @@
bool mDisableTest;
standardComp mCompName;
uint32_t mFramesReceived;
+ int32_t mInputMaxBufSize;
std::list<uint64_t> mFlushedIndices;
C2BlockPool::local_id_t mBlockPoolId;
@@ -175,6 +177,27 @@
static void description(const std::string& description) {
RecordProperty("description", description);
}
+
+ // For encoder components, query the size of the allocated input buffer
+ void getInputMaxBufSize() {
+ int32_t bitStreamInfo[1] = {0};
+ std::vector<std::unique_ptr<C2Param>> inParams;
+ c2_status_t status = mComponent->query(
+ {}, {C2StreamMaxBufferSizeInfo::input::PARAM_TYPE}, C2_DONT_BLOCK,
+ &inParams);
+ if (status != C2_OK && inParams.size() == 0) {
+ ALOGE("Query MaxBufferSizeInfo failed => %d", status);
+ ASSERT_TRUE(false);
+ } else {
+ size_t offset = sizeof(C2Param);
+ for (size_t i = 0; i < inParams.size(); ++i) {
+ C2Param* param = inParams[i].get();
+ bitStreamInfo[i] = *(int32_t*)((uint8_t*)param + offset);
+ }
+ }
+ mInputMaxBufSize = bitStreamInfo[0];
+ }
+
};
void validateComponent(
@@ -355,60 +378,79 @@
ASSERT_EQ(mDisableTest, false);
}
-TEST_F(Codec2AudioEncHidlTest, EncodeTest) {
+class Codec2AudioEncEncodeTest
+ : public Codec2AudioEncHidlTest,
+ public ::testing::WithParamInterface<std::pair<bool, int32_t>> {
+};
+
+TEST_P(Codec2AudioEncEncodeTest, EncodeTest) {
ALOGV("EncodeTest");
if (mDisableTest) return;
char mURL[512];
strcpy(mURL, gEnv->getRes().c_str());
GetURLForComponent(mCompName, mURL);
+ bool signalEOS = GetParam().first;
+ // Ratio w.r.t. mInputMaxBufSize
+ int32_t inputMaxBufRatio = GetParam().second;
- // Setting default configuration
+ // Setting default channel count and sample rate
int32_t nChannels = 2;
int32_t nSampleRate = 44100;
- int32_t samplesPerFrame = 1024;
switch (mCompName) {
case aac:
nChannels = 2;
nSampleRate = 48000;
- samplesPerFrame = 1024;
break;
case flac:
nChannels = 2;
nSampleRate = 48000;
- samplesPerFrame = 1152;
break;
case opus:
nChannels = 2;
nSampleRate = 48000;
- samplesPerFrame = 960;
break;
case amrnb:
nChannels = 1;
nSampleRate = 8000;
- samplesPerFrame = 160;
break;
case amrwb:
nChannels = 1;
nSampleRate = 16000;
- samplesPerFrame = 160;
break;
default:
ASSERT_TRUE(false);
}
+ int32_t samplesPerFrame =
+ ((mInputMaxBufSize / inputMaxBufRatio) / (nChannels * 2));
+ ALOGV("signalEOS %d mInputMaxBufSize %d samplesPerFrame %d", signalEOS,
+ mInputMaxBufSize, samplesPerFrame);
+
if (!setupConfigParam(mComponent, nChannels, nSampleRate)) {
std::cout << "[ WARN ] Test Skipped \n";
return;
}
ASSERT_EQ(mComponent->start(), C2_OK);
std::ifstream eleStream;
- uint32_t numFrames = 128;
+ uint32_t numFrames = 16;
eleStream.open(mURL, std::ifstream::binary);
ASSERT_EQ(eleStream.is_open(), true);
ALOGV("mURL : %s", mURL);
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mLinearPool, eleStream, numFrames,
- samplesPerFrame, nChannels, nSampleRate));
+ samplesPerFrame, nChannels, nSampleRate, false,
+ signalEOS));
+
+ // If EOS was not signalled above, send an empty input buffer with the EOS flag set
+ if (!signalEOS) {
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue, 1));
+ ASSERT_NO_FATAL_FAILURE(
+ testInputBuffer(mComponent, mQueueLock, mWorkQueue,
+ C2FrameData::FLAG_END_OF_STREAM, false));
+ numFrames += 1;
+ }
+
// blocking call to ensure the application waits till all the inputs are
// consumed
ASSERT_NO_FATAL_FAILURE(
@@ -429,6 +471,15 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
+// EncodeTest with EOS / No EOS and inputMaxBufRatio
+// inputMaxBufRatio is the ratio w.r.t. mInputMaxBufSize
+INSTANTIATE_TEST_CASE_P(EncodeTest, Codec2AudioEncEncodeTest,
+ ::testing::Values(std::make_pair(false, 1),
+ std::make_pair(false, 2),
+ std::make_pair(true, 1),
+ std::make_pair(true, 2)));
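samplesPerFrame is no longer a fixed per-codec constant; it is derived from the component's reported maximum input buffer size, the test's inputMaxBufRatio parameter, and the channel count, assuming 16-bit (2-byte) interleaved PCM. The arithmetic in isolation, with a hypothetical worked example:

    #include <cstdint>

    // PCM frames that fit in (maxInputBufSize / ratio) bytes of 16-bit
    // interleaved audio with the given channel count.
    constexpr int32_t samplesPerFrame(int32_t maxInputBufSize, int32_t ratio,
                                      int32_t channels) {
        return (maxInputBufSize / ratio) / (channels * 2);
    }

    // e.g. an 8192-byte max input buffer, ratio 2, stereo -> 1024 frames
    static_assert(samplesPerFrame(8192, 2, 2) == 1024, "worked example");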
+
+
TEST_F(Codec2AudioEncHidlTest, EOSTest) {
description("Test empty input buffer with EOS flag");
if (mDisableTest) return;
diff --git a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
index 31da111..1f36270 100644
--- a/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/common/media_c2_hidl_test_common.cpp
@@ -93,14 +93,14 @@
std::vector<std::unique_ptr<C2SettingResult>> failures;
for (size_t i = 0; i < updates.size(); ++i) {
C2Param* param = updates[i].get();
- if (param->index() == C2StreamCsdInfo::output::PARAM_TYPE) {
+ if (param->index() == C2StreamInitDataInfo::output::PARAM_TYPE) {
csd = true;
} else if ((param->index() ==
C2StreamSampleRateInfo::output::PARAM_TYPE) ||
(param->index() ==
C2StreamChannelCountInfo::output::PARAM_TYPE) ||
(param->index() ==
- C2VideoSizeStreamInfo::output::PARAM_TYPE)) {
+ C2StreamPictureSizeInfo::output::PARAM_TYPE)) {
configParam.push_back(param);
}
}
diff --git a/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp b/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
index 01e64cb..e88fbc7 100644
--- a/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/master/VtsHidlC2V1_0TargetMasterTest.cpp
@@ -64,7 +64,7 @@
C2String name = mClient->getName();
EXPECT_NE(name.empty(), true) << "Invalid Codec2Client Name";
- // Get List of components from HIDL Codec2Client instance
+ // Get List of components from all known services
const std::vector<C2Component::Traits> listTraits =
mClient->ListComponents();
@@ -78,8 +78,9 @@
listener.reset(new CodecListener());
ASSERT_NE(listener, nullptr);
- mClient->createComponent(listTraits[i].name.c_str(), listener,
- &component);
+ // Create component from all known services
+ component = mClient->CreateComponentByName(
+ listTraits[i].name.c_str(), listener, &mClient);
ASSERT_NE(component, nullptr) << "Create component failed for "
<< listTraits[i].name.c_str();
}
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
index 8cbb7a7..dffcb6e 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoDecTest.cpp
@@ -421,8 +421,9 @@
ASSERT_EQ(mDisableTest, false);
}
-class Codec2VideoDecDecodeTest : public Codec2VideoDecHidlTest,
- public ::testing::WithParamInterface<int32_t> {
+class Codec2VideoDecDecodeTest
+ : public Codec2VideoDecHidlTest,
+ public ::testing::WithParamInterface<std::pair<int32_t, bool>> {
};
// Bitstream Test
@@ -430,7 +431,8 @@
description("Decodes input file");
if (mDisableTest) return;
- uint32_t streamIndex = GetParam();
+ uint32_t streamIndex = GetParam().first;
+ bool signalEOS = GetParam().second;
char mURL[512], info[512];
std::ifstream eleStream, eleInfo;
strcpy(mURL, gEnv->getRes().c_str());
@@ -464,8 +466,18 @@
ASSERT_EQ(eleStream.is_open(), true);
ASSERT_NO_FATAL_FAILURE(decodeNFrames(
mComponent, mQueueLock, mQueueCondition, mWorkQueue, mFlushedIndices,
- mLinearPool, eleStream, &Info, 0, (int)Info.size()));
+ mLinearPool, eleStream, &Info, 0, (int)Info.size(), signalEOS));
+ // If EOS was not signalled above, send an empty input buffer with the EOS flag set
+ size_t infoSize = Info.size();
+ if (!signalEOS) {
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue, 1));
+ ASSERT_NO_FATAL_FAILURE(
+ testInputBuffer(mComponent, mQueueLock, mWorkQueue,
+ C2FrameData::FLAG_END_OF_STREAM, false));
+ infoSize += 1;
+ }
// blocking call to ensure the application waits till all the inputs are
// consumed
if (!mEos) {
@@ -475,19 +487,22 @@
}
eleStream.close();
- if (mFramesReceived != Info.size()) {
+ if (mFramesReceived != infoSize) {
ALOGE("Input buffer count and Output buffer count mismatch");
ALOGV("framesReceived : %d inputFrames : %zu", mFramesReceived,
- Info.size());
+ infoSize);
ASSERT_TRUE(false);
}
if (mTimestampDevTest) EXPECT_EQ(mTimestampUslist.empty(), true);
ASSERT_EQ(mComponent->stop(), C2_OK);
}
-
-INSTANTIATE_TEST_CASE_P(StreamIndexes, Codec2VideoDecDecodeTest,
- ::testing::Values(0, 1));
+// DecodeTest with StreamIndex and EOS / No EOS
+INSTANTIATE_TEST_CASE_P(StreamIndexAndEOS, Codec2VideoDecDecodeTest,
+ ::testing::Values(std::make_pair(0, false),
+ std::make_pair(0, true),
+ std::make_pair(1, false),
+ std::make_pair(1, true)));
// Adaptive Test
TEST_F(Codec2VideoDecHidlTest, AdaptiveDecodeTest) {
diff --git a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
index 95d1b72..673dc85 100644
--- a/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
+++ b/media/codec2/hidl/1.0/vts/functional/video/VtsHidlC2V1_0TargetVideoEncTest.cpp
@@ -229,7 +229,7 @@
// Set Default config param.
bool Codec2VideoEncHidlTest::setupConfigParam(int32_t nWidth, int32_t nHeight) {
std::vector<std::unique_ptr<C2SettingResult>> failures;
- C2VideoSizeStreamTuning::input inputSize(0u, nWidth, nHeight);
+ C2StreamPictureSizeInfo::input inputSize(0u, nWidth, nHeight);
std::vector<C2Param*> configParam{&inputSize};
c2_status_t status =
mComponent->config(configParam, C2_DONT_BLOCK, &failures);
@@ -343,13 +343,18 @@
ASSERT_EQ(mDisableTest, false);
}
-TEST_F(Codec2VideoEncHidlTest, EncodeTest) {
+class Codec2VideoEncEncodeTest : public Codec2VideoEncHidlTest,
+ public ::testing::WithParamInterface<bool> {
+};
+
+TEST_P(Codec2VideoEncEncodeTest, EncodeTest) {
description("Encodes input file");
if (mDisableTest) return;
char mURL[512];
int32_t nWidth = ENC_DEFAULT_FRAME_WIDTH;
int32_t nHeight = ENC_DEFAULT_FRAME_HEIGHT;
+ bool signalEOS = GetParam();
strcpy(mURL, gEnv->getRes().c_str());
GetURLForComponent(mURL);
@@ -367,7 +372,18 @@
ASSERT_NO_FATAL_FAILURE(
encodeNFrames(mComponent, mQueueLock, mQueueCondition, mWorkQueue,
mFlushedIndices, mGraphicPool, eleStream,
- 0, ENC_NUM_FRAMES, nWidth, nHeight));
+ 0, ENC_NUM_FRAMES, nWidth, nHeight, false, signalEOS));
+
+ // If EOS was not signalled above, send an empty input buffer with the EOS flag set
+ uint32_t inputFrames = ENC_NUM_FRAMES;
+ if (!signalEOS) {
+ ASSERT_NO_FATAL_FAILURE(
+ waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue, 1));
+ ASSERT_NO_FATAL_FAILURE(
+ testInputBuffer(mComponent, mQueueLock, mWorkQueue,
+ C2FrameData::FLAG_END_OF_STREAM, false));
+ inputFrames += 1;
+ }
// blocking call to ensure the application waits till all the inputs are
// consumed
@@ -376,10 +392,10 @@
waitOnInputConsumption(mQueueLock, mQueueCondition, mWorkQueue));
eleStream.close();
- if (mFramesReceived != ENC_NUM_FRAMES) {
+ if (mFramesReceived != inputFrames) {
ALOGE("Input buffer count and Output buffer count mismatch");
ALOGE("framesReceived : %d inputFrames : %d", mFramesReceived,
- ENC_NUM_FRAMES);
+ inputFrames);
ASSERT_TRUE(false);
}
@@ -394,6 +410,10 @@
ASSERT_EQ(mComponent->stop(), C2_OK);
}
+// EncodeTest with EOS / No EOS
+INSTANTIATE_TEST_CASE_P(EncodeTestwithEOS, Codec2VideoEncEncodeTest,
+ ::testing::Values(true, false));
+
TEST_F(Codec2VideoEncHidlTest, EOSTest) {
description("Test empty input buffer with EOS flag");
if (mDisableTest) return;
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index dce3222..2d10c67 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -709,6 +709,49 @@
Mutexed<Config>::Locked config(mConfig);
config->mUsingSurface = surface != nullptr;
+ // Enforce required parameters
+ int32_t i32;
+ float flt;
+ if (config->mDomain & Config::IS_AUDIO) {
+ if (!msg->findInt32(KEY_SAMPLE_RATE, &i32)) {
+ ALOGD("sample rate is missing, which is required for audio components.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_CHANNEL_COUNT, &i32)) {
+ ALOGD("channel count is missing, which is required for audio components.");
+ return BAD_VALUE;
+ }
+ if ((config->mDomain & Config::IS_ENCODER)
+ && !mime.equalsIgnoreCase(MEDIA_MIMETYPE_AUDIO_FLAC)
+ && !msg->findInt32(KEY_BIT_RATE, &i32)
+ && !msg->findFloat(KEY_BIT_RATE, &flt)) {
+ ALOGD("bitrate is missing, which is required for audio encoders.");
+ return BAD_VALUE;
+ }
+ }
+ if (config->mDomain & (Config::IS_IMAGE | Config::IS_VIDEO)) {
+ if (!msg->findInt32(KEY_WIDTH, &i32)) {
+ ALOGD("width is missing, which is required for image/video components.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_HEIGHT, &i32)) {
+ ALOGD("height is missing, which is required for image/video components.");
+ return BAD_VALUE;
+ }
+ if ((config->mDomain & Config::IS_ENCODER) && (config->mDomain & Config::IS_VIDEO)) {
+ if (!msg->findInt32(KEY_BIT_RATE, &i32)
+ && !msg->findFloat(KEY_BIT_RATE, &flt)) {
+ ALOGD("bitrate is missing, which is required for video encoders.");
+ return BAD_VALUE;
+ }
+ if (!msg->findInt32(KEY_I_FRAME_INTERVAL, &i32)
+ && !msg->findFloat(KEY_I_FRAME_INTERVAL, &flt)) {
+ ALOGD("I frame interval is missing, which is required for video encoders.");
+ return BAD_VALUE;
+ }
+ }
+ }
+
/*
* Handle input surface configuration
*/
@@ -718,13 +761,14 @@
{
config->mISConfig->mMinFps = 0;
int64_t value;
- if (msg->findInt64("repeat-previous-frame-after", &value) && value > 0) {
+ if (msg->findInt64(KEY_REPEAT_PREVIOUS_FRAME_AFTER, &value) && value > 0) {
config->mISConfig->mMinFps = 1e6 / value;
}
- (void)msg->findFloat("max-fps-to-encoder", &config->mISConfig->mMaxFps);
+ (void)msg->findFloat(
+ KEY_MAX_FPS_TO_ENCODER, &config->mISConfig->mMaxFps);
config->mISConfig->mMinAdjustedFps = 0;
config->mISConfig->mFixedAdjustedFps = 0;
- if (msg->findInt64("max-pts-gap-to-encoder", &value)) {
+ if (msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &value)) {
if (value < 0 && value >= INT32_MIN) {
config->mISConfig->mFixedAdjustedFps = -1e6 / value;
} else if (value > 0 && value <= INT32_MAX) {
@@ -745,7 +789,7 @@
config->mISConfig->mSuspended = false;
config->mISConfig->mSuspendAtUs = -1;
int32_t value;
- if (msg->findInt32("create-input-buffers-suspended", &value) && value) {
+ if (msg->findInt32(KEY_CREATE_INPUT_SURFACE_SUSPENDED, &value) && value) {
config->mISConfig->mSuspended = true;
}
}
@@ -1453,7 +1497,7 @@
*/
if ((config->mDomain & (Config::IS_VIDEO | Config::IS_IMAGE))
&& (config->mDomain & Config::IS_ENCODER) && config->mInputSurface && config->mISConfig) {
- (void)params->findInt64("time-offset-us", &config->mISConfig->mTimeOffsetUs);
+ (void)params->findInt64(PARAMETER_KEY_OFFSET_TIME, &config->mISConfig->mTimeOffsetUs);
if (params->findInt64("skip-frames-before", &config->mISConfig->mStartAtUs)) {
config->mISConfig->mStopped = false;
@@ -1462,10 +1506,10 @@
}
int32_t value;
- if (params->findInt32("drop-input-frames", &value)) {
+ if (params->findInt32(PARAMETER_KEY_SUSPEND, &value)) {
config->mISConfig->mSuspended = value;
config->mISConfig->mSuspendAtUs = -1;
- (void)params->findInt64("drop-start-time-us", &config->mISConfig->mSuspendAtUs);
+ (void)params->findInt64(PARAMETER_KEY_SUSPEND_TIME, &config->mISConfig->mSuspendAtUs);
}
(void)config->mInputSurface->configure(*config->mISConfig);
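CCodec::configure() now fails fast with BAD_VALUE when a mandatory MediaFormat key is absent (sample rate and channel count for audio, width and height for image/video, bitrate and I-frame interval for video encoders), instead of letting the component reject the configuration later. A minimal sketch of the same required-key check for the video branch, using a plain std::map in place of Android's AMessage; the key strings match the MediaFormat constants used above, but the function itself is illustrative:

    #include <map>
    #include <string>

    // Returns true only if every key the video path requires is present.
    static bool hasRequiredVideoKeys(const std::map<std::string, double> &format,
                                     bool isEncoder) {
        if (format.count("width") == 0 || format.count("height") == 0) {
            return false;
        }
        if (isEncoder &&
            (format.count("bitrate") == 0 ||
             format.count("i-frame-interval") == 0)) {
            return false;
        }
        return true;
    }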
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index ff2419d..fb6af93 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -99,6 +99,34 @@
*/
virtual size_t numClientBuffers() const = 0;
+ void handleImageData(const sp<Codec2Buffer> &buffer) {
+ sp<ABuffer> imageDataCandidate = buffer->getImageData();
+ if (imageDataCandidate == nullptr) {
+ return;
+ }
+ sp<ABuffer> imageData;
+ if (!mFormat->findBuffer("image-data", &imageData)
+ || imageDataCandidate->size() != imageData->size()
+ || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
+ ALOGD("[%s] updating image-data", mName);
+ sp<AMessage> newFormat = dupFormat();
+ newFormat->setBuffer("image-data", imageDataCandidate);
+ MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
+ if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
+ int32_t stride = img->mPlane[0].mRowInc;
+ newFormat->setInt32(KEY_STRIDE, stride);
+ ALOGD("[%s] updating stride = %d", mName, stride);
+ if (img->mNumPlanes > 1 && stride > 0) {
+ int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
+ newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
+ ALOGD("[%s] updating vstride = %d", mName, vstride);
+ }
+ }
+ setFormat(newFormat);
+ buffer->setFormat(newFormat);
+ }
+ }
+
protected:
std::string mComponentName; ///< name of component for debugging
std::string mChannelName; ///< name of channel for debugging
@@ -186,7 +214,7 @@
* MediaCodec behavior.
*/
virtual status_t registerCsd(
- const C2StreamCsdInfo::output * /* csd */,
+ const C2StreamInitDataInfo::output * /* csd */,
size_t * /* index */,
sp<MediaCodecBuffer> * /* clientBuffer */) = 0;
@@ -255,34 +283,6 @@
mSkipCutBuffer = scb;
}
- void handleImageData(const sp<Codec2Buffer> &buffer) {
- sp<ABuffer> imageDataCandidate = buffer->getImageData();
- if (imageDataCandidate == nullptr) {
- return;
- }
- sp<ABuffer> imageData;
- if (!mFormat->findBuffer("image-data", &imageData)
- || imageDataCandidate->size() != imageData->size()
- || memcmp(imageDataCandidate->data(), imageData->data(), imageData->size()) != 0) {
- ALOGD("[%s] updating image-data", mName);
- sp<AMessage> newFormat = dupFormat();
- newFormat->setBuffer("image-data", imageDataCandidate);
- MediaImage2 *img = (MediaImage2*)imageDataCandidate->data();
- if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
- int32_t stride = img->mPlane[0].mRowInc;
- newFormat->setInt32(KEY_STRIDE, stride);
- ALOGD("[%s] updating stride = %d", mName, stride);
- if (img->mNumPlanes > 1 && stride > 0) {
- int32_t vstride = (img->mPlane[1].mOffset - img->mPlane[0].mOffset) / stride;
- newFormat->setInt32(KEY_SLICE_HEIGHT, vstride);
- ALOGD("[%s] updating vstride = %d", mName, vstride);
- }
- }
- setFormat(newFormat);
- buffer->setFormat(newFormat);
- }
- }
-
protected:
sp<SkipCutBuffer> mSkipCutBuffer;
@@ -783,6 +783,7 @@
status_t err = mImpl.grabBuffer(index, &c2Buffer);
if (err == OK) {
c2Buffer->setFormat(mFormat);
+ handleImageData(c2Buffer);
*buffer = c2Buffer;
return true;
}
@@ -1053,6 +1054,7 @@
return false;
}
*index = mImpl.assignSlot(newBuffer);
+ handleImageData(newBuffer);
*buffer = newBuffer;
return true;
}
@@ -1187,7 +1189,7 @@
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> c2Buffer;
@@ -1286,7 +1288,7 @@
}
status_t registerCsd(
- const C2StreamCsdInfo::output *csd,
+ const C2StreamInitDataInfo::output *csd,
size_t *index,
sp<MediaCodecBuffer> *clientBuffer) final {
sp<Codec2Buffer> newBuffer = new LocalLinearBuffer(
@@ -1592,6 +1594,7 @@
mFirstValidFrameIndex(0u),
mMetaMode(MODE_NONE),
mInputMetEos(false) {
+ mOutputSurface.lock()->maxDequeueBuffers = kSmoothnessFactor + kRenderingDepth;
Mutexed<std::unique_ptr<InputBuffers>>::Locked buffers(mInputBuffers);
buffers->reset(new DummyInputBuffers(""));
}
@@ -2153,7 +2156,7 @@
1 << C2PlatformAllocatorStore::BUFFERQUEUE);
if (inputFormat != nullptr) {
- bool graphic = (iStreamFormat.value == C2FormatVideo);
+ bool graphic = (iStreamFormat.value == C2BufferData::GRAPHIC);
std::shared_ptr<C2BlockPool> pool;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
@@ -2269,12 +2272,16 @@
uint32_t outputGeneration;
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
outputSurface = output->surface ?
output->surface->getIGraphicBufferProducer() : nullptr;
+ if (outputSurface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
outputGeneration = output->generation;
}
- bool graphic = (oStreamFormat.value == C2FormatVideo);
+ bool graphic = (oStreamFormat.value == C2BufferData::GRAPHIC);
C2BlockPool::local_id_t outputPoolId_;
{
@@ -2447,7 +2454,7 @@
return OK;
}
- C2StreamFormatConfig::output oStreamFormat(0u);
+ C2StreamBufferTypeSetting::output oStreamFormat(0u);
c2_status_t err = mComponent->query({ &oStreamFormat }, {}, C2_DONT_BLOCK, nullptr);
if (err != C2_OK) {
return UNKNOWN_ERROR;
@@ -2638,6 +2645,11 @@
mReorderStash.lock()->setDepth(reorderDepth.value);
ALOGV("[%s] onWorkDone: updated reorder depth to %u",
mName, reorderDepth.value);
+ Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ output->maxDequeueBuffers = mNumOutputSlots + reorderDepth.value + kRenderingDepth;
+ if (output->surface) {
+ output->surface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
+ }
} else {
ALOGD("[%s] onWorkDone: failed to read reorder depth", mName);
}
@@ -2734,7 +2746,7 @@
// TODO: properly translate these to metadata
switch (info->coreIndex().coreIndex()) {
case C2StreamPictureTypeMaskInfo::CORE_INDEX:
- if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2PictureTypeKeyFrame) {
+ if (((C2StreamPictureTypeMaskInfo *)info.get())->value & C2Config::SYNC_FRAME) {
flags |= MediaCodec::BUFFER_FLAG_SYNCFRAME;
}
break;
@@ -2813,7 +2825,6 @@
sp<IGraphicBufferProducer> producer;
if (newSurface) {
newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
- newSurface->setMaxDequeuedBufferCount(mNumOutputSlots + kRenderingDepth);
producer = newSurface->getIGraphicBufferProducer();
producer->setGenerationNumber(generation);
} else {
@@ -2841,6 +2852,7 @@
{
Mutexed<OutputSurface>::Locked output(mOutputSurface);
+ newSurface->setMaxDequeuedBufferCount(output->maxDequeueBuffers);
output->surface = newSurface;
output->generation = generation;
}
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 9ce886a..1ea29b4 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -250,6 +250,7 @@
struct OutputSurface {
sp<Surface> surface;
uint32_t generation;
+ int maxDequeueBuffers;
};
Mutexed<OutputSurface> mOutputSurface;
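The new maxDequeueBuffers field records how many buffers the codec may keep dequeued from the output surface; it is recomputed as numOutputSlots + reorderDepth + kRenderingDepth whenever the reorder depth changes, and the same value is pushed to the surface through setMaxDequeuedBufferCount() both on output-buffer setup and when a new surface is attached. The arithmetic in isolation (a sketch; kRenderingDepth is the constant defined in CCodecBufferChannel):

    #include <cstdint>

    // One slot per client-visible output buffer, plus the decoder's reorder
    // window, plus a small rendering-pipeline margin.
    constexpr int maxDequeueBuffers(int numOutputSlots, uint32_t reorderDepth,
                                    int renderingDepth) {
        return numOutputSlots + static_cast<int>(reorderDepth) + renderingDepth;
    }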
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index 0a6a717..6da131f 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -101,7 +101,7 @@
{ C2Color::MATRIX_BT709, ColorAspects::MatrixBT709_5 },
{ C2Color::MATRIX_FCC47_73_682, ColorAspects::MatrixBT470_6M },
{ C2Color::MATRIX_BT601, ColorAspects::MatrixBT601_6 },
- { C2Color::MATRIX_SMPTE240M, ColorAspects::MatrixSMPTE240M },
+ { C2Color::MATRIX_240M, ColorAspects::MatrixSMPTE240M },
{ C2Color::MATRIX_BT2020, ColorAspects::MatrixBT2020 },
{ C2Color::MATRIX_BT2020_CONSTANT, ColorAspects::MatrixBT2020Constant },
{ C2Color::MATRIX_OTHER, ColorAspects::MatrixOther },
@@ -855,19 +855,19 @@
switch (primaries) {
case C2Color::PRIMARIES_BT601_525:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_525_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_525;
break;
case C2Color::PRIMARIES_BT601_625:
- *dataSpace |= (matrix == C2Color::MATRIX_SMPTE240M
+ *dataSpace |= (matrix == C2Color::MATRIX_240M
|| matrix == C2Color::MATRIX_BT709)
? HAL_DATASPACE_STANDARD_BT601_625_UNADJUSTED
: HAL_DATASPACE_STANDARD_BT601_625;
break;
case C2Color::PRIMARIES_BT2020:
- *dataSpace |= (matrix == C2Color::MATRIX_BT2020CONSTANT
+ *dataSpace |= (matrix == C2Color::MATRIX_BT2020_CONSTANT
? HAL_DATASPACE_STANDARD_BT2020_CONSTANT_LUMINANCE
: HAL_DATASPACE_STANDARD_BT2020);
break;
diff --git a/media/codec2/tests/C2ComponentInterface_test.cpp b/media/codec2/tests/C2ComponentInterface_test.cpp
index e907964..67f733d 100644
--- a/media/codec2/tests/C2ComponentInterface_test.cpp
+++ b/media/codec2/tests/C2ComponentInterface_test.cpp
@@ -182,9 +182,9 @@
return std::make_unique<T>();
}
-template <> std::unique_ptr<C2PortMimeConfig::input> makeParam() {
+template <> std::unique_ptr<C2PortMediaTypeSetting::input> makeParam() {
// TODO(hiroh): Set more precise length.
- return C2PortMimeConfig::input::AllocUnique(100);
+ return C2PortMediaTypeSetting::input::AllocUnique(100);
}
#define TRACED_FAILURE(func) \
@@ -323,17 +323,17 @@
EXPECT_EQ(C2SettingResult::BAD_VALUE, failures[0]->failure);
}
-// There is only used enum type for the field type, that is C2DomainKind.
+// The only enum type currently used as a field type is C2Component::domain_t.
// If another field type is added, it is necessary to add a function for that.
template <>
void C2CompIntfTest::getTestValues(
const C2FieldSupportedValues &validValueInfos,
- std::vector<C2DomainKind> *const validValues,
- std::vector<C2DomainKind> *const invalidValues) {
+ std::vector<C2Component::domain_t> *const validValues,
+ std::vector<C2Component::domain_t> *const invalidValues) {
UNUSED(validValueInfos);
- validValues->emplace_back(C2DomainVideo);
- validValues->emplace_back(C2DomainAudio);
- validValues->emplace_back(C2DomainOther);
+ validValues->emplace_back(C2Component::DOMAIN_VIDEO);
+ validValues->emplace_back(C2Component::DOMAIN_AUDIO);
+ validValues->emplace_back(C2Component::DOMAIN_OTHER);
// There is no invalid value.
UNUSED(invalidValues);
@@ -634,20 +634,20 @@
std::vector<std::shared_ptr<C2ParamDescriptor>> supportedParams;
ASSERT_EQ(C2_OK, mIntf->querySupportedParams_nb(&supportedParams));
- EACH_TEST_SELF(C2ComponentLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentTemporalInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortLatencyInfo, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2StreamFormatConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2PortStreamCountConfig, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ActualPipelineDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentAttributesSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortActualDelayTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamBufferTypeSetting, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2PortStreamCountTuning, TEST_U32_WRITABLE_FIELD);
- EACH_TEST_SELF(C2ComponentDomainInfo, TEST_ENUM_WRITABLE_FIELD);
+ EACH_TEST_SELF(C2ComponentDomainSetting, TEST_ENUM_WRITABLE_FIELD);
// TODO(hiroh): Support parameters based on uint32_t[] and char[].
- // EACH_TEST_INPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
- // EACH_TEST_OUTPUT(C2PortMimeConfig, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_INPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
+ // EACH_TEST_OUTPUT(C2PortMediaTypeSetting, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_INPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
// EACH_TEST_OUTPUT(C2StreamMimeConfig, TEST_STRING_WRITABLE_FIELD);
@@ -656,10 +656,10 @@
// EACH_TEST_SELF(C2ReadOnlyParamsInfo, TEST_U32ARRAY_WRITABLE_FIELD);
// EACH_TEST_SELF(C2RequestedInfosInfo, TEST_U32ARRAY_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_INPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
- EACH_TEST_OUTPUT(C2VideoSizeStreamTuning, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_INPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
+ EACH_TEST_OUTPUT(C2StreamPictureSizeInfo, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_INPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
EACH_TEST_OUTPUT(C2MaxVideoSizeHintPortSetting, TEST_VSSTRUCT_WRITABLE_FIELD);
diff --git a/media/codec2/tests/C2SampleComponent_test.cpp b/media/codec2/tests/C2SampleComponent_test.cpp
index cd354ad..9956834 100644
--- a/media/codec2/tests/C2SampleComponent_test.cpp
+++ b/media/codec2/tests/C2SampleComponent_test.cpp
@@ -152,7 +152,7 @@
std::unordered_map<uint32_t, C2Param &> mMyParams;
- C2ComponentDomainInfo mDomainInfo;
+ C2ComponentDomainSetting mDomainInfo;
MyComponentInstance() {
mMyParams.insert({mDomainInfo.index(), mDomainInfo});
@@ -187,12 +187,12 @@
c2_blocking_t mayBlock) const override {
(void)mayBlock;
for (C2FieldSupportedValuesQuery &query : fields) {
- if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainInfo::value)) {
+ if (query.field() == C2ParamField(&mDomainInfo, &C2ComponentDomainSetting::value)) {
query.values = C2FieldSupportedValues(
false /* flag */,
&mDomainInfo.value
//,
- //{(int32_t)C2DomainVideo}
+ //{(int32_t)C2Component::DOMAIN_VIDEO}
);
query.status = C2_OK;
} else {
@@ -391,20 +391,20 @@
}
TEST_F(C2SampleComponentTest, ReflectorTest) {
- C2ComponentDomainInfo domainInfo;
+ C2ComponentDomainSetting domainInfo;
std::shared_ptr<MyComponentInstance> myComp(new MyComponentInstance);
std::shared_ptr<C2ComponentInterface> comp = myComp;
std::unique_ptr<C2StructDescriptor> desc{
- myComp->getParamReflector()->describe(C2ComponentDomainInfo::CORE_INDEX)};
+ myComp->getParamReflector()->describe(C2ComponentDomainSetting::CORE_INDEX)};
dumpStruct(*desc);
std::vector<C2FieldSupportedValuesQuery> query = {
- { C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ { C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT },
- C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value),
+ C2FieldSupportedValuesQuery(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value),
C2FieldSupportedValuesQuery::CURRENT),
- C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainInfo::value)),
+ C2FieldSupportedValuesQuery::Current(C2ParamField(&domainInfo, &C2ComponentDomainSetting::value)),
};
EXPECT_EQ(C2_OK, comp->querySupportedValues_vb(query, C2_DONT_BLOCK));
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index e0b1355..ab6a105 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -14,6 +14,9 @@
cc_library_shared {
name: "libcodec2_vndk",
vendor_available: true,
+ vndk: {
+ enabled: true,
+ },
srcs: [
"C2AllocatorIon.cpp",
diff --git a/media/codec2/vndk/C2AllocatorIon.cpp b/media/codec2/vndk/C2AllocatorIon.cpp
index 736aac5..d22153d 100644
--- a/media/codec2/vndk/C2AllocatorIon.cpp
+++ b/media/codec2/vndk/C2AllocatorIon.cpp
@@ -140,6 +140,7 @@
protected:
class Impl;
+ class ImplV2;
Impl *mImpl;
// TODO: we could make this encapsulate shared_ptr and copiable
@@ -147,7 +148,7 @@
};
class C2AllocationIon::Impl {
-private:
+protected:
/**
* Constructs an ion allocation.
*
@@ -191,11 +192,7 @@
* \return created ion allocation (implementation) which may be invalid if the
* import failed.
*/
- static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id) {
- ion_user_handle_t buffer = -1;
- int ret = ion_import(ionFd, bufferFd, &buffer);
- return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
- }
+ static Impl *Import(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id);
/**
* Constructs an ion allocation by allocating an ion buffer.
@@ -209,24 +206,7 @@
* \return created ion allocation (implementation) which may be invalid if the
* allocation failed.
*/
- static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
- int bufferFd = -1;
- ion_user_handle_t buffer = -1;
- size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
- int ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
- ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
- "returned (%d) ; buffer = %d",
- ionFd, alignedSize, align, heapMask, flags, ret, buffer);
- if (ret == 0) {
- // get buffer fd for native handle constructor
- ret = ion_share(ionFd, buffer, &bufferFd);
- if (ret != 0) {
- ion_free(ionFd, buffer);
- buffer = -1;
- }
- }
- return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
- }
+ static Impl *Alloc(int ionFd, size_t size, size_t align, unsigned heapMask, unsigned flags, C2Allocator::id_t id);
c2_status_t map(size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
(void)fence; // TODO: wait for fence
@@ -256,32 +236,7 @@
size_t mapSize = size + alignmentBytes;
Mapping map = { nullptr, alignmentBytes, mapSize };
- c2_status_t err = C2_OK;
- if (mMapFd == -1) {
- int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
- flags, mapOffset, (unsigned char**)&map.addr, &mMapFd);
- ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
- "offset = %zu) returned (%d)",
- mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
- if (ret) {
- mMapFd = -1;
- map.addr = *addr = nullptr;
- err = c2_map_errno<EINVAL>(-ret);
- } else {
- *addr = (uint8_t *)map.addr + alignmentBytes;
- }
- } else {
- map.addr = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
- ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
- "returned (%d)",
- mapSize, prot, flags, mMapFd, mapOffset, errno);
- if (map.addr == MAP_FAILED) {
- map.addr = *addr = nullptr;
- err = c2_map_errno<EINVAL>(errno);
- } else {
- *addr = (uint8_t *)map.addr + alignmentBytes;
- }
- }
+ c2_status_t err = mapInternal(mapSize, mapOffset, alignmentBytes, prot, flags, &(map.addr), addr);
if (map.addr) {
mMappings.push_back(map);
}
@@ -289,7 +244,7 @@
}
c2_status_t unmap(void *addr, size_t size, C2Fence *fence) {
- if (mMapFd < 0 || mMappings.empty()) {
+ if (mMappings.empty()) {
ALOGD("tried to unmap unmapped buffer");
return C2_NOT_FOUND;
}
@@ -307,14 +262,14 @@
*fence = C2Fence(); // not using fences
}
(void)mMappings.erase(it);
- ALOGV("successfully unmapped: %d", mBuffer);
+ ALOGV("successfully unmapped: %d", mHandle.bufferFd());
return C2_OK;
}
ALOGD("unmap failed to find specified map");
return C2_BAD_VALUE;
}
- ~Impl() {
+ virtual ~Impl() {
if (!mMappings.empty()) {
ALOGD("Dangling mappings!");
for (const Mapping &map : mMappings) {
@@ -326,7 +281,9 @@
mMapFd = -1;
}
if (mInit == C2_OK) {
- (void)ion_free(mIonFd, mBuffer);
+ if (mBuffer >= 0) {
+ (void)ion_free(mIonFd, mBuffer);
+ }
native_handle_close(&mHandle);
}
if (mIonFd >= 0) {
@@ -346,11 +303,42 @@
return mId;
}
- ion_user_handle_t ionHandle() const {
+ virtual ion_user_handle_t ionHandle() const {
return mBuffer;
}
-private:
+protected:
+ virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
+ int prot, int flags, void** base, void** addr) {
+ c2_status_t err = C2_OK;
+ if (mMapFd == -1) {
+ int ret = ion_map(mIonFd, mBuffer, mapSize, prot,
+ flags, mapOffset, (unsigned char**)base, &mMapFd);
+ ALOGV("ion_map(ionFd = %d, handle = %d, size = %zu, prot = %d, flags = %d, "
+ "offset = %zu) returned (%d)",
+ mIonFd, mBuffer, mapSize, prot, flags, mapOffset, ret);
+ if (ret) {
+ mMapFd = -1;
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(-ret);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ } else {
+ *base = mmap(nullptr, mapSize, prot, flags, mMapFd, mapOffset);
+ ALOGV("mmap(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
+ "returned (%d)",
+ mapSize, prot, flags, mMapFd, mapOffset, errno);
+ if (*base == MAP_FAILED) {
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(errno);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ }
+ return err;
+ }
+
int mIonFd;
C2HandleIon mHandle;
ion_user_handle_t mBuffer;
@@ -365,6 +353,93 @@
std::list<Mapping> mMappings;
};
+class C2AllocationIon::ImplV2 : public C2AllocationIon::Impl {
+public:
+ /**
+ * Constructs an ion allocation for platforms with new (ion_4.12.h) api
+ *
+ * \note We always create an ion allocation, even if the allocation or import fails
+ * so that we can capture the error.
+ *
+ * \param ionFd ion client (ownership transferred to created object)
+ * \param capacity size of allocation
+ * \param bufferFd buffer handle (ownership transferred to created object). Must be
+ * invalid if err is not 0.
+ * \param err errno during buffer allocation or import
+ */
+ ImplV2(int ionFd, size_t capacity, int bufferFd, C2Allocator::id_t id, int err)
+ : Impl(ionFd, capacity, bufferFd, -1 /*buffer*/, id, err) {
+ }
+
+ virtual ~ImplV2() = default;
+
+ virtual ion_user_handle_t ionHandle() const {
+ return mHandle.bufferFd();
+ }
+
+protected:
+ virtual c2_status_t mapInternal(size_t mapSize, size_t mapOffset, size_t alignmentBytes,
+ int prot, int flags, void** base, void** addr) {
+ c2_status_t err = C2_OK;
+ *base = mmap(nullptr, mapSize, prot, flags, mHandle.bufferFd(), mapOffset);
+ ALOGV("mmapV2(size = %zu, prot = %d, flags = %d, mapFd = %d, offset = %zu) "
+ "returned (%d)",
+ mapSize, prot, flags, mHandle.bufferFd(), mapOffset, errno);
+ if (*base == MAP_FAILED) {
+ *base = *addr = nullptr;
+ err = c2_map_errno<EINVAL>(errno);
+ } else {
+ *addr = (uint8_t *)*base + alignmentBytes;
+ }
+ return err;
+ }
+
+};
+
+C2AllocationIon::Impl *C2AllocationIon::Impl::Import(int ionFd, size_t capacity, int bufferFd,
+ C2Allocator::id_t id) {
+ int ret = 0;
+ if (ion_is_legacy(ionFd)) {
+ ion_user_handle_t buffer = -1;
+ ret = ion_import(ionFd, bufferFd, &buffer);
+ return new Impl(ionFd, capacity, bufferFd, buffer, id, ret);
+ } else {
+ return new ImplV2(ionFd, capacity, bufferFd, id, ret);
+ }
+}
+
+C2AllocationIon::Impl *C2AllocationIon::Impl::Alloc(int ionFd, size_t size, size_t align,
+ unsigned heapMask, unsigned flags, C2Allocator::id_t id) {
+ int bufferFd = -1;
+ ion_user_handle_t buffer = -1;
+ size_t alignedSize = align == 0 ? size : (size + align - 1) & ~(align - 1);
+ int ret;
+
+ if (ion_is_legacy(ionFd)) {
+ ret = ion_alloc(ionFd, alignedSize, align, heapMask, flags, &buffer);
+ ALOGV("ion_alloc(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
+ "returned (%d) ; buffer = %d",
+ ionFd, alignedSize, align, heapMask, flags, ret, buffer);
+ if (ret == 0) {
+ // get buffer fd for native handle constructor
+ ret = ion_share(ionFd, buffer, &bufferFd);
+ if (ret != 0) {
+ ion_free(ionFd, buffer);
+ buffer = -1;
+ }
+ }
+ return new Impl(ionFd, alignedSize, bufferFd, buffer, id, ret);
+
+ } else {
+ ret = ion_alloc_fd(ionFd, alignedSize, align, heapMask, flags, &bufferFd);
+ ALOGV("ion_alloc_fd(ionFd = %d, size = %zu, align = %zu, prot = %d, flags = %d) "
+ "returned (%d) ; bufferFd = %d",
+ ionFd, alignedSize, align, heapMask, flags, ret, bufferFd);
+
+ return new ImplV2(ionFd, alignedSize, bufferFd, id, ret);
+ }
+}
+
c2_status_t C2AllocationIon::map(
size_t offset, size_t size, C2MemoryUsage usage, C2Fence *fence, void **addr) {
return mImpl->map(offset, size, usage, fence, addr);
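For context on the Impl/ImplV2 split above: a legacy ion client maps through an ion_user_handle_t obtained from ion_map(), while the newer (ion_4.12.h) interface hands back a plain dma-buf fd that can be mmap()ed directly. A minimal standalone sketch of that runtime dispatch follows; Mapper, FdMapper and isLegacyIon() are illustrative stand-ins, not part of this change.

    #include <sys/mman.h>
    #include <cstddef>
    #include <memory>

    struct Mapper {
        virtual ~Mapper() = default;
        // Maps `size` bytes at `offset`; returns nullptr on failure.
        virtual void *map(size_t size, size_t offset) = 0;
    };

    // Legacy path: would call ion_map() on an ion_user_handle_t (elided here).
    struct LegacyMapper : Mapper {
        void *map(size_t, size_t) override { return nullptr; /* ion_map(...) */ }
    };

    // New path: the allocation is just a dma-buf fd, so plain mmap() suffices.
    struct FdMapper : Mapper {
        explicit FdMapper(int fd) : mBufferFd(fd) {}
        void *map(size_t size, size_t offset) override {
            void *addr = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                              MAP_SHARED, mBufferFd, offset);
            return addr == MAP_FAILED ? nullptr : addr;
        }
        int mBufferFd;
    };

    static bool isLegacyIon(int /*ionFd*/) { return false; }  // stand-in for ion_is_legacy()

    std::unique_ptr<Mapper> makeMapper(int ionFd, int bufferFd) {
        if (isLegacyIon(ionFd)) {
            return std::unique_ptr<Mapper>(new LegacyMapper());
        }
        return std::unique_ptr<Mapper>(new FdMapper(bufferFd));
    }

Keeping both paths behind one virtual mapInternal(), as the patch does, means callers never need to know which ion flavour is in use.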
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 32588a5..e075849 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -712,8 +712,8 @@
}
uint32_t mediaTypeIndex =
- traits->kind == C2Component::KIND_ENCODER ? C2PortMimeConfig::output::PARAM_TYPE
- : C2PortMimeConfig::input::PARAM_TYPE;
+ traits->kind == C2Component::KIND_ENCODER ? C2PortMediaTypeSetting::output::PARAM_TYPE
+ : C2PortMediaTypeSetting::input::PARAM_TYPE;
std::vector<std::unique_ptr<C2Param>> params;
res = intf->query_vb({}, { mediaTypeIndex }, C2_MAY_BLOCK, ¶ms);
if (res != C2_OK) {
@@ -724,7 +724,7 @@
ALOGD("failed to query interface: unexpected vector size: %zu", params.size());
return mInit;
}
- C2PortMimeConfig *mediaTypeConfig = C2PortMimeConfig::From(params[0].get());
+ C2PortMediaTypeSetting *mediaTypeConfig = C2PortMediaTypeSetting::From(params[0].get());
if (mediaTypeConfig == nullptr) {
ALOGD("failed to query media type");
return mInit;
diff --git a/media/extractors/mkv/MatroskaExtractor.cpp b/media/extractors/mkv/MatroskaExtractor.cpp
index 4200a46..f8aa784 100644
--- a/media/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/extractors/mkv/MatroskaExtractor.cpp
@@ -119,6 +119,9 @@
const mkvparser::BlockEntry *mBlockEntry;
long mBlockEntryIndex;
+ unsigned long mTrackType;
+ void seekwithoutcue_l(int64_t seekTimeUs, int64_t *actualFrameTimeUs);
+
void advance_l();
BlockIterator(const BlockIterator &);
@@ -290,6 +293,7 @@
mCluster(NULL),
mBlockEntry(NULL),
mBlockEntryIndex(0) {
+ mTrackType = mExtractor->mSegment->GetTracks()->GetTrackByNumber(trackNum)->GetType();
reset();
}
@@ -442,12 +446,14 @@
}
if (!pCues) {
- ALOGE("No Cues in file");
+ ALOGV("No Cues in file,seek without cue data");
+ seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
return;
}
}
else if (!pSH) {
- ALOGE("No SeekHead");
+ ALOGV("No SeekHead, seek without cue data");
+ seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
return;
}
@@ -456,7 +462,9 @@
while (!pCues->DoneParsing()) {
pCues->LoadCuePoint();
pCP = pCues->GetLast();
- CHECK(pCP);
+ ALOGV("pCP = %s", pCP == NULL ? "NULL" : "not NULL");
+ if (pCP == NULL)
+ continue;
size_t trackCount = mExtractor->mTracks.size();
for (size_t index = 0; index < trackCount; ++index) {
@@ -494,6 +502,7 @@
// Always *search* based on the video track, but finalize based on mTrackNum
if (!pTP) {
ALOGE("Did not locate the video track for seeking");
+ seekwithoutcue_l(seekTimeUs, actualFrameTimeUs);
return;
}
@@ -537,6 +546,31 @@
return (mBlockEntry->GetBlock()->GetTime(mCluster) + 500ll) / 1000ll;
}
+void BlockIterator::seekwithoutcue_l(int64_t seekTimeUs, int64_t *actualFrameTimeUs) {
+ mCluster = mExtractor->mSegment->FindCluster(seekTimeUs * 1000ll);
+ const long status = mCluster->GetFirst(mBlockEntry);
+ if (status < 0) { // error
+ ALOGE("get last blockenry failed!");
+ mCluster = NULL;
+ return;
+ }
+ mBlockEntryIndex = 0;
+ while (!eos() && ((block()->GetTrackNumber() != mTrackNum) || (blockTimeUs() < seekTimeUs))) {
+ advance_l();
+ }
+
+ // video track will seek to the next key frame.
+ if (mTrackType == 1) {
+ while (!eos() && ((block()->GetTrackNumber() != mTrackNum) ||
+ !mBlockEntry->GetBlock()->IsKey())) {
+ advance_l();
+ }
+ }
+ *actualFrameTimeUs = blockTimeUs();
+ ALOGV("seekTimeUs:%lld, actualFrameTimeUs:%lld, tracknum:%lld",
+ (long long)seekTimeUs, (long long)*actualFrameTimeUs, (long long)mTrackNum);
+}
+
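In essence, seekwithoutcue_l() above does a linear scan: find the cluster for the target time, advance to the first block of the requested track at or after that time, and for video keep advancing to the next keyframe. A minimal sketch of that strategy, using a hypothetical Block record rather than the mkvparser types:

    #include <cstdint>
    #include <vector>

    struct Block { int64_t timeUs; int trackNum; bool isKey; };

    // Returns the index of the block playback should resume from,
    // or blocks.size() if the end of stream is reached.
    size_t seekWithoutCues(const std::vector<Block> &blocks, int trackNum,
                           int64_t seekTimeUs, bool isVideo) {
        size_t i = 0;
        while (i < blocks.size() &&
               (blocks[i].trackNum != trackNum || blocks[i].timeUs < seekTimeUs)) {
            ++i;                                // advance to the seek position
        }
        if (isVideo) {
            while (i < blocks.size() &&
                   (blocks[i].trackNum != trackNum || !blocks[i].isKey)) {
                ++i;                            // video must resume on a keyframe
            }
        }
        return i;
    }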
////////////////////////////////////////////////////////////////////////////////
static unsigned U24_AT(const uint8_t *ptr) {
@@ -924,6 +958,59 @@
////////////////////////////////////////////////////////////////////////////////
+// convert all four FOURCC characters to lowercase
+static uint32_t FourCCtoLower(uint32_t fourcc) {
+ uint8_t ch_1 = tolower((fourcc >> 24) & 0xff);
+ uint8_t ch_2 = tolower((fourcc >> 16) & 0xff);
+ uint8_t ch_3 = tolower((fourcc >> 8) & 0xff);
+ uint8_t ch_4 = tolower((fourcc) & 0xff);
+ uint32_t fourcc_out = ch_1 << 24 | ch_2 << 16 | ch_3 << 8 | ch_4;
+
+ return fourcc_out;
+}
+
+static const char *MKVFourCC2MIME(uint32_t fourcc) {
+ ALOGV("MKVFourCC2MIME fourcc 0x%8.8x", fourcc);
+ uint32_t lowerFourcc = FourCCtoLower(fourcc);
+ switch (lowerFourcc) {
+ case FOURCC("mp4v"):
+ return MEDIA_MIMETYPE_VIDEO_MPEG4;
+
+ case FOURCC("s263"):
+ case FOURCC("h263"):
+ return MEDIA_MIMETYPE_VIDEO_H263;
+
+ case FOURCC("avc1"):
+ case FOURCC("h264"):
+ return MEDIA_MIMETYPE_VIDEO_AVC;
+
+ case FOURCC("mpg2"):
+ return MEDIA_MIMETYPE_VIDEO_MPEG2;
+
+ case FOURCC("xvid"):
+ return MEDIA_MIMETYPE_VIDEO_XVID;
+
+ case FOURCC("divx"):
+ case FOURCC("dx50"):
+ return MEDIA_MIMETYPE_VIDEO_DIVX;
+
+ case FOURCC("div3"):
+ case FOURCC("div4"):
+ return MEDIA_MIMETYPE_VIDEO_DIVX3;
+
+ case FOURCC("mjpg"):
+ case FOURCC("mppg"):
+ return MEDIA_MIMETYPE_VIDEO_MJPEG;
+
+ default:
+ char fourccString[5];
+ MakeFourCCString(fourcc, fourccString);
+ ALOGW("mkv unsupport fourcc %s", fourccString);
+ return "";
+ }
+}
+
+
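FourCCtoLower() above simply lower-cases each of the four packed characters so that "XVID", "Xvid" and "xvid" all hit the same case label in MKVFourCC2MIME(). A compact equivalent:

    #include <cctype>
    #include <cstdint>

    static uint32_t fourCCToLower(uint32_t fourcc) {
        uint32_t out = 0;
        for (int shift = 24; shift >= 0; shift -= 8) {
            out |= uint32_t(std::tolower((fourcc >> shift) & 0xff)) << shift;
        }
        return out;
    }
    // fourCCToLower(0x58564944 /* 'XVID' */) == 0x78766964 /* 'xvid' */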
MatroskaExtractor::MatroskaExtractor(DataSourceHelper *source)
: mDataSource(source),
mReader(new DataSourceBaseReader(mDataSource)),
@@ -956,17 +1043,56 @@
return;
}
- // from mkvparser::Segment::Load(), but stop at first cluster
- ret = mSegment->ParseHeaders();
- if (ret == 0) {
- long len;
- ret = mSegment->LoadCluster(pos, len);
- if (ret >= 1) {
- // no more clusters
- ret = 0;
+ if (mIsLiveStreaming) {
+ // from mkvparser::Segment::Load(), but stop at first cluster
+ ret = mSegment->ParseHeaders();
+ if (ret == 0) {
+ long len;
+ ret = mSegment->LoadCluster(pos, len);
+ if (ret >= 1) {
+ // no more clusters
+ ret = 0;
+ }
+ } else if (ret > 0) {
+ ret = mkvparser::E_BUFFER_NOT_FULL;
}
- } else if (ret > 0) {
- ret = mkvparser::E_BUFFER_NOT_FULL;
+ } else {
+ ret = mSegment->ParseHeaders();
+ if (ret < 0) {
+ ALOGE("Segment parse header return fail %lld", ret);
+ delete mSegment;
+ mSegment = NULL;
+ return;
+ } else if (ret == 0) {
+ const mkvparser::Cues* mCues = mSegment->GetCues();
+ const mkvparser::SeekHead* mSH = mSegment->GetSeekHead();
+ if ((mCues == NULL) && (mSH != NULL)) {
+ size_t count = mSH->GetCount();
+ const mkvparser::SeekHead::Entry* mEntry;
+ for (size_t index = 0; index < count; index++) {
+ mEntry = mSH->GetEntry(index);
+ if (mEntry->id == 0x0C53BB6B) { // Cues ID
+ long len;
+ long long pos;
+ mSegment->ParseCues(mEntry->pos, pos, len);
+ mCues = mSegment->GetCues();
+ ALOGV("find cue data by seekhead");
+ break;
+ }
+ }
+ }
+
+ if (mCues) {
+ long len;
+ ret = mSegment->LoadCluster(pos, len);
+ ALOGV("has Cue data, Cluster num=%ld", mSegment->GetCount());
+ } else {
+ long status_Load = mSegment->Load();
+ ALOGW("no Cue data,Segment Load status:%ld",status_Load);
+ }
+ } else if (ret > 0) {
+ ret = mkvparser::E_BUFFER_NOT_FULL;
+ }
}
if (ret < 0) {
@@ -1235,6 +1361,89 @@
return OK;
}
+status_t MatroskaExtractor::synthesizeMPEG2(TrackInfo *trackInfo, size_t index) {
+ ALOGV("synthesizeMPEG2");
+ BlockIterator iter(this, trackInfo->mTrackNum, index);
+ if (iter.eos()) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block *block = iter.block();
+ if (block->GetFrameCount() <= 0) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block::Frame &frame = block->GetFrame(0);
+ auto tmpData = heapbuffer<unsigned char>(frame.len);
+ long n = frame.Read(mReader, tmpData.get());
+ if (n != 0) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t header_start = 0;
+ size_t header_length = 0;
+ for (header_start = 0; (long)header_start < frame.len - 4; header_start++) {
+ if (ntohl(0x000001b3) == *(uint32_t*)((uint8_t*)tmpData.get() + header_start)) {
+ break;
+ }
+ }
+ bool isComplete_csd = false;
+ for (header_length = 0; (long)header_length < frame.len - 4 - (long)header_start; header_length++) {
+ if (ntohl(0x000001b8) == *(uint32_t*)((uint8_t*)tmpData.get()
+ + header_start + header_length)) {
+ isComplete_csd = true;
+ break;
+ }
+ }
+ if (!isComplete_csd) {
+ ALOGE("can't parse complete csd for MPEG2!");
+ return ERROR_MALFORMED;
+ }
+ addESDSFromCodecPrivate(trackInfo->mMeta, false,
+ (uint8_t*)(tmpData.get()) + header_start, header_length);
+
+ return OK;
+
+}
+
+status_t MatroskaExtractor::synthesizeMPEG4(TrackInfo *trackInfo, size_t index) {
+ ALOGV("synthesizeMPEG4");
+ BlockIterator iter(this, trackInfo->mTrackNum, index);
+ if (iter.eos()) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block *block = iter.block();
+ if (block->GetFrameCount() <= 0) {
+ return ERROR_MALFORMED;
+ }
+
+ const mkvparser::Block::Frame &frame = block->GetFrame(0);
+ auto tmpData = heapbuffer<unsigned char>(frame.len);
+ long n = frame.Read(mReader, tmpData.get());
+ if (n != 0) {
+ return ERROR_MALFORMED;
+ }
+
+ size_t vosend;
+ bool isComplete_csd = false;
+ for (vosend = 0; (long)vosend < frame.len - 4; vosend++) {
+ if (ntohl(0x000001b6) == *(uint32_t*)((uint8_t*)tmpData.get() + vosend)) {
+ isComplete_csd = true;
+ break; // Send VOS until VOP
+ }
+ }
+ if (!isComplete_csd) {
+ ALOGE("can't parse complete csd for MPEG4!");
+ return ERROR_MALFORMED;
+ }
+ addESDSFromCodecPrivate(trackInfo->mMeta, false, tmpData.get(), vosend);
+
+ return OK;
+
+}
+
+
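Both synthesizeMPEG2() and synthesizeMPEG4() above boil down to scanning the first frame for an MPEG start code and treating the bytes up to (or from) that point as codec-specific data. A generic sketch of the scan, as a hypothetical helper rather than the extractor's API:

    #include <cstddef>
    #include <cstdint>

    // Returns the byte offset of `startCode` (e.g. 0x000001b3 for an MPEG-2
    // sequence header, 0x000001b6 for an MPEG-4 VOP), or -1 if absent.
    long findStartCode(const uint8_t *data, size_t len, uint32_t startCode) {
        if (len < 4) {
            return -1;
        }
        for (size_t i = 0; i + 4 <= len; ++i) {
            uint32_t word = (uint32_t(data[i]) << 24) | (uint32_t(data[i + 1]) << 16) |
                            (uint32_t(data[i + 2]) << 8) | uint32_t(data[i + 3]);
            if (word == startCode) {
                return (long)i;
            }
        }
        return -1;
    }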
static inline bool isValidInt32ColourValue(long long value) {
return value != mkvparser::Colour::kValueNotPresent
&& value >= INT32_MIN
@@ -1417,6 +1626,8 @@
status_t err = OK;
int32_t nalSize = -1;
+ bool isSetCsdFrom1stFrame = false;
+
switch (track->GetType()) {
case VIDEO_TRACK:
{
@@ -1443,15 +1654,15 @@
continue;
}
} else if (!strcmp("V_MPEG4/ISO/ASP", codecID)) {
+ AMediaFormat_setString(meta,
+ AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_MPEG4);
if (codecPrivateSize > 0) {
- AMediaFormat_setString(meta,
- AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_MPEG4);
addESDSFromCodecPrivate(
meta, false, codecPrivate, codecPrivateSize);
} else {
ALOGW("%s is detected, but does not have configuration.",
codecID);
- continue;
+ isSetCsdFrom1stFrame = true;
}
} else if (!strcmp("V_VP8", codecID)) {
AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_VP8);
@@ -1465,6 +1676,49 @@
}
} else if (!strcmp("V_AV1", codecID)) {
AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_VIDEO_AV1);
+ } else if (!strcmp("V_MPEG2", codecID) || !strcmp("V_MPEG1", codecID)) {
+ AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME,
+ MEDIA_MIMETYPE_VIDEO_MPEG2);
+ if (codecPrivate != NULL) {
+ addESDSFromCodecPrivate(meta, false, codecPrivate, codecPrivateSize);
+ } else {
+ ALOGW("No specific codec private data, find it from the first frame");
+ isSetCsdFrom1stFrame = true;
+ }
+ } else if (!strcmp("V_MJPEG", codecID)) {
+ AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME,
+ MEDIA_MIMETYPE_VIDEO_MJPEG);
+ } else if (!strcmp("V_MS/VFW/FOURCC", codecID)) {
+ if (NULL == codecPrivate || codecPrivateSize < 20) {
+ ALOGE("V_MS/VFW/FOURCC has no valid private data (%p), codecPrivateSize: %zu",
+ codecPrivate, codecPrivateSize);
+ continue;
+ } else {
+ uint32_t fourcc = *(uint32_t *)(codecPrivate + 16);
+ fourcc = ntohl(fourcc);
+ const char* mime = MKVFourCC2MIME(fourcc);
+ ALOGV("V_MS/VFW/FOURCC type is %s", mime);
+ if (!strncasecmp("video/", mime, 6)) {
+ AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, mime);
+ } else {
+ ALOGE("V_MS/VFW/FOURCC continue,unsupport video type=%s,fourcc=0x%08x.",
+ mime, fourcc);
+ continue;
+ }
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_AVC) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_XVID) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_DIVX) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_DIVX3) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2) ||
+ !strcmp(mime, MEDIA_MIMETYPE_VIDEO_H263)) {
+ isSetCsdFrom1stFrame = true;
+ } else {
+ ALOGW("FourCC have unsupport codec, type=%s,fourcc=0x%08x.",
+ mime, fourcc);
+ continue;
+ }
+ }
} else {
ALOGW("%s is not supported.", codecID);
continue;
@@ -1557,6 +1811,21 @@
} else if (!strcmp("A_FLAC", codecID)) {
AMediaFormat_setString(meta, AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_FLAC);
err = addFlacMetadata(meta, codecPrivate, codecPrivateSize);
+ } else if ((!strcmp("A_MS/ACM", codecID))) {
+ if ((NULL == codecPrivate) || (codecPrivateSize < 30)) {
+ ALOGW("unsupported audio: A_MS/ACM has no valid private data: %s, size: %zu",
+ codecPrivate == NULL ? "null" : "non-null", codecPrivateSize);
+ continue;
+ } else {
+ uint16_t ID = *(uint16_t *)codecPrivate;
+ if (ID == 0x0055) {
+ AMediaFormat_setString(meta,
+ AMEDIAFORMAT_KEY_MIME, MEDIA_MIMETYPE_AUDIO_MPEG);
+ } else {
+ ALOGW("A_MS/ACM unsupported type , continue");
+ continue;
+ }
+ }
} else {
ALOGW("%s is not supported.", codecID);
continue;
@@ -1593,13 +1862,35 @@
initTrackInfo(track, meta, trackInfo);
trackInfo->mNalLengthSize = nalSize;
- if (!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) {
+ const char *mimetype = "";
+ AMediaFormat_getString(meta, AMEDIAFORMAT_KEY_MIME, &mimetype);
+
+ if ((!strcmp("V_MPEG4/ISO/AVC", codecID) && codecPrivateSize == 0) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_AVC) && isSetCsdFrom1stFrame)) {
// Attempt to recover from AVC track without codec private data
err = synthesizeAVCC(trackInfo, n);
if (err != OK) {
mTracks.pop();
}
+ } else if ((!strcmp("V_MPEG2", codecID) && codecPrivateSize == 0) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG2) && isSetCsdFrom1stFrame)) {
+ // Attempt to recover from MPEG2 track without codec private data
+ err = synthesizeMPEG2(trackInfo, n);
+ if (err != OK) {
+ mTracks.pop();
+ }
+ } else if ((!strcmp("V_MPEG4/ISO/ASP", codecID) && codecPrivateSize == 0) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_MPEG4) && isSetCsdFrom1stFrame) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_XVID) && isSetCsdFrom1stFrame) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_DIVX) && isSetCsdFrom1stFrame) ||
+ (!strcmp(mimetype, MEDIA_MIMETYPE_VIDEO_DIVX3) && isSetCsdFrom1stFrame)) {
+ // Attempt to recover from MPEG4 track without codec private data
+ err = synthesizeMPEG4(trackInfo, n);
+ if (err != OK) {
+ mTracks.pop();
+ }
}
+
}
}
diff --git a/media/extractors/mkv/MatroskaExtractor.h b/media/extractors/mkv/MatroskaExtractor.h
index 3871bdf..d53d9e3 100644
--- a/media/extractors/mkv/MatroskaExtractor.h
+++ b/media/extractors/mkv/MatroskaExtractor.h
@@ -95,6 +95,8 @@
int64_t mSeekPreRollNs;
status_t synthesizeAVCC(TrackInfo *trackInfo, size_t index);
+ status_t synthesizeMPEG2(TrackInfo *trackInfo, size_t index);
+ status_t synthesizeMPEG4(TrackInfo *trackInfo, size_t index);
status_t initTrackInfo(
const mkvparser::Track *track,
AMediaFormat *meta,
diff --git a/media/extractors/mp4/AC4Parser.cpp b/media/extractors/mp4/AC4Parser.cpp
index 59a2e9b..13d60c8 100644
--- a/media/extractors/mp4/AC4Parser.cpp
+++ b/media/extractors/mp4/AC4Parser.cpp
@@ -260,7 +260,7 @@
int32_t short_program_id = -1;
if (bitstream_version > 1) {
- if (ac4_dsi_version == 0){
+ if (ac4_dsi_version == 0) {
ALOGE("invalid ac4 dsi");
return false;
}
@@ -295,6 +295,7 @@
bool b_single_substream_group = false;
uint32_t presentation_config = 0, presentation_version = 0;
uint32_t pres_bytes = 0;
+ uint64_t start = 0;
if (ac4_dsi_version == 0) {
CHECK_BITS_LEFT(1 + 5 + 5);
@@ -315,6 +316,8 @@
mBitReader.skipBits(pres_bytes * 8);
continue;
}
+ /* record a marker, less the size of the presentation_config */
+ start = (mDSISize - mBitReader.numBitsLeft()) / 8;
// ac4_presentation_v0_dsi(), ac4_presentation_v1_dsi() and ac4_presentation_v2_dsi()
// all start with a presentation_config of 5 bits
CHECK_BITS_LEFT(5);
@@ -338,9 +341,6 @@
(presentation_config >= NELEM(PresentationConfig) ?
"reserved" : PresentationConfig[presentation_config]));
- /* record a marker, less the size of the presentation_config */
- uint64_t start = (mDSISize - mBitReader.numBitsLeft()) / 8;
-
bool b_add_emdf_substreams = false;
if (!b_single_substream_group && presentation_config == 6) {
b_add_emdf_substreams = true;
@@ -535,14 +535,14 @@
}
break;
}
- CHECK_BITS_LEFT(1 + 1);
- bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
- mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
- b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
- ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
- ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
- BOOLSTR(b_add_emdf_substreams));
}
+ CHECK_BITS_LEFT(1 + 1);
+ bool b_pre_virtualized = (mBitReader.getBits(1) == 1);
+ mPresentations[presentation].mPreVirtualized = b_pre_virtualized;
+ b_add_emdf_substreams = (mBitReader.getBits(1) == 1);
+ ALOGV("%u: b_pre_virtualized = %s\n", presentation, BOOLSTR(b_pre_virtualized));
+ ALOGV("%u: b_add_emdf_substreams = %s\n", presentation,
+ BOOLSTR(b_add_emdf_substreams));
}
if (b_add_emdf_substreams) {
CHECK_BITS_LEFT(7);
@@ -599,10 +599,6 @@
if (ac4_dsi_version == 1) {
uint64_t end = (mDSISize - mBitReader.numBitsLeft()) / 8;
- if (mBitReader.numBitsLeft() % 8 != 0) {
- end += 1;
- }
-
uint64_t presentation_bytes = end - start;
uint64_t skip_bytes = pres_bytes - presentation_bytes;
ALOGV("skipping = %" PRIu64 " bytes", skip_bytes);
@@ -612,7 +608,7 @@
// we should know this or something is probably wrong
// with the bitstream (or we don't support it)
- if (mPresentations[presentation].mChannelMode == -1){
+ if (mPresentations[presentation].mChannelMode == -1) {
ALOGE("could not determing channel mode of presentation %d", presentation);
return false;
}
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 4b4d767..93f34a8 100755
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -90,7 +90,7 @@
virtual media_status_t getFormat(AMediaFormat *);
virtual media_status_t read(MediaBufferHelper **buffer, const ReadOptions *options = NULL);
- virtual bool supportNonblockingRead() { return true; }
+ bool supportsNonBlockingRead() override { return true; }
virtual media_status_t fragmentedRead(
MediaBufferHelper **buffer, const ReadOptions *options = NULL);
@@ -142,6 +142,7 @@
uint8_t *mSrcBuffer;
bool mIsHeif;
+ bool mIsAudio;
sp<ItemTable> mItemTable;
// Start offset from composition time to presentation time.
@@ -1486,8 +1487,13 @@
}
}
if (duration != 0 && mLastTrack->timescale != 0) {
- AMediaFormat_setInt64(mLastTrack->meta, AMEDIAFORMAT_KEY_DURATION,
- (duration * 1000000) / mLastTrack->timescale);
+ long double durationUs = ((long double)duration * 1000000) / mLastTrack->timescale;
+ if (durationUs < 0 || durationUs > INT64_MAX) {
+ ALOGE("cannot represent %lld * 1000000 / %lld in 64 bits",
+ (long long) duration, (long long) mLastTrack->timescale);
+ return ERROR_MALFORMED;
+ }
+ AMediaFormat_setInt64(mLastTrack->meta, AMEDIAFORMAT_KEY_DURATION, durationUs);
}
uint8_t lang[2];
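The duration change above guards the ticks-to-microseconds conversion against 64-bit overflow: compute in long double, range-check, and only then narrow. Roughly, as a hypothetical standalone helper:

    #include <cstdint>
    #include <optional>

    std::optional<int64_t> ticksToUs(uint64_t duration, uint32_t timescale) {
        if (timescale == 0) {
            return std::nullopt;
        }
        long double us = ((long double)duration * 1000000) / timescale;
        if (us < 0 || us > (long double)INT64_MAX) {
            return std::nullopt;                // would not fit in int64_t
        }
        return (int64_t)us;
    }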
@@ -3021,8 +3027,10 @@
}
unsigned bsid = br.getBits(5);
- if (bsid < 8) {
- ALOGW("Incorrect bsid in EAC3 header. Possibly AC-3?");
+ if (bsid == 9 || bsid == 10) {
+ ALOGW("EAC3 stream (bsid=%d) may be silenced by the decoder", bsid);
+ } else if (bsid > 16) {
+ ALOGE("EAC3 stream (bsid=%d) is not compatible with ETSI TS 102 366 v1.4.1", bsid);
delete[] chunk;
return ERROR_MALFORMED;
}
@@ -4528,6 +4536,7 @@
}
mIsPcm = !strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_RAW);
+ mIsAudio = !strncasecmp(mime, "audio/", 6);
if (mIsPcm) {
int32_t numChannels = 0;
@@ -5398,7 +5407,7 @@
break;
}
if( mode != ReadOptions::SEEK_FRAME_INDEX) {
- seekTimeUs += ((int64_t)mElstShiftStartTicks * 1000000) / mTimescale;
+ seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
}
uint32_t sampleIndex;
@@ -5414,8 +5423,11 @@
findFlags = SampleTable::kFlagBefore;
}
- uint32_t syncSampleIndex;
- if (err == OK) {
+ uint32_t syncSampleIndex = sampleIndex;
+ // assume every audio sample is a sync sample. This works around
+ // seek issues with files that were incorrectly written with an
+ // empty or single-sample stss block for the audio track
+ if (err == OK && !mIsAudio) {
err = mSampleTable->findSyncSampleNear(
sampleIndex, &syncSampleIndex, findFlags);
}
@@ -5543,7 +5555,7 @@
AMediaFormat *meta = mBuffer->meta_data();
AMediaFormat_clear(meta);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt32(meta, AMEDIAFORMAT_KEY_IS_SYNC_FRAME, 1);
int32_t byteOrder;
@@ -5578,9 +5590,9 @@
AMediaFormat *meta = mBuffer->meta_data();
AMediaFormat_clear(meta);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_DURATION, ((long double)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
AMediaFormat_setInt64(
@@ -5634,9 +5646,9 @@
AMediaFormat *meta = mBuffer->meta_data();
AMediaFormat_clear(meta);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_DURATION, ((long double)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
AMediaFormat_setInt64(
@@ -5715,9 +5727,9 @@
AMediaFormat *meta = mBuffer->meta_data();
AMediaFormat_clear(meta);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt64(
- meta, AMEDIAFORMAT_KEY_DURATION, ((int64_t)stts * 1000000) / mTimescale);
+ meta, AMEDIAFORMAT_KEY_DURATION, ((long double)stts * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
AMediaFormat_setInt64(
@@ -5764,7 +5776,7 @@
ReadOptions::SeekMode mode;
if (options && options->getSeekTo(&seekTimeUs, &mode)) {
- seekTimeUs += ((int64_t)mElstShiftStartTicks * 1000000) / mTimescale;
+ seekTimeUs += ((long double)mElstShiftStartTicks * 1000000) / mTimescale;
ALOGV("shifted seekTimeUs :%" PRId64 ", mElstShiftStartTicks:%" PRId32, seekTimeUs,
mElstShiftStartTicks);
@@ -5925,9 +5937,9 @@
CHECK(mBuffer != NULL);
mBuffer->set_range(0, size);
AMediaFormat_setInt64(bufmeta,
- AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt64(bufmeta,
- AMEDIAFORMAT_KEY_DURATION, ((int64_t)smpl->duration * 1000000) / mTimescale);
+ AMEDIAFORMAT_KEY_DURATION, ((long double)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
AMediaFormat_setInt64(bufmeta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
@@ -6040,9 +6052,9 @@
AMediaFormat *bufmeta = mBuffer->meta_data();
AMediaFormat_setInt64(bufmeta,
- AMEDIAFORMAT_KEY_TIME_US, ((int64_t)cts * 1000000) / mTimescale);
+ AMEDIAFORMAT_KEY_TIME_US, ((long double)cts * 1000000) / mTimescale);
AMediaFormat_setInt64(bufmeta,
- AMEDIAFORMAT_KEY_DURATION, ((int64_t)smpl->duration * 1000000) / mTimescale);
+ AMEDIAFORMAT_KEY_DURATION, ((long double)smpl->duration * 1000000) / mTimescale);
if (targetSampleTimeUs >= 0) {
AMediaFormat_setInt64(bufmeta, AMEDIAFORMAT_KEY_TARGET_TIME, targetSampleTimeUs);
diff --git a/media/extractors/wav/WAVExtractor.cpp b/media/extractors/wav/WAVExtractor.cpp
index e58bd1f..5679de8 100644
--- a/media/extractors/wav/WAVExtractor.cpp
+++ b/media/extractors/wav/WAVExtractor.cpp
@@ -81,7 +81,7 @@
virtual media_status_t read(
MediaBufferHelper **buffer, const ReadOptions *options = NULL);
- virtual bool supportNonblockingRead() { return true; }
+ bool supportsNonBlockingRead() override { return true; }
protected:
virtual ~WAVSource();
diff --git a/media/libaaudio/examples/loopback/src/loopback.cpp b/media/libaaudio/examples/loopback/src/loopback.cpp
index 6578156..c7c42eb 100644
--- a/media/libaaudio/examples/loopback/src/loopback.cpp
+++ b/media/libaaudio/examples/loopback/src/loopback.cpp
@@ -295,6 +295,7 @@
AAudioArgsParser::usage();
printf(" -B{frames} input capacity in frames\n");
printf(" -C{channels} number of input channels\n");
+ printf(" -D{deviceId} input device ID\n");
printf(" -F{0,1,2} input format, 1=I16, 2=FLOAT\n");
printf(" -g{gain} recirculating loopback gain\n");
printf(" -h{hangMillis} occasionally hang in the callback\n");
@@ -393,6 +394,7 @@
AAudioStream *outputStream = nullptr;
aaudio_result_t result = AAUDIO_OK;
+ int32_t requestedInputDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t requestedInputSharingMode = AAUDIO_SHARING_MODE_SHARED;
int requestedInputChannelCount = kNumInputChannels;
aaudio_format_t requestedInputFormat = AAUDIO_FORMAT_UNSPECIFIED;
@@ -431,6 +433,9 @@
case 'C':
requestedInputChannelCount = atoi(&arg[2]);
break;
+ case 'D':
+ requestedInputDeviceId = atoi(&arg[2]);
+ break;
case 'F':
requestedInputFormat = atoi(&arg[2]);
break;
@@ -529,6 +534,7 @@
printf("INPUT stream ----------------------------------------\n");
// Use different parameters for the input.
+ argParser.setDeviceId(requestedInputDeviceId);
argParser.setNumberOfBursts(AAUDIO_UNSPECIFIED);
argParser.setFormat(requestedInputFormat);
argParser.setPerformanceMode(inputPerformanceLevel);
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index b111b78..e9b6fb1 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -10,6 +10,7 @@
"AudioAttributes.cpp",
"AudioPolicy.cpp",
"AudioProductStrategy.cpp",
+ "AudioVolumeGroup.cpp",
],
shared_libs: [
"libaudioutils",
diff --git a/media/libaudioclient/AudioAttributes.cpp b/media/libaudioclient/AudioAttributes.cpp
index 0f327cf..1ee6930 100644
--- a/media/libaudioclient/AudioAttributes.cpp
+++ b/media/libaudioclient/AudioAttributes.cpp
@@ -43,7 +43,7 @@
strcpy(mAttributes.tags, "");
}
mStreamType = static_cast<audio_stream_type_t>(parcel->readInt32());
- mGroupId = parcel->readUint32();
+ mGroupId = static_cast<volume_group_t>(parcel->readUint32());
return NO_ERROR;
}
@@ -60,7 +60,7 @@
parcel->writeUtf8AsUtf16(mAttributes.tags);
}
parcel->writeInt32(static_cast<int32_t>(mStreamType));
- parcel->writeUint32(mGroupId);
+ parcel->writeUint32(static_cast<uint32_t>(mGroupId));
return NO_ERROR;
}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index baa1469..5851533 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -70,70 +70,34 @@
// ---------------------------------------------------------------------------
-static std::string audioFormatTypeString(audio_format_t value) {
- std::string formatType;
- if (FormatConverter::toString(value, formatType)) {
- return formatType;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
-static std::string audioSourceString(audio_source_t value) {
- std::string source;
- if (SourceTypeConverter::toString(value, source)) {
- return source;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
void AudioRecord::MediaMetrics::gather(const AudioRecord *record)
{
- // key for media statistics is defined in the header
- // attrs for media statistics
- // NB: these are matched with public Java API constants defined
- // in frameworks/base/media/java/android/media/AudioRecord.java
- // These must be kept synchronized with the constants there.
- static constexpr char kAudioRecordEncoding[] = "android.media.audiorecord.encoding";
- static constexpr char kAudioRecordSource[] = "android.media.audiorecord.source";
- static constexpr char kAudioRecordLatency[] = "android.media.audiorecord.latency";
- static constexpr char kAudioRecordSampleRate[] = "android.media.audiorecord.samplerate";
- static constexpr char kAudioRecordChannelCount[] = "android.media.audiorecord.channels";
- static constexpr char kAudioRecordCreated[] = "android.media.audiorecord.createdMs";
- static constexpr char kAudioRecordDuration[] = "android.media.audiorecord.durationMs";
- static constexpr char kAudioRecordCount[] = "android.media.audiorecord.n";
- static constexpr char kAudioRecordError[] = "android.media.audiorecord.errcode";
- static constexpr char kAudioRecordErrorFunction[] = "android.media.audiorecord.errfunc";
+#define MM_PREFIX "android.media.audiorecord." // avoid cut-n-paste errors.
- // constructor guarantees mAnalyticsItem is valid
+ // Java API 28 entries, do not change.
+ mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(record->mFormat).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "source", toString(record->mAttributes.source).c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "latency", (int32_t)record->mLatency); // bad estimate.
+ mAnalyticsItem->setInt32(MM_PREFIX "samplerate", (int32_t)record->mSampleRate);
+ mAnalyticsItem->setInt32(MM_PREFIX "channels", (int32_t)record->mChannelCount);
- mAnalyticsItem->setInt32(kAudioRecordLatency, record->mLatency);
- mAnalyticsItem->setInt32(kAudioRecordSampleRate, record->mSampleRate);
- mAnalyticsItem->setInt32(kAudioRecordChannelCount, record->mChannelCount);
- mAnalyticsItem->setCString(kAudioRecordEncoding,
- audioFormatTypeString(record->mFormat).c_str());
- mAnalyticsItem->setCString(kAudioRecordSource,
- audioSourceString(record->mAttributes.source).c_str());
+ // Non-API entries, these can change.
+ mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)record->mPortId);
+ mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)record->mFrameCount);
+ mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(record->mAttributes).c_str());
+ mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)record->mChannelMask);
- // log total duration recording, including anything currently running [and count].
- nsecs_t active = 0;
+ // log total duration recording, including anything currently running.
+ int64_t activeNs = 0;
if (mStartedNs != 0) {
- active = systemTime() - mStartedNs;
+ activeNs = systemTime() - mStartedNs;
}
- mAnalyticsItem->setInt64(kAudioRecordDuration, (mDurationNs + active) / (1000 * 1000));
- mAnalyticsItem->setInt32(kAudioRecordCount, mCount);
-
- // XXX I don't know that this adds a lot of value, long term
- if (mCreatedNs != 0) {
- mAnalyticsItem->setInt64(kAudioRecordCreated, mCreatedNs / (1000 * 1000));
- }
+ mAnalyticsItem->setDouble(MM_PREFIX "durationMs", (mDurationNs + activeNs) * 1e-6);
+ mAnalyticsItem->setInt64(MM_PREFIX "startCount", (int64_t)mCount);
if (mLastError != NO_ERROR) {
- mAnalyticsItem->setInt32(kAudioRecordError, mLastError);
- mAnalyticsItem->setCString(kAudioRecordErrorFunction, mLastErrorFunc.c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "lastError.code", (int32_t)mLastError);
+ mAnalyticsItem->setCString(MM_PREFIX "lastError.at", mLastErrorFunc.c_str());
}
}
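The MM_PREFIX macro introduced above relies on compile-time concatenation of adjacent string literals, so every metrics key shares a single spelled-out prefix and a copy-paste typo in one key cannot slip in silently. For instance:

    #define MM_PREFIX "android.media.audiorecord."
    static constexpr const char *kEncodingKey = MM_PREFIX "encoding";
    // kEncodingKey == "android.media.audiorecord.encoding"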
@@ -153,7 +117,9 @@
: mActive(false), mStatus(NO_INIT), mOpPackageName(opPackageName),
mSessionId(AUDIO_SESSION_ALLOCATE),
mPreviousPriority(ANDROID_PRIORITY_NORMAL), mPreviousSchedulingGroup(SP_DEFAULT),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
+ mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE), mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
+ mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
+ mSelectedMicFieldDimension(MIC_FIELD_DIMENSION_DEFAULT)
{
}
@@ -173,7 +139,9 @@
uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ audio_microphone_direction_t selectedMicDirection,
+ float microphoneFieldDimension)
: mActive(false),
mStatus(NO_INIT),
mOpPackageName(opPackageName),
@@ -184,7 +152,8 @@
{
(void)set(inputSource, sampleRate, format, channelMask, frameCount, cbf, user,
notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
- uid, pid, pAttributes, selectedDeviceId);
+ uid, pid, pAttributes, selectedDeviceId,
+ selectedMicDirection, microphoneFieldDimension);
}
AudioRecord::~AudioRecord()
@@ -233,7 +202,9 @@
uid_t uid,
pid_t pid,
const audio_attributes_t* pAttributes,
- audio_port_handle_t selectedDeviceId)
+ audio_port_handle_t selectedDeviceId,
+ audio_microphone_direction_t selectedMicDirection,
+ float microphoneFieldDimension)
{
status_t status = NO_ERROR;
uint32_t channelCount;
@@ -249,6 +220,8 @@
sessionId, transferType, flags, String8(mOpPackageName).string(), uid, pid);
mSelectedDeviceId = selectedDeviceId;
+ mSelectedMicDirection = selectedMicDirection;
+ mSelectedMicFieldDimension = microphoneFieldDimension;
switch (transferType) {
case TRANSFER_DEFAULT:
@@ -349,7 +322,7 @@
mCbf = cbf;
if (cbf != NULL) {
- mAudioRecordThread = new AudioRecordThread(*this, threadCanCallJava);
+ mAudioRecordThread = new AudioRecordThread(*this);
mAudioRecordThread->run("AudioRecord", ANDROID_PRIORITY_AUDIO);
// thread begins in paused state, and will not reference us until start()
}
@@ -436,6 +409,10 @@
status = restoreRecord_l("start");
}
+ // Call these directly because we are already holding the lock.
+ mAudioRecord->setMicrophoneDirection(mSelectedMicDirection);
+ mAudioRecord->setMicrophoneFieldDimension(mSelectedMicFieldDimension);
+
if (status != NO_ERROR) {
mActive = false;
ALOGE("%s(%d): status %d", __func__, mPortId, status);
@@ -653,6 +630,8 @@
mNotificationFramesAct, mNotificationFramesReq);
result.appendFormat(" input(%d), latency(%u), selected device Id(%d), routed device Id(%d)\n",
mInput, mLatency, mSelectedDeviceId, mRoutedDeviceId);
+ result.appendFormat(" mic direction(%d) mic field dimension(%f)",
+ mSelectedMicDirection, mSelectedMicFieldDimension);
::write(fd, result.string(), result.size());
return NO_ERROR;
}
@@ -1405,12 +1384,34 @@
status_t AudioRecord::setMicrophoneDirection(audio_microphone_direction_t direction)
{
AutoMutex lock(mLock);
- return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+ if (mSelectedMicDirection == direction) {
+ // NOP
+ return OK;
+ }
+
+ mSelectedMicDirection = direction;
+ if (mAudioRecord == 0) {
+ // the internal AudioRecord hasn't been created yet, so just stash the attribute.
+ return OK;
+ } else {
+ return mAudioRecord->setMicrophoneDirection(direction).transactionError();
+ }
}
status_t AudioRecord::setMicrophoneFieldDimension(float zoom) {
AutoMutex lock(mLock);
- return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+ if (mSelectedMicFieldDimension == zoom) {
+ // NOP
+ return OK;
+ }
+
+ mSelectedMicFieldDimension = zoom;
+ if (mAudioRecord == 0) {
+ // the internal AudioRecord hasn't been created yet, so just stash the attribute.
+ return OK;
+ } else {
+ return mAudioRecord->setMicrophoneFieldDimension(zoom).transactionError();
+ }
}
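setMicrophoneDirection() and setMicrophoneFieldDimension() above follow a stash-then-apply pattern: remember the requested value, and forward it to the platform object once that object exists (here, when start() runs). A simplified sketch with hypothetical names:

    #include <optional>

    struct MicController {
        std::optional<int> mPendingDirection;   // stashed until the track exists
        bool mTrackCreated = false;

        void setDirection(int direction) {
            mPendingDirection = direction;
            if (mTrackCreated) {
                applyToTrack(direction);        // track exists: apply right away
            }
        }
        void start() {
            mTrackCreated = true;
            if (mPendingDirection) {
                applyToTrack(*mPendingDirection);  // replay the stashed value
            }
        }
        void applyToTrack(int /*direction*/) { /* would IPC to the native track */ }
    };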
// =========================================================================
@@ -1426,8 +1427,9 @@
// =========================================================================
-AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver, bool bCanCallJava)
- : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
+AudioRecord::AudioRecordThread::AudioRecordThread(AudioRecord& receiver)
+ : Thread(true /* bCanCallJava */) // binder recursion on restoreRecord_l() may call Java.
+ , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
mIgnoreNextPausedInt(false)
{
}
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 01d9b3d..0ce2513 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -792,6 +792,7 @@
int64_t token = IPCThreadState::self()->clearCallingIdentity();
ap->registerClient(apc);
ap->setAudioPortCallbacksEnabled(apc->isAudioPortCbEnabled());
+ ap->setAudioVolumeGroupCallbacksEnabled(apc->isAudioVolumeGroupCbEnabled());
IPCThreadState::self()->restoreCallingIdentity(token);
}
@@ -987,6 +988,38 @@
return aps->getStreamVolumeIndex(stream, index, device);
}
+status_t AudioSystem::setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->setVolumeIndexForAttributes(attr, index, device);
+}
+
+status_t AudioSystem::getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getVolumeIndexForAttributes(attr, index, device);
+}
+
+status_t AudioSystem::getMaxVolumeIndexForAttributes(const audio_attributes_t &attr, int &index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getMaxVolumeIndexForAttributes(attr, index);
+}
+
+status_t AudioSystem::getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getMinVolumeIndexForAttributes(attr, index);
+}
+
uint32_t AudioSystem::getStrategyForStream(audio_stream_type_t stream)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
@@ -1190,6 +1223,38 @@
return (ret < 0) ? INVALID_OPERATION : NO_ERROR;
}
+status_t AudioSystem::addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ Mutex::Autolock _l(gLockAPS);
+ if (gAudioPolicyServiceClient == 0) {
+ return NO_INIT;
+ }
+ int ret = gAudioPolicyServiceClient->addAudioVolumeGroupCallback(callback);
+ if (ret == 1) {
+ aps->setAudioVolumeGroupCallbacksEnabled(true);
+ }
+ return (ret < 0) ? INVALID_OPERATION : NO_ERROR;
+}
+
+status_t AudioSystem::removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+
+ Mutex::Autolock _l(gLockAPS);
+ if (gAudioPolicyServiceClient == 0) {
+ return NO_INIT;
+ }
+ int ret = gAudioPolicyServiceClient->removeAudioVolumeGroupCallback(callback);
+ if (ret == 0) {
+ aps->setAudioVolumeGroupCallbacksEnabled(false);
+ }
+ return (ret < 0) ? INVALID_OPERATION : NO_ERROR;
+}
+
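addAudioVolumeGroupCallback()/removeAudioVolumeGroupCallback() above only toggle the service-side notification stream when the first callback is registered or the last one removed. The bookkeeping amounts to the following sketch (hypothetical types, not the AudioSystem API):

    #include <algorithm>
    #include <vector>

    struct CallbackRegistry {
        std::vector<void (*)(int)> mCallbacks;

        void add(void (*cb)(int), void (*setEnabled)(bool)) {
            if (std::find(mCallbacks.begin(), mCallbacks.end(), cb) != mCallbacks.end()) {
                return;                                     // already registered
            }
            mCallbacks.push_back(cb);
            if (mCallbacks.size() == 1) {
                setEnabled(true);                           // first listener: enable
            }
        }
        void remove(void (*cb)(int), void (*setEnabled)(bool)) {
            auto it = std::find(mCallbacks.begin(), mCallbacks.end(), cb);
            if (it == mCallbacks.end()) {
                return;
            }
            mCallbacks.erase(it);
            if (mCallbacks.empty()) {
                setEnabled(false);                          // last listener gone: disable
            }
        }
    };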
status_t AudioSystem::addAudioDeviceCallback(
const wp<AudioDeviceCallback>& callback, audio_io_handle_t audioIo)
{
@@ -1403,12 +1468,16 @@
audio_stream_type_t AudioSystem::attributesToStreamType(const audio_attributes_t &attr)
{
- product_strategy_t strategyId =
- AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr));
+ product_strategy_t psId;
+ status_t ret = AudioSystem::getProductStrategyFromAudioAttributes(AudioAttributes(attr), psId);
+ if (ret != NO_ERROR) {
+ ALOGE("no strategy found for attributes %s", toString(attr).c_str());
+ return AUDIO_STREAM_MUSIC;
+ }
AudioProductStrategyVector strategies;
listAudioProductStrategies(strategies);
for (const auto &strategy : strategies) {
- if (strategy.getId() == strategyId) {
+ if (strategy.getId() == psId) {
auto attrVect = strategy.getAudioAttributes();
auto iter = std::find_if(begin(attrVect), end(attrVect), [&attr](const auto &refAttr) {
return AudioProductStrategy::attributesMatches(
@@ -1422,11 +1491,27 @@
return AUDIO_STREAM_MUSIC;
}
-product_strategy_t AudioSystem::getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+status_t AudioSystem::getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
{
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
- if (aps == 0) return PRODUCT_STRATEGY_NONE;
- return aps->getProductStrategyFromAudioAttributes(aa);
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getProductStrategyFromAudioAttributes(aa, productStrategy);
+}
+
+status_t AudioSystem::listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->listAudioVolumeGroups(groups);
+}
+
+status_t AudioSystem::getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+{
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) return PERMISSION_DENIED;
+ return aps->getVolumeGroupFromAudioAttributes(aa, volumeGroup);
}
// ---------------------------------------------------------------------------
@@ -1478,6 +1563,47 @@
}
}
+// ----------------------------------------------------------------------------
+int AudioSystem::AudioPolicyServiceClient::addAudioVolumeGroupCallback(
+ const sp<AudioVolumeGroupCallback>& callback)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
+ if (mAudioVolumeGroupCallback[i] == callback) {
+ return -1;
+ }
+ }
+ mAudioVolumeGroupCallback.add(callback);
+ return mAudioVolumeGroupCallback.size();
+}
+
+int AudioSystem::AudioPolicyServiceClient::removeAudioVolumeGroupCallback(
+ const sp<AudioVolumeGroupCallback>& callback)
+{
+ Mutex::Autolock _l(mLock);
+ size_t i;
+ for (i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
+ if (mAudioVolumeGroupCallback[i] == callback) {
+ break;
+ }
+ }
+ if (i == mAudioVolumeGroupCallback.size()) {
+ return -1;
+ }
+ mAudioVolumeGroupCallback.removeAt(i);
+ return mAudioVolumeGroupCallback.size();
+}
+
+void AudioSystem::AudioPolicyServiceClient::onAudioVolumeGroupChanged(volume_group_t group,
+ int flags)
+{
+ Mutex::Autolock _l(mLock);
+ for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
+ mAudioVolumeGroupCallback[i]->onAudioVolumeGroupChanged(group, flags);
+ }
+}
+// ----------------------------------------------------------------------------
+
void AudioSystem::AudioPolicyServiceClient::onDynamicPolicyMixStateUpdate(
String8 regId, int32_t state)
{
@@ -1521,6 +1647,9 @@
for (size_t i = 0; i < mAudioPortCallbacks.size(); i++) {
mAudioPortCallbacks[i]->onServiceDied();
}
+ for (size_t i = 0; i < mAudioVolumeGroupCallback.size(); i++) {
+ mAudioVolumeGroupCallback[i]->onServiceDied();
+ }
}
{
Mutex::Autolock _l(gLockAPS);
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 7881bb8..e59f7e0 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -170,44 +170,8 @@
// ---------------------------------------------------------------------------
-static std::string audioContentTypeString(audio_content_type_t value) {
- std::string contentType;
- if (AudioContentTypeConverter::toString(value, contentType)) {
- return contentType;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
-static std::string audioUsageString(audio_usage_t value) {
- std::string usage;
- if (UsageTypeConverter::toString(value, usage)) {
- return usage;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
{
-
- // key for media statistics is defined in the header
- // attrs for media statistics
- // NB: these are matched with public Java API constants defined
- // in frameworks/base/media/java/android/media/AudioTrack.java
- // These must be kept synchronized with the constants there.
- static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
- static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
- static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
- static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
- static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
-
- // NB: These are not yet exposed as public Java API constants.
- static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
- static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
-
// only if we're in a good state...
// XXX: shall we gather alternative info if failing?
const status_t lstatus = track->initCheck();
@@ -216,28 +180,22 @@
return;
}
- // constructor guarantees mAnalyticsItem is valid
+#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
- const int32_t underrunFrames = track->getUnderrunFrames();
- if (underrunFrames != 0) {
- mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
- }
+ // Java API 28 entries, do not change.
+ mAnalyticsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "type",
+ toString(track->mAttributes.content_type).c_str());
+ mAnalyticsItem->setCString(MM_PREFIX "usage", toString(track->mAttributes.usage).c_str());
- if (track->mTimestampStartupGlitchReported) {
- mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
- }
-
- if (track->mStreamType != -1) {
- // deprecated, but this will tell us who still uses it.
- mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
- }
- // XXX: consider including from mAttributes: source type
- mAnalyticsItem->setCString(kAudioTrackContentType,
- audioContentTypeString(track->mAttributes.content_type).c_str());
- mAnalyticsItem->setCString(kAudioTrackUsage,
- audioUsageString(track->mAttributes.usage).c_str());
- mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
- mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
+ // Non-API entries, these can change due to a Java string mistake.
+ mAnalyticsItem->setInt32(MM_PREFIX "sampleRate", (int32_t)track->mSampleRate);
+ mAnalyticsItem->setInt64(MM_PREFIX "channelMask", (int64_t)track->mChannelMask);
+ // Non-API entries, these can change.
+ mAnalyticsItem->setInt32(MM_PREFIX "portId", (int32_t)track->mPortId);
+ mAnalyticsItem->setCString(MM_PREFIX "encoding", toString(track->mFormat).c_str());
+ mAnalyticsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
+ mAnalyticsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
}
// hand the user a snapshot of the metrics.
@@ -615,7 +573,7 @@
mCbf = cbf;
if (cbf != NULL) {
- mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
+ mAudioTrackThread = new AudioTrackThread(*this);
mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
// thread begins in paused state, and will not reference us until start()
}
@@ -3127,8 +3085,9 @@
// =========================================================================
-AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
- : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
+AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver)
+ : Thread(true /* bCanCallJava */) // binder recursion on restoreTrack_l() may call Java.
+ , mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
mIgnoreNextPausedInt(false)
{
}
diff --git a/media/libaudioclient/AudioVolumeGroup.cpp b/media/libaudioclient/AudioVolumeGroup.cpp
new file mode 100644
index 0000000..e79a362
--- /dev/null
+++ b/media/libaudioclient/AudioVolumeGroup.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AudioVolumeGroup"
+
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+#include <binder/Parcel.h>
+
+#include <media/AudioVolumeGroup.h>
+#include <media/AudioAttributes.h>
+
+namespace android {
+
+status_t AudioVolumeGroup::readFromParcel(const Parcel *parcel)
+{
+ status_t ret = parcel->readUtf8FromUtf16(&mName);
+ if (ret != NO_ERROR) {
+ return ret;
+ }
+ mGroupId = static_cast<volume_group_t>(parcel->readInt32());
+ size_t size = static_cast<size_t>(parcel->readInt32());
+ for (size_t i = 0; i < size; i++) {
+ AudioAttributes attribute;
+ attribute.readFromParcel(parcel);
+ if (ret != NO_ERROR) {
+ mAudioAttributes.clear();
+ return ret;
+ }
+ mAudioAttributes.push_back(attribute.getAttributes());
+ }
+ size = static_cast<size_t>(parcel->readInt32());
+ for (size_t i = 0; i < size; i++) {
+ audio_stream_type_t stream = static_cast<audio_stream_type_t>(parcel->readInt32());
+ mStreams.push_back(stream);
+ }
+ return NO_ERROR;
+}
+
+status_t AudioVolumeGroup::writeToParcel(Parcel *parcel) const
+{
+ parcel->writeUtf8AsUtf16(mName);
+ parcel->writeInt32(static_cast<int32_t>(mGroupId));
+ size_t size = mAudioAttributes.size();
+ size_t sizePosition = parcel->dataPosition();
+ parcel->writeInt32(size);
+ size_t finalSize = size;
+ for (const auto &attributes : mAudioAttributes) {
+ size_t position = parcel->dataPosition();
+ AudioAttributes attribute(attributes);
+ status_t ret = attribute.writeToParcel(parcel);
+ if (ret != NO_ERROR) {
+ parcel->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = parcel->dataPosition();
+ parcel->setDataPosition(sizePosition);
+ parcel->writeInt32(finalSize);
+ parcel->setDataPosition(position);
+ }
+ parcel->writeInt32(mStreams.size());
+ for (const auto &stream : mStreams) {
+ parcel->writeInt32(static_cast<int32_t>(stream));
+ }
+ return NO_ERROR;
+}
+
+} // namespace android
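writeToParcel() above backpatches the element count: it reserves the slot where the count goes, serializes whatever attributes succeed, then rewinds and writes the real count. The same idea over a plain byte buffer, purely illustrative and not the Parcel API:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    void writeCounted(std::vector<uint8_t> &buf, const std::vector<int32_t> &items,
                      bool (*writeItem)(std::vector<uint8_t> &, int32_t)) {
        size_t countPos = buf.size();
        buf.resize(buf.size() + sizeof(int32_t));       // placeholder for the count
        int32_t written = 0;
        for (int32_t item : items) {
            size_t itemPos = buf.size();
            if (writeItem(buf, item)) {
                ++written;
            } else {
                buf.resize(itemPos);                    // drop the partially written entry
            }
        }
        std::memcpy(buf.data() + countPos, &written, sizeof(written));  // backpatch count
    }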
diff --git a/media/libaudioclient/IAudioPolicyService.cpp b/media/libaudioclient/IAudioPolicyService.cpp
index 1bce16f..3bac44f 100644
--- a/media/libaudioclient/IAudioPolicyService.cpp
+++ b/media/libaudioclient/IAudioPolicyService.cpp
@@ -51,6 +51,10 @@
INIT_STREAM_VOLUME,
SET_STREAM_VOLUME,
GET_STREAM_VOLUME,
+ SET_VOLUME_ATTRIBUTES,
+ GET_VOLUME_ATTRIBUTES,
+ GET_MIN_VOLUME_FOR_ATTRIBUTES,
+ GET_MAX_VOLUME_FOR_ATTRIBUTES,
GET_STRATEGY_FOR_STREAM,
GET_OUTPUT_FOR_EFFECT,
REGISTER_EFFECT,
@@ -78,6 +82,7 @@
START_AUDIO_SOURCE,
STOP_AUDIO_SOURCE,
SET_AUDIO_PORT_CALLBACK_ENABLED,
+ SET_AUDIO_VOLUME_GROUP_CALLBACK_ENABLED,
SET_MASTER_MONO,
GET_MASTER_MONO,
GET_STREAM_VOLUME_DB,
@@ -95,6 +100,8 @@
GET_OFFLOAD_FORMATS_A2DP,
LIST_AUDIO_PRODUCT_STRATEGIES,
GET_STRATEGY_FOR_ATTRIBUTES,
+ LIST_AUDIO_VOLUME_GROUPS,
+ GET_VOLUME_GROUP_FOR_ATTRIBUTES
};
#define MAX_ITEMS_PER_LIST 1024
@@ -415,6 +422,70 @@
return static_cast <status_t> (reply.readInt32());
}
+ virtual status_t setVolumeIndexForAttributes(const audio_attributes_t &attr, int index,
+ audio_devices_t device)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&attr, sizeof(audio_attributes_t));
+ data.writeInt32(index);
+ data.writeInt32(static_cast <uint32_t>(device));
+ status_t status = remote()->transact(SET_VOLUME_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return static_cast <status_t> (reply.readInt32());
+ }
+ virtual status_t getVolumeIndexForAttributes(const audio_attributes_t &attr, int &index,
+ audio_devices_t device)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&attr, sizeof(audio_attributes_t));
+ data.writeInt32(static_cast <uint32_t>(device));
+ status_t status = remote()->transact(GET_VOLUME_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast <status_t> (reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ index = reply.readInt32();
+ return NO_ERROR;
+ }
+ virtual status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&attr, sizeof(audio_attributes_t));
+ status_t status = remote()->transact(GET_MIN_VOLUME_FOR_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast <status_t> (reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ index = reply.readInt32();
+ return NO_ERROR;
+ }
+ virtual status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr, int &index)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.write(&attr, sizeof(audio_attributes_t));
+ status_t status = remote()->transact(GET_MAX_VOLUME_FOR_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast <status_t> (reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ index = reply.readInt32();
+ return NO_ERROR;
+ }
virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
{
Parcel data, reply;
@@ -692,6 +763,14 @@
remote()->transact(SET_AUDIO_PORT_CALLBACK_ENABLED, data, &reply);
}
+ virtual void setAudioVolumeGroupCallbacksEnabled(bool enabled)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ data.writeInt32(enabled ? 1 : 0);
+ remote()->transact(SET_AUDIO_VOLUME_GROUP_CALLBACK_ENABLED, data, &reply);
+ }
+
virtual status_t acquireSoundTriggerSession(audio_session_t *session,
audio_io_handle_t *ioHandle,
audio_devices_t *device)
@@ -1085,35 +1164,90 @@
return status;
}
status = static_cast<status_t>(reply.readInt32());
- if (status == NO_ERROR) {
- uint32_t numStrategies = static_cast<uint32_t>(reply.readInt32());
- for (size_t i = 0; i < numStrategies; i++) {
- AudioProductStrategy strategy;
- status = strategy.readFromParcel(&reply);
- if (status != NO_ERROR) {
- ALOGE("%s: failed to read strategies", __FUNCTION__);
- strategies.clear();
- return status;
- }
- strategies.push_back(strategy);
- }
+ if (status != NO_ERROR) {
+ return status;
}
- return status;
+ uint32_t numStrategies = static_cast<uint32_t>(reply.readInt32());
+ for (size_t i = 0; i < numStrategies; i++) {
+ AudioProductStrategy strategy;
+ status = strategy.readFromParcel(&reply);
+ if (status != NO_ERROR) {
+ ALOGE("%s: failed to read strategies", __FUNCTION__);
+ strategies.clear();
+ return status;
+ }
+ strategies.push_back(strategy);
+ }
+ return NO_ERROR;
}
- virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
status_t status = aa.writeToParcel(&data);
if (status != NO_ERROR) {
- return PRODUCT_STRATEGY_NONE;
+ return status;
}
status = remote()->transact(GET_STRATEGY_FOR_ATTRIBUTES, data, &reply);
- if (status == NO_ERROR) {
- return static_cast<product_strategy_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
}
- return PRODUCT_STRATEGY_NONE;
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ productStrategy = static_cast<product_strategy_t>(reply.readInt32());
+ return NO_ERROR;
+ }
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+
+ status_t status = remote()->transact(LIST_AUDIO_VOLUME_GROUPS, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ uint32_t numGroups = static_cast<uint32_t>(reply.readInt32());
+ for (size_t i = 0; i < numGroups; i++) {
+ AudioVolumeGroup group;
+ status = group.readFromParcel(&reply);
+ if (status != NO_ERROR) {
+ ALOGE("%s: failed to read volume groups", __FUNCTION__);
+ groups.clear();
+ return status;
+ }
+ groups.push_back(group);
+ }
+ return NO_ERROR;
+ }
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
+ status_t status = aa.writeToParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = remote()->transact(GET_VOLUME_GROUP_FOR_ATTRIBUTES, data, &reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = static_cast<status_t>(reply.readInt32());
+ if (status != NO_ERROR) {
+ return status;
+ }
+ volumeGroup = static_cast<volume_group_t>(reply.readInt32());
+ return NO_ERROR;
}
};
@@ -1173,7 +1307,9 @@
case SET_A11Y_SERVICES_UIDS:
case SET_UID_DEVICE_AFFINITY:
case REMOVE_UID_DEVICE_AFFINITY:
- case GET_OFFLOAD_FORMATS_A2DP: {
+ case GET_OFFLOAD_FORMATS_A2DP:
+ case LIST_AUDIO_VOLUME_GROUPS:
+ case GET_VOLUME_GROUP_FOR_ATTRIBUTES: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1433,6 +1569,73 @@
return NO_ERROR;
} break;
+ case SET_VOLUME_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_attributes_t attributes = {};
+ status_t status = data.read(&attributes, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ int index = data.readInt32();
+ audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
+
+ reply->writeInt32(static_cast <uint32_t>(setVolumeIndexForAttributes(attributes,
+ index, device)));
+ return NO_ERROR;
+ } break;
+
+ case GET_VOLUME_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_attributes_t attributes = {};
+ status_t status = data.read(&attributes, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+ audio_devices_t device = static_cast <audio_devices_t>(data.readInt32());
+
+ int index = 0;
+ status = getVolumeIndexForAttributes(attributes, index, device);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ if (status == NO_ERROR) {
+ reply->writeInt32(index);
+ }
+ return NO_ERROR;
+ } break;
+
+ case GET_MIN_VOLUME_FOR_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_attributes_t attributes = {};
+ status_t status = data.read(&attributes, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ int index = 0;
+ status = getMinVolumeIndexForAttributes(attributes, index);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ if (status == NO_ERROR) {
+ reply->writeInt32(index);
+ }
+ return NO_ERROR;
+ } break;
+
+ case GET_MAX_VOLUME_FOR_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ audio_attributes_t attributes = {};
+ status_t status = data.read(&attributes, sizeof(audio_attributes_t));
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ int index = 0;
+ status = getMaxVolumeIndexForAttributes(attributes, index);
+ reply->writeInt32(static_cast <uint32_t>(status));
+ if (status == NO_ERROR) {
+ reply->writeInt32(index);
+ }
+ return NO_ERROR;
+ } break;
+
case GET_DEVICES_FOR_STREAM: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
audio_stream_type_t stream =
@@ -1681,6 +1884,12 @@
return NO_ERROR;
} break;
+ case SET_AUDIO_VOLUME_GROUP_CALLBACK_ENABLED: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ setAudioVolumeGroupCallbacksEnabled(data.readInt32() == 1);
+ return NO_ERROR;
+ } break;
+
case ACQUIRE_SOUNDTRIGGER_SESSION: {
CHECK_INTERFACE(IAudioPolicyService, data, reply);
sp<IAudioPolicyServiceClient> client = interface_cast<IAudioPolicyServiceClient>(
@@ -1992,7 +2201,7 @@
status_t status = listAudioProductStrategies(strategies);
reply->writeInt32(status);
if (status != NO_ERROR) {
- return status;
+ return NO_ERROR;
}
size_t size = strategies.size();
size_t sizePosition = reply->dataPosition();
@@ -2021,11 +2230,61 @@
if (status != NO_ERROR) {
return status;
}
- product_strategy_t strategy = getProductStrategyFromAudioAttributes(attributes);
+ product_strategy_t strategy;
+ status = getProductStrategyFromAudioAttributes(attributes, strategy);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
reply->writeUint32(static_cast<int>(strategy));
return NO_ERROR;
}
+ case LIST_AUDIO_VOLUME_GROUPS: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioVolumeGroupVector groups;
+ status_t status = listAudioVolumeGroups(groups);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ size_t size = groups.size();
+ size_t sizePosition = reply->dataPosition();
+ reply->writeInt32(size);
+ size_t finalSize = size;
+ for (size_t i = 0; i < size; i++) {
+ size_t position = reply->dataPosition();
+ if (groups[i].writeToParcel(reply) != NO_ERROR) {
+ reply->setDataPosition(position);
+ finalSize--;
+ }
+ }
+ if (size != finalSize) {
+ size_t position = reply->dataPosition();
+ reply->setDataPosition(sizePosition);
+ reply->writeInt32(finalSize);
+ reply->setDataPosition(position);
+ }
+ return NO_ERROR;
+ }
+
+ case GET_VOLUME_GROUP_FOR_ATTRIBUTES: {
+ CHECK_INTERFACE(IAudioPolicyService, data, reply);
+ AudioAttributes attributes;
+ status_t status = attributes.readFromParcel(&data);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ volume_group_t group;
+ status = getVolumeGroupFromAudioAttributes(attributes, group);
+ reply->writeInt32(status);
+ if (status != NO_ERROR) {
+ return NO_ERROR;
+ }
+ reply->writeUint32(static_cast<int>(group));
+ return NO_ERROR;
+ }
+
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libaudioclient/IAudioPolicyServiceClient.cpp b/media/libaudioclient/IAudioPolicyServiceClient.cpp
index 1f9eab7..52d8ccd 100644
--- a/media/libaudioclient/IAudioPolicyServiceClient.cpp
+++ b/media/libaudioclient/IAudioPolicyServiceClient.cpp
@@ -31,7 +31,8 @@
PORT_LIST_UPDATE = IBinder::FIRST_CALL_TRANSACTION,
PATCH_LIST_UPDATE,
MIX_STATE_UPDATE,
- RECORDING_CONFIGURATION_UPDATE
+ RECORDING_CONFIGURATION_UPDATE,
+ VOLUME_GROUP_CHANGED,
};
// ----------------------------------------------------------------------
@@ -108,6 +109,15 @@
remote()->transact(PATCH_LIST_UPDATE, data, &reply, IBinder::FLAG_ONEWAY);
}
+ void onAudioVolumeGroupChanged(volume_group_t group, int flags)
+ {
+ Parcel data, reply;
+ data.writeInterfaceToken(IAudioPolicyServiceClient::getInterfaceDescriptor());
+ data.writeUint32(group);
+ data.writeInt32(flags);
+ remote()->transact(VOLUME_GROUP_CHANGED, data, &reply, IBinder::FLAG_ONEWAY);
+ }
+
void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state)
{
Parcel data, reply;
@@ -157,6 +167,13 @@
onAudioPatchListUpdate();
return NO_ERROR;
} break;
+ case VOLUME_GROUP_CHANGED: {
+ CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply);
+ volume_group_t group = static_cast<volume_group_t>(data.readUint32());
+ int flags = data.readInt32();
+ onAudioVolumeGroupChanged(group, flags);
+ return NO_ERROR;
+ } break;
case MIX_STATE_UPDATE: {
CHECK_INTERFACE(IAudioPolicyServiceClient, data, reply);
String8 regId = data.readString8();
diff --git a/media/libaudioclient/include/media/AudioAttributes.h b/media/libaudioclient/include/media/AudioAttributes.h
index edf26eb..0a35e9e 100644
--- a/media/libaudioclient/include/media/AudioAttributes.h
+++ b/media/libaudioclient/include/media/AudioAttributes.h
@@ -17,6 +17,7 @@
#pragma once
+#include <media/AudioCommonTypes.h>
#include <system/audio.h>
#include <system/audio_policy.h>
#include <binder/Parcelable.h>
@@ -28,7 +29,7 @@
public:
AudioAttributes() = default;
AudioAttributes(const audio_attributes_t &attributes) : mAttributes(attributes) {}
- AudioAttributes(uint32_t groupId,
+ AudioAttributes(volume_group_t groupId,
audio_stream_type_t stream,
const audio_attributes_t &attributes) :
mAttributes(attributes), mStreamType(stream), mGroupId(groupId) {}
@@ -39,7 +40,7 @@
status_t writeToParcel(Parcel *parcel) const override;
audio_stream_type_t getStreamType() const { return mStreamType; }
- uint32_t getGroupId() const { return mGroupId; }
+ volume_group_t getGroupId() const { return mGroupId; }
private:
audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
@@ -53,7 +54,7 @@
* @brief mGroupId: for future volume management, define groups within a strategy that follows
* the same curves of volume (extension of stream types to manage volume)
*/
- uint32_t mGroupId = 0;
+ volume_group_t mGroupId = VOLUME_GROUP_NONE;
};
} // namespace android
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5188da1..8e446ea 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -27,7 +27,7 @@
const product_strategy_t PRODUCT_STRATEGY_NONE = static_cast<product_strategy_t>(-1);
using AttributesVector = std::vector<audio_attributes_t>;
-using StreamTypes = std::vector<audio_stream_type_t>;
+using StreamTypeVector = std::vector<audio_stream_type_t>;
constexpr bool operator==(const audio_attributes_t &lhs, const audio_attributes_t &rhs)
{
@@ -38,5 +38,9 @@
{
return !(lhs==rhs);
}
+
+enum volume_group_t : uint32_t;
+static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
+
} // namespace android
diff --git a/media/libaudioclient/include/media/AudioMixer.h b/media/libaudioclient/include/media/AudioMixer.h
index 41b425f..783eef3 100644
--- a/media/libaudioclient/include/media/AudioMixer.h
+++ b/media/libaudioclient/include/media/AudioMixer.h
@@ -273,7 +273,7 @@
mPostDownmixReformatBufferProvider.reset(nullptr);
mDownmixerBufferProvider.reset(nullptr);
mReformatBufferProvider.reset(nullptr);
- mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
mAdjustChannelsBufferProvider.reset(nullptr);
}
@@ -347,8 +347,12 @@
* all pre-mixer track buffer conversions outside the AudioMixer class.
*
* 1) mInputBufferProvider: The AudioTrack buffer provider.
- * 2) mAdjustChannelsBufferProvider: Expend or contracts data
- * 3) mAdjustChannelsNonDestructiveBufferProvider: Non-destructively adjust sample data
+ * 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
+ * channel format to another. Expanded channels are filled with zeros and put at the end
+ * of each audio frame. Contracted channels are copied to the end of the buffer.
+ * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
+ *    This is currently used for audio-haptic coupled playback to separate audio and haptic
+ *    data. Contracted channels can be written to a given buffer.
* 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
@@ -360,9 +364,10 @@
* 7) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- // TODO: combine AdjustChannelsBufferProvider and AdjustChannelsNonDestructiveBufferProvider
+ // TODO: combine mAdjustChannelsBufferProvider and
+ // mContractChannelsNonDestructiveBufferProvider
std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mAdjustChannelsNonDestructiveBufferProvider;
+ std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
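
The comment block above describes the pre-mixer pipeline as a chain of optional PassthruBufferProvider stages, each pulling from the previous one. A simplified sketch of that chaining idiom, with the interfaces reduced to minimal stand-in types purely for illustration (the real classes live in BufferProviders.h):

    #include <memory>
    #include <vector>

    // Minimal stand-ins for AudioBufferProvider / PassthruBufferProvider, reduced to the
    // part of the interface the chaining relies on.
    struct Provider {
        virtual ~Provider() = default;
    };
    struct PassthruProvider : Provider {
        void setBufferProvider(Provider* upstream) { mUpstream = upstream; }
        Provider* mUpstream = nullptr;
    };

    // Wires each present stage onto the previous one; the mixer then reads from the tail.
    Provider* chainProviders(Provider* input,
                             const std::vector<std::unique_ptr<PassthruProvider>>& stages) {
        Provider* current = input;
        for (const auto& stage : stages) {
            if (stage != nullptr) {
                stage->setBufferProvider(current);
                current = stage.get();
            }
        }
        return current;
    }
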
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 4707c4a..b4ddb69 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -189,7 +189,10 @@
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_microphone_direction_t
+ selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
/* Terminates the AudioRecord and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioRecord.
@@ -228,7 +231,10 @@
uid_t uid = AUDIO_UID_INVALID,
pid_t pid = -1,
const audio_attributes_t* pAttributes = NULL,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_microphone_direction_t
+ selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
/* Result of constructing the AudioRecord. This must be checked for successful initialization
* before using any AudioRecord API (except for set()), because using
@@ -562,7 +568,7 @@
class AudioRecordThread : public Thread
{
public:
- AudioRecordThread(AudioRecord& receiver, bool bCanCallJava = false);
+ AudioRecordThread(AudioRecord& receiver);
// Do not call Thread::requestExitAndWait() without first calling requestExit().
// Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
@@ -716,6 +722,9 @@
// activity and connected devices
wp<AudioSystem::AudioDeviceCallback> mDeviceCallback;
+ audio_microphone_direction_t mSelectedMicDirection;
+ float mSelectedMicFieldDimension;
+
private:
class MediaMetrics {
public:
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index b9ee24a..e64f285 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -21,6 +21,7 @@
#include <media/AudioPolicy.h>
#include <media/AudioProductStrategy.h>
+#include <media/AudioVolumeGroup.h>
#include <media/AudioIoDescriptor.h>
#include <media/IAudioFlingerClient.h>
#include <media/IAudioPolicyServiceClient.h>
@@ -263,6 +264,17 @@
int *index,
audio_devices_t device);
+ static status_t setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device);
+ static status_t getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device);
+
+ static status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
+
+ static status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
+
static uint32_t getStrategyForStream(audio_stream_type_t stream);
static audio_devices_t getDevicesForStream(audio_stream_type_t stream);
@@ -367,13 +379,34 @@
static bool isHapticPlaybackSupported();
static status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
- static product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa);
+ static status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy);
static audio_attributes_t streamTypeToAttributes(audio_stream_type_t stream);
static audio_stream_type_t attributesToStreamType(const audio_attributes_t &attr);
+ static status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups);
+
+ static status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup);
+
// ----------------------------------------------------------------------------
+ class AudioVolumeGroupCallback : public RefBase
+ {
+ public:
+
+ AudioVolumeGroupCallback() {}
+ virtual ~AudioVolumeGroupCallback() {}
+
+ virtual void onAudioVolumeGroupChanged(volume_group_t group, int flags) = 0;
+ virtual void onServiceDied() = 0;
+
+ };
+
+ static status_t addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
+ static status_t removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
+
class AudioPortCallback : public RefBase
{
public:
@@ -506,12 +539,17 @@
int removeAudioPortCallback(const sp<AudioPortCallback>& callback);
bool isAudioPortCbEnabled() const { return (mAudioPortCallbacks.size() != 0); }
+ int addAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
+ int removeAudioVolumeGroupCallback(const sp<AudioVolumeGroupCallback>& callback);
+ bool isAudioVolumeGroupCbEnabled() const { return (mAudioVolumeGroupCallback.size() != 0); }
+
// DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
// IAudioPolicyServiceClient
virtual void onAudioPortListUpdate();
virtual void onAudioPatchListUpdate();
+ virtual void onAudioVolumeGroupChanged(volume_group_t group, int flags);
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state);
virtual void onRecordingConfigurationUpdate(int event,
const record_client_info_t *clientInfo,
@@ -525,6 +563,7 @@
private:
Mutex mLock;
Vector <sp <AudioPortCallback> > mAudioPortCallbacks;
+ Vector <sp <AudioVolumeGroupCallback> > mAudioVolumeGroupCallback;
};
static audio_io_handle_t getOutput(audio_stream_type_t stream);
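
A hedged client-side sketch of the AudioSystem additions above: subclass AudioVolumeGroupCallback to receive group change notifications, and use the attribute-based volume getters/setters. The class name, LOG_TAG, and the media/speaker values are illustrative only:

    #define LOG_TAG "VolumeGroupExample"

    #include <media/AudioSystem.h>
    #include <system/audio.h>
    #include <utils/Log.h>

    using namespace android;

    // Receives volume group change notifications from the audio policy service.
    class MyVolumeGroupCallback : public AudioSystem::AudioVolumeGroupCallback {
    public:
        void onAudioVolumeGroupChanged(volume_group_t group, int flags) override {
            ALOGI("volume group %u changed (flags 0x%x)", group, flags);
        }
        void onServiceDied() override {
            ALOGW("audio policy service died");
        }
    };

    void exampleVolumeForAttributes() {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;

        AudioSystem::addAudioVolumeGroupCallback(new MyVolumeGroupCallback());

        int index = 0;
        if (AudioSystem::getVolumeIndexForAttributes(attr, index,
                                                     AUDIO_DEVICE_OUT_SPEAKER) == NO_ERROR) {
            AudioSystem::setVolumeIndexForAttributes(attr, index, AUDIO_DEVICE_OUT_SPEAKER);
        }
    }
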
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 12f5d71..3926ead 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -934,7 +934,7 @@
class AudioTrackThread : public Thread
{
public:
- AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
+ AudioTrackThread(AudioTrack& receiver);
// Do not call Thread::requestExitAndWait() without first calling requestExit().
// Thread::requestExitAndWait() is not virtual, and the implementation doesn't do enough.
diff --git a/media/libaudioclient/include/media/AudioVolumeGroup.h b/media/libaudioclient/include/media/AudioVolumeGroup.h
new file mode 100644
index 0000000..9a6ea07
--- /dev/null
+++ b/media/libaudioclient/include/media/AudioVolumeGroup.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#pragma once
+
+#include <media/AudioProductStrategy.h>
+#include <system/audio.h>
+#include <system/audio_policy.h>
+#include <binder/Parcelable.h>
+
+namespace android {
+
+class AudioVolumeGroup : public Parcelable
+{
+public:
+ AudioVolumeGroup() {}
+ AudioVolumeGroup(const std::string &name,
+ volume_group_t group,
+ const AttributesVector &attributes,
+ const StreamTypeVector &streams) :
+ mName(name), mGroupId(group), mAudioAttributes(attributes), mStreams(streams) {}
+
+ const std::string &getName() const { return mName; }
+ volume_group_t getId() const { return mGroupId; }
+ AttributesVector getAudioAttributes() const { return mAudioAttributes; }
+ StreamTypeVector getStreamTypes() const { return mStreams; }
+
+ status_t readFromParcel(const Parcel *parcel) override;
+ status_t writeToParcel(Parcel *parcel) const override;
+
+private:
+ std::string mName;
+ volume_group_t mGroupId = VOLUME_GROUP_NONE;
+ AttributesVector mAudioAttributes;
+ StreamTypeVector mStreams;
+};
+
+using AudioVolumeGroupVector = std::vector<AudioVolumeGroup>;
+
+} // namespace android
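
A short sketch of how a client might enumerate the groups exposed by this new type through AudioSystem::listAudioVolumeGroups() (function name and LOG_TAG are illustrative; the accessors are the ones declared above):

    #define LOG_TAG "VolumeGroupList"

    #include <media/AudioSystem.h>
    #include <media/AudioVolumeGroup.h>
    #include <utils/Log.h>

    void exampleListVolumeGroups() {
        android::AudioVolumeGroupVector groups;
        if (android::AudioSystem::listAudioVolumeGroups(groups) != android::NO_ERROR) {
            return;
        }
        for (const auto& group : groups) {
            ALOGI("group '%s' (id %u) covers %zu attributes and %zu stream types",
                  group.getName().c_str(), group.getId(),
                  group.getAudioAttributes().size(), group.getStreamTypes().size());
        }
    }
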
diff --git a/media/libaudioclient/include/media/IAudioPolicyService.h b/media/libaudioclient/include/media/IAudioPolicyService.h
index e89a55d..35540f0 100644
--- a/media/libaudioclient/include/media/IAudioPolicyService.h
+++ b/media/libaudioclient/include/media/IAudioPolicyService.h
@@ -93,6 +93,17 @@
virtual status_t getStreamVolumeIndex(audio_stream_type_t stream,
int *index,
audio_devices_t device) = 0;
+
+ virtual status_t setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device) = 0;
+ virtual status_t getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device) = 0;
+ virtual status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr, int &index) = 0;
+
+ virtual status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index) = 0;
+
virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream) = 0;
virtual audio_io_handle_t getOutputForEffect(const effect_descriptor_t *desc) = 0;
@@ -160,6 +171,8 @@
virtual void setAudioPortCallbacksEnabled(bool enabled) = 0;
+ virtual void setAudioVolumeGroupCallbacksEnabled(bool enabled) = 0;
+
virtual status_t acquireSoundTriggerSession(audio_session_t *session,
audio_io_handle_t *ioHandle,
audio_devices_t *device) = 0;
@@ -198,7 +211,12 @@
virtual bool isHapticPlaybackSupported() = 0;
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
- virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa) = 0;
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy) = 0;
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) = 0;
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup) = 0;
};
diff --git a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
index b3c0381..79008c3 100644
--- a/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
+++ b/media/libaudioclient/include/media/IAudioPolicyServiceClient.h
@@ -23,6 +23,8 @@
#include <binder/IInterface.h>
#include <system/audio.h>
#include <system/audio_effect.h>
+#include <media/AudioPolicy.h>
+#include <media/AudioVolumeGroup.h>
namespace android {
@@ -45,6 +47,8 @@
public:
DECLARE_META_INTERFACE(AudioPolicyServiceClient);
+ // Notifies a change of volume group
+ virtual void onAudioVolumeGroupChanged(volume_group_t group, int flags) = 0;
// Notifies a change of audio port configuration.
virtual void onAudioPortListUpdate() = 0;
// Notifies a change of audio patch configuration.
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index a1e869f..b25f82e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -123,15 +123,13 @@
status_t DeviceHalHidl::setMasterVolume(float volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
- return processReturn("setMasterVolume", mPrimaryDevice->setMasterVolume(volume));
+ return processReturn("setMasterVolume", mDevice->setMasterVolume(volume));
}
status_t DeviceHalHidl::getMasterVolume(float *volume) {
if (mDevice == 0) return NO_INIT;
- if (mPrimaryDevice == 0) return INVALID_OPERATION;
Result retval;
- Return<void> ret = mPrimaryDevice->getMasterVolume(
+ Return<void> ret = mDevice->getMasterVolume(
[&](Result r, float v) {
retval = r;
if (retval == Result::OK) {
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index b0597b3..caf575c 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <common/all-versions/VersionUtils.h>
+#include <cutils/native_handle.h>
#include <hwbinder/IPCThreadState.h>
#include <media/EffectsFactoryApi.h>
#include <utils/Log.h>
@@ -280,6 +281,15 @@
return ret.isOk() ? analyzeResult(ret) : FAILED_TRANSACTION;
}
+status_t EffectHalHidl::dump(int fd) {
+ if (mEffect == 0) return NO_INIT;
+ native_handle_t* hidlHandle = native_handle_create(1, 0);
+ hidlHandle->data[0] = fd;
+ Return<void> ret = mEffect->debug(hidlHandle, {} /* options */);
+ native_handle_delete(hidlHandle);
+ return ret.isOk() ? OK : FAILED_TRANSACTION;
+}
+
status_t EffectHalHidl::getConfigImpl(
uint32_t cmdCode, uint32_t *replySize, void *pReplyData) {
if (replySize == NULL || *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
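
The new EffectHalHidl::dump() above passes the dump file descriptor to the HIDL debug() call by wrapping it in a native_handle_t. A small sketch of that fd-wrapping step in isolation (the helper name is illustrative; ownership and cleanup follow the pattern used in dump() above):

    #include <cutils/native_handle.h>

    // Wraps a single file descriptor in a native_handle_t so it can be handed to a HIDL
    // debug() call. The handle does not take ownership of fd; release it afterwards with
    // native_handle_delete(), as dump() does above.
    native_handle_t* wrapFdForHidl(int fd) {
        native_handle_t* handle = native_handle_create(1 /* numFds */, 0 /* numInts */);
        if (handle != nullptr) {
            handle->data[0] = fd;
        }
        return handle;
    }
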
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index 9d9f707..1f238c0 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -61,6 +61,8 @@
// Whether it's a local implementation.
virtual bool isLocal() const { return false; }
+ virtual status_t dump(int fd);
+
uint64_t effectId() const { return mEffectId; }
static void effectDescriptorToHal(
diff --git a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
index 92622aa..03165bd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
@@ -55,6 +55,8 @@
// Whether it's a local implementation.
virtual bool isLocal() const = 0;
+ virtual status_t dump(int fd) = 0;
+
protected:
// Subclasses can not be constructed directly by clients.
EffectHalInterface() {}
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index 2c57db7..f7cc096 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -408,8 +408,8 @@
void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
{
ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mAdjustChannelsNonDestructiveBufferProvider.reset(nullptr);
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
reconfigureBufferProviders();
}
}
@@ -426,13 +426,13 @@
? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
mMixerChannelCount, mMixerFormat)
: NULL;
- mAdjustChannelsNonDestructiveBufferProvider.reset(
- new AdjustChannelsNonDestructiveBufferProvider(
+ mContractChannelsNonDestructiveBufferProvider.reset(
+ new AdjustChannelsBufferProvider(
mFormat,
mAdjustNonDestructiveInChannelCount,
mAdjustNonDestructiveOutChannelCount,
- mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
frames,
+ mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
buffer));
reconfigureBufferProviders();
}
@@ -441,9 +441,9 @@
void AudioMixer::Track::clearContractedBuffer()
{
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- static_cast<AdjustChannelsNonDestructiveBufferProvider*>(
- mAdjustChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ static_cast<AdjustChannelsBufferProvider*>(
+ mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
}
}
@@ -455,9 +455,9 @@
mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mAdjustChannelsBufferProvider.get();
}
- if (mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mAdjustChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mAdjustChannelsNonDestructiveBufferProvider.get();
+ if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
+ bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
}
if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
@@ -966,8 +966,8 @@
track->mDownmixerBufferProvider->reset();
} else if (track->mReformatBufferProvider.get() != nullptr) {
track->mReformatBufferProvider->reset();
- } else if (track->mAdjustChannelsNonDestructiveBufferProvider.get() != nullptr) {
- track->mAdjustChannelsNonDestructiveBufferProvider->reset();
+ } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ track->mContractChannelsNonDestructiveBufferProvider->reset();
} else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
track->mAdjustChannelsBufferProvider->reset();
}
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index b764ccb..21d25e1 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -627,79 +627,68 @@
}
}
-AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(audio_format_t format,
- size_t inChannelCount, size_t outChannelCount, size_t frameCount) :
+AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
+ audio_format_t format, size_t inChannelCount, size_t outChannelCount,
+ size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
CopyBufferProvider(
audio_bytes_per_frame(inChannelCount, format),
- audio_bytes_per_frame(outChannelCount, format),
+ audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
frameCount),
mFormat(format),
mInChannelCount(inChannelCount),
mOutChannelCount(outChannelCount),
- mSampleSizeInBytes(audio_bytes_per_sample(format))
-{
- ALOGV("AdjustBufferProvider(%p)(%#x, %zu, %zu, %zu)",
- this, format, inChannelCount, outChannelCount, frameCount);
-}
-
-void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
-{
- adjust_channels(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
- frames * mInChannelCount * mSampleSizeInBytes);
-}
-
-AdjustChannelsNonDestructiveBufferProvider::AdjustChannelsNonDestructiveBufferProvider(
- audio_format_t format, size_t inChannelCount, size_t outChannelCount,
- audio_format_t contractedFormat, size_t contractedFrameCount, void* contractedBuffer) :
- CopyBufferProvider(
- audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
- audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
- contractedFrameCount),
- mFormat(format),
- mInChannelCount(inChannelCount),
- mOutChannelCount(outChannelCount),
mSampleSizeInBytes(audio_bytes_per_sample(format)),
+ mFrameCount(frameCount),
mContractedChannelCount(inChannelCount - outChannelCount),
mContractedFormat(contractedFormat),
- mContractedFrameCount(contractedFrameCount),
mContractedBuffer(contractedBuffer),
mContractedWrittenFrames(0)
{
- ALOGV("AdjustChannelsNonDestructiveBufferProvider(%p)(%#x, %zu, %zu, %#x, %p)",
- this, format, inChannelCount, outChannelCount, contractedFormat, contractedBuffer);
+ ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
+ inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
}
}
-status_t AdjustChannelsNonDestructiveBufferProvider::getNextBuffer(
- AudioBufferProvider::Buffer* pBuffer)
+status_t AdjustChannelsBufferProvider::getNextBuffer(AudioBufferProvider::Buffer* pBuffer)
{
- const size_t outFramesLeft = mContractedFrameCount - mContractedWrittenFrames;
- if (outFramesLeft < pBuffer->frameCount) {
- // Restrict the frame count so that we don't write over the size of the output buffer.
- pBuffer->frameCount = outFramesLeft;
+ if (mContractedBuffer != nullptr) {
+ // Restrict frame count only when it is needed to save contracted frames.
+ const size_t outFramesLeft = mFrameCount - mContractedWrittenFrames;
+ if (outFramesLeft < pBuffer->frameCount) {
+ // Restrict the frame count so that we don't write over the size of the output buffer.
+ pBuffer->frameCount = outFramesLeft;
+ }
}
return CopyBufferProvider::getNextBuffer(pBuffer);
}
-void AdjustChannelsNonDestructiveBufferProvider::copyFrames(
- void *dst, const void *src, size_t frames)
+void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
{
- adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount, mSampleSizeInBytes,
- frames * mInChannelCount * mSampleSizeInBytes);
- if (mContractedFormat != AUDIO_FORMAT_INVALID && mContractedBuffer != NULL
- && mInChannelCount > mOutChannelCount) {
- const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
- memcpy_by_audio_format(
- (uint8_t*)mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
- mContractedFormat, (uint8_t*)dst + contractedIdx, mFormat,
- mContractedChannelCount * frames);
- mContractedWrittenFrames += frames;
+ if (mInChannelCount > mOutChannelCount) {
+ // For the multi-to-mono case, adjust_channels has special logic that mixes the first two
+ // input channels into a single output channel. Use adjust_channels_non_destructive here
+ // instead so that only the first channel's data is kept, even when contracting to mono.
+ adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID
+ && mContractedBuffer != nullptr) {
+ const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ memcpy_by_audio_format(
+ (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
+ mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
+ mContractedChannelCount * frames);
+ mContractedWrittenFrames += frames;
+ }
+ } else {
+ // Expanding: extra channels are zero-filled and appended at the end of each audio frame.
+ adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
}
}
-void AdjustChannelsNonDestructiveBufferProvider::reset()
+void AdjustChannelsBufferProvider::reset()
{
mContractedWrittenFrames = 0;
CopyBufferProvider::reset();
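
A hedged sketch of how the merged AdjustChannelsBufferProvider might be set up for the audio-haptic case described above: contract a 3-channel track (stereo audio plus one haptic channel) to stereo while copying the haptic samples, converted to 16-bit PCM, into a separate buffer. The frame count, formats, and channel layout are illustrative only; the constructor arguments follow the new signature shown above:

    #include <media/AudioBufferProvider.h>
    #include <media/BufferProviders.h>
    #include <system/audio.h>
    #include <vector>

    // Contracts stereo + haptic to stereo; haptic samples are written to hapticBuffer.
    void exampleContractHaptic(android::AudioBufferProvider* trackProvider) {
        constexpr size_t kFrames = 960;                       // e.g. 20 ms at 48 kHz
        std::vector<int16_t> hapticBuffer(kFrames);           // 1 contracted channel
        android::AdjustChannelsBufferProvider provider(
                AUDIO_FORMAT_PCM_FLOAT,                       // track sample format
                3 /* inChannelCount */, 2 /* outChannelCount */, kFrames,
                AUDIO_FORMAT_PCM_16_BIT,                      // contracted output format
                hapticBuffer.data());
        provider.setBufferProvider(trackProvider);            // pull frames from the track
        // getNextBuffer() now yields stereo frames; haptic samples land in hapticBuffer.
    }
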
diff --git a/media/libeffects/downmix/tests/Android.bp b/media/libeffects/downmix/tests/Android.bp
index e2e7dbd..63afc54 100644
--- a/media/libeffects/downmix/tests/Android.bp
+++ b/media/libeffects/downmix/tests/Android.bp
@@ -24,7 +24,6 @@
],
cflags: [
- "-v",
"-Werror",
"-Wextra",
],
diff --git a/media/libeffects/lvm/tests/lvmtest.cpp b/media/libeffects/lvm/tests/lvmtest.cpp
index fe47d0b..416bdaa 100644
--- a/media/libeffects/lvm/tests/lvmtest.cpp
+++ b/media/libeffects/lvm/tests/lvmtest.cpp
@@ -15,6 +15,7 @@
*/
#include <assert.h>
#include <inttypes.h>
+#include <iterator>
#include <math.h>
#include <stdlib.h>
#include <string.h>
@@ -102,20 +103,15 @@
printf("\n -M");
printf("\n Mono mode (force all input audio channels to be identical)");
printf("\n -basslvl:<effect_level>");
- printf("\n A value that ranges between 0 - 15 default 0");
+ printf("\n A value that ranges between %d - %d default 0", LVM_BE_MIN_EFFECTLEVEL,
+ LVM_BE_MAX_EFFECTLEVEL);
printf("\n");
printf("\n -eqPreset:<preset Value>");
- printf("\n 0 - Normal");
- printf("\n 1 - Classical");
- printf("\n 2 - Dance");
- printf("\n 3 - Flat");
- printf("\n 4 - Folk");
- printf("\n 5 - Heavy Metal");
- printf("\n 6 - Hip Hop");
- printf("\n 7 - Jazz");
- printf("\n 8 - Pop");
- printf("\n 9 - Rock");
- printf("\n default 0");
+ const size_t numPresetLvls = std::size(gEqualizerPresets);
+ for (size_t i = 0; i < numPresetLvls; ++i) {
+ printf("\n %zu - %s", i, gEqualizerPresets[i].name);
+ }
+ printf("\n default - 0");
printf("\n -bE ");
printf("\n Enable Dynamic Bass Enhancement");
printf("\n");
@@ -619,7 +615,7 @@
std::fill(fp + 1, fp + channelCount, *fp); // replicate ch 0
}
}
-#if 1
+#ifndef BYPASS_EXEC
errCode = lvmExecute(floatIn.data(), floatOut.data(), pContext, plvmConfigParams);
if (errCode) {
printf("\nError: lvmExecute returned with %d\n", errCode);
@@ -689,7 +685,7 @@
lvmConfigParams.monoMode = true;
} else if (!strncmp(argv[i], "-basslvl:", 9)) {
const int bassEffectLevel = atoi(argv[i] + 9);
- if (bassEffectLevel > 15 || bassEffectLevel < 0) {
+ if (bassEffectLevel > LVM_BE_MAX_EFFECTLEVEL || bassEffectLevel < LVM_BE_MIN_EFFECTLEVEL) {
printf("Error: Unsupported Bass Effect Level : %d\n",
bassEffectLevel);
printUsage();
@@ -698,7 +694,8 @@
lvmConfigParams.bassEffectLevel = bassEffectLevel;
} else if (!strncmp(argv[i], "-eqPreset:", 10)) {
const int eqPresetLevel = atoi(argv[i] + 10);
- if (eqPresetLevel > 9 || eqPresetLevel < 0) {
+ const int numPresetLvls = std::size(gEqualizerPresets);
+ if (eqPresetLevel >= numPresetLvls || eqPresetLevel < 0) {
printf("Error: Unsupported Equalizer Preset : %d\n", eqPresetLevel);
printUsage();
return -1;
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index 5853e4b..9799cad 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -259,6 +259,7 @@
name: "libmedia_player2_util",
srcs: [
+ "AudioParameter.cpp",
"BufferingSettings.cpp",
"DataSourceDesc.cpp",
"MediaCodecBuffer.cpp",
diff --git a/media/libmedia/IMediaRecorder.cpp b/media/libmedia/IMediaRecorder.cpp
index 56ee18e..f283569 100644
--- a/media/libmedia/IMediaRecorder.cpp
+++ b/media/libmedia/IMediaRecorder.cpp
@@ -66,6 +66,8 @@
ENABLE_AUDIO_DEVICE_CALLBACK,
GET_ACTIVE_MICROPHONES,
GET_PORT_ID,
+ SET_MICROPHONE_DIRECTION,
+ SET_MICROPHONE_FIELD_DIMENSION
};
class BpMediaRecorder: public BpInterface<IMediaRecorder>
@@ -407,6 +409,24 @@
return status;
}
+ status_t setMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setMicrophoneDirection(%d)", direction);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ data.writeInt32(direction);
+ status_t status = remote()->transact(SET_MICROPHONE_DIRECTION, data, &reply);
+ return status == NO_ERROR ? (status_t)reply.readInt32() : status;
+ }
+
+ status_t setMicrophoneFieldDimension(float zoom) {
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ Parcel data, reply;
+ data.writeInterfaceToken(IMediaRecorder::getInterfaceDescriptor());
+ data.writeFloat(zoom);
+ status_t status = remote()->transact(SET_MICROPHONE_FIELD_DIMENSION, data, &reply);
+ return status == NO_ERROR ? (status_t)reply.readInt32() : status;
+ }
+
status_t getPortId(audio_port_handle_t *portId)
{
ALOGV("getPortId");
@@ -689,6 +709,23 @@
}
return NO_ERROR;
}
+ case SET_MICROPHONE_DIRECTION: {
+ ALOGV("SET_MICROPHONE_DIRECTION");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ int direction = data.readInt32();
+ status_t status =
+ setMicrophoneDirection(static_cast<audio_microphone_direction_t>(direction));
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
+ case SET_MICROPHONE_FIELD_DIMENSION: {
+ ALOGV("SET_MICROPHONE_FIELD_DIMENSION");
+ CHECK_INTERFACE(IMediaRecorder, data, reply);
+ float zoom = data.readFloat();
+ status_t status = setMicrophoneFieldDimension(zoom);
+ reply->writeInt32(status);
+ return NO_ERROR;
+ }
default:
return BBinder::onTransact(code, data, reply, flags);
}
diff --git a/media/libmedia/NdkWrapper.cpp b/media/libmedia/NdkWrapper.cpp
index cbd64bb..ea0547c 100644
--- a/media/libmedia/NdkWrapper.cpp
+++ b/media/libmedia/NdkWrapper.cpp
@@ -96,6 +96,7 @@
static const char *AMediaFormatKeyGroupInt64[] = {
AMEDIAFORMAT_KEY_DURATION,
+ AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER,
AMEDIAFORMAT_KEY_REPEAT_PREVIOUS_FRAME_AFTER,
AMEDIAFORMAT_KEY_TIME_US,
};
@@ -127,6 +128,7 @@
static const char *AMediaFormatKeyGroupFloatInt32[] = {
AMEDIAFORMAT_KEY_FRAME_RATE,
AMEDIAFORMAT_KEY_I_FRAME_INTERVAL,
+ AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER,
AMEDIAFORMAT_KEY_OPERATING_RATE,
};
diff --git a/media/libmedia/TypeConverter.cpp b/media/libmedia/TypeConverter.cpp
index 8dac91a..0301b21 100644
--- a/media/libmedia/TypeConverter.cpp
+++ b/media/libmedia/TypeConverter.cpp
@@ -415,14 +415,6 @@
OutputDeviceConverter::fromString(literalDevice, device);
}
-bool deviceToString(audio_devices_t device, std::string& literalDevice) {
- if (device & AUDIO_DEVICE_BIT_IN) {
- return InputDeviceConverter::toString(device, literalDevice);
- } else {
- return OutputDeviceConverter::toString(device, literalDevice);
- }
-}
-
SampleRateTraits::Collection samplingRatesFromString(
const std::string &samplingRates, const char *del)
{
diff --git a/media/libmedia/include/media/BufferProviders.h b/media/libmedia/include/media/BufferProviders.h
index ea41527..b038854 100644
--- a/media/libmedia/include/media/BufferProviders.h
+++ b/media/libmedia/include/media/BufferProviders.h
@@ -218,33 +218,21 @@
bool mAudioPlaybackRateValid; // flag for current parameters validity
};
-// AdjustBufferProvider derives from CopyBufferProvider to adjust sample data.
+// AdjustChannelsBufferProvider derives from CopyBufferProvider to adjust sample data.
// Expands or contracts sample data from one interleaved channel format to another.
-// Expanded channels are filled with zeros and put at the end of each audio frame.
-// Contracted channels are omitted from the end of each audio frame.
+// Extra expanded channels are filled with zeros and put at the end of each audio frame.
+// Contracted channels are copied to the end of the output buffer (storage should be
+// allocated appropriately), and may optionally be converted and written to a separate
+// contracted buffer.
class AdjustChannelsBufferProvider : public CopyBufferProvider {
public:
AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount);
- //Overrides
- void copyFrames(void *dst, const void *src, size_t frames) override;
-
-protected:
- const audio_format_t mFormat;
- const size_t mInChannelCount;
- const size_t mOutChannelCount;
- const size_t mSampleSizeInBytes;
-};
-
-// AdjustChannelsNonDestructiveBufferProvider derives from CopyBufferProvider to adjust sample data.
-// Expands or contracts sample data from one interleaved channel format to another.
-// Extra expanded channels are interleaved in from the end of the input buffer.
-// Contracted channels are copied to the end of the output buffer.
-// Contracted channels could be written to output buffer.
-class AdjustChannelsNonDestructiveBufferProvider : public CopyBufferProvider {
-public:
- AdjustChannelsNonDestructiveBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, audio_format_t contractedFormat, size_t contractedFrameCount,
+ size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
+ format, inChannelCount, outChannelCount,
+ frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
+ // Contracted data is converted to contractedFormat and put into contractedBuffer.
+ AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
+ size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
void* contractedBuffer);
//Overrides
status_t getNextBuffer(Buffer* pBuffer) override;
@@ -258,9 +246,9 @@
const size_t mInChannelCount;
const size_t mOutChannelCount;
const size_t mSampleSizeInBytes;
+ const size_t mFrameCount;
const size_t mContractedChannelCount;
const audio_format_t mContractedFormat;
- const size_t mContractedFrameCount;
void *mContractedBuffer;
size_t mContractedWrittenFrames;
size_t mContractedFrameSize;
diff --git a/media/libmedia/include/media/IMediaRecorder.h b/media/libmedia/include/media/IMediaRecorder.h
index e7c466d..0b09420 100644
--- a/media/libmedia/include/media/IMediaRecorder.h
+++ b/media/libmedia/include/media/IMediaRecorder.h
@@ -73,6 +73,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+ virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) = 0;
};
diff --git a/media/libmedia/include/media/IMediaSource.h b/media/libmedia/include/media/IMediaSource.h
index 7a4b1b9..5ab6e37 100644
--- a/media/libmedia/include/media/IMediaSource.h
+++ b/media/libmedia/include/media/IMediaSource.h
@@ -126,8 +126,8 @@
static const size_t kBinderMediaBuffers = 4; // buffers managed by BnMediaSource
static const size_t kTransferSharedAsSharedThreshold = 4 * 1024; // if >= shared, else inline
- static const size_t kTransferInlineAsSharedThreshold = 64 * 1024; // if >= shared, else inline
- static const size_t kInlineMaxTransfer = 256 * 1024; // Binder size limited to BINDER_VM_SIZE.
+ static const size_t kTransferInlineAsSharedThreshold = 8 * 1024; // if >= shared, else inline
+ static const size_t kInlineMaxTransfer = 64 * 1024; // Binder size limited to BINDER_VM_SIZE.
protected:
virtual ~BnMediaSource();
diff --git a/media/libmedia/include/media/MediaRecorderBase.h b/media/libmedia/include/media/MediaRecorderBase.h
index e1c5d47..88282ac 100644
--- a/media/libmedia/include/media/MediaRecorderBase.h
+++ b/media/libmedia/include/media/MediaRecorderBase.h
@@ -72,6 +72,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled) = 0;
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones) = 0;
+ virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction) = 0;
+ virtual status_t setMicrophoneFieldDimension(float zoom) = 0;
virtual status_t getPortId(audio_port_handle_t *portId) const = 0;
diff --git a/media/libmedia/include/media/TypeConverter.h b/media/libmedia/include/media/TypeConverter.h
index 418e09c..3acfe98 100644
--- a/media/libmedia/include/media/TypeConverter.h
+++ b/media/libmedia/include/media/TypeConverter.h
@@ -233,8 +233,6 @@
bool deviceFromString(const std::string& literalDevice, audio_devices_t& device);
-bool deviceToString(audio_devices_t device, std::string& literalDevice);
-
SampleRateTraits::Collection samplingRatesFromString(
const std::string &samplingRates, const char *del = AudioParameter::valueListSeparator);
@@ -255,47 +253,53 @@
OutputChannelTraits::Collection outputChannelMasksFromString(
const std::string &outChannels, const char *del = AudioParameter::valueListSeparator);
-static inline std::string toString(audio_usage_t usage)
+// counting enumerations
+template <typename T, std::enable_if_t<std::is_same<T, audio_content_type_t>::value
+ || std::is_same<T, audio_mode_t>::value
+ || std::is_same<T, audio_source_t>::value
+ || std::is_same<T, audio_stream_type_t>::value
+ || std::is_same<T, audio_usage_t>::value
+ , int> = 0>
+static inline std::string toString(const T& value)
{
- std::string usageLiteral;
- if (!android::UsageTypeConverter::toString(usage, usageLiteral)) {
- ALOGV("failed to convert usage: %d", usage);
- return "AUDIO_USAGE_UNKNOWN";
- }
- return usageLiteral;
+ std::string result;
+ return TypeConverter<DefaultTraits<T>>::toString(value, result)
+ ? result : std::to_string(static_cast<int>(value));
+
}
-static inline std::string toString(audio_content_type_t content)
+// flag enumerations
+template <typename T, std::enable_if_t<std::is_same<T, audio_gain_mode_t>::value
+ || std::is_same<T, audio_input_flags_t>::value
+ || std::is_same<T, audio_output_flags_t>::value
+ , int> = 0>
+static inline std::string toString(const T& value)
{
- std::string contentLiteral;
- if (!android::AudioContentTypeConverter::toString(content, contentLiteral)) {
- ALOGV("failed to convert content type: %d", content);
- return "AUDIO_CONTENT_TYPE_UNKNOWN";
- }
- return contentLiteral;
+ std::string result;
+ TypeConverter<DefaultTraits<T>>::maskToString(value, result);
+ return result;
}
-static inline std::string toString(audio_stream_type_t stream)
+static inline std::string toString(const audio_devices_t& devices)
{
- std::string streamLiteral;
- if (!android::StreamTypeConverter::toString(stream, streamLiteral)) {
- ALOGV("failed to convert stream: %d", stream);
- return "AUDIO_STREAM_DEFAULT";
+ std::string result;
+ if ((devices & AUDIO_DEVICE_BIT_IN) != 0) {
+ InputDeviceConverter::maskToString(devices, result);
+ } else {
+ OutputDeviceConverter::maskToString(devices, result);
}
- return streamLiteral;
+ return result;
}
-static inline std::string toString(audio_source_t source)
+// TODO: Remove when FormatTraits uses DefaultTraits.
+static inline std::string toString(const audio_format_t& format)
{
- std::string sourceLiteral;
- if (!android::SourceTypeConverter::toString(source, sourceLiteral)) {
- ALOGV("failed to convert source: %d", source);
- return "AUDIO_SOURCE_DEFAULT";
- }
- return sourceLiteral;
+ std::string result;
+ return TypeConverter<VectorTraits<audio_format_t>>::toString(format, result)
+ ? result : std::to_string(static_cast<int>(format));
}
-static inline std::string toString(const audio_attributes_t &attributes)
+static inline std::string toString(const audio_attributes_t& attributes)
{
std::ostringstream result;
result << "{ Content type: " << toString(attributes.content_type)
@@ -308,16 +312,6 @@
return result.str();
}
-static inline std::string toString(audio_mode_t mode)
-{
- std::string modeLiteral;
- if (!android::AudioModeConverter::toString(mode, modeLiteral)) {
- ALOGV("failed to convert mode: %d", mode);
- return "AUDIO_MODE_INVALID";
- }
- return modeLiteral;
-}
-
}; // namespace android
#endif /*ANDROID_TYPE_CONVERTER_H_*/
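
A brief usage sketch of the templated toString() overloads added above; the counting-enum, flag-enum, and device-mask overloads are selected automatically from the argument type (the specific constants are only examples):

    #include <media/TypeConverter.h>
    #include <iostream>

    int main() {
        std::cout << android::toString(AUDIO_STREAM_MUSIC) << '\n';       // counting enum
        std::cout << android::toString(AUDIO_OUTPUT_FLAG_FAST) << '\n';   // flag enum
        std::cout << android::toString(AUDIO_DEVICE_OUT_SPEAKER) << '\n'; // device mask
        return 0;
    }
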
diff --git a/media/libmedia/include/media/mediarecorder.h b/media/libmedia/include/media/mediarecorder.h
index 33be559..8580437 100644
--- a/media/libmedia/include/media/mediarecorder.h
+++ b/media/libmedia/include/media/mediarecorder.h
@@ -264,6 +264,9 @@
status_t getRoutedDeviceId(audio_port_handle_t *deviceId);
status_t enableAudioDeviceCallback(bool enabled);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setMicrophoneFieldDimension(float zoom);
+
status_t getPortId(audio_port_handle_t *portId) const;
private:
diff --git a/media/libmedia/mediarecorder.cpp b/media/libmedia/mediarecorder.cpp
index d07e703..6c59a29 100644
--- a/media/libmedia/mediarecorder.cpp
+++ b/media/libmedia/mediarecorder.cpp
@@ -842,6 +842,16 @@
return mMediaRecorder->getActiveMicrophones(activeMicrophones);
}
+status_t MediaRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setMicrophoneDirection(%d)", direction);
+ return mMediaRecorder->setMicrophoneDirection(direction);
+}
+
+status_t MediaRecorder::setMicrophoneFieldDimension(float zoom) {
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ return mMediaRecorder->setMicrophoneFieldDimension(zoom);
+}
+
status_t MediaRecorder::getPortId(audio_port_handle_t *portId) const
{
ALOGV("getPortId");
diff --git a/media/libmediaplayer2/JAudioTrack.cpp b/media/libmediaplayer2/JAudioTrack.cpp
index a01afa3..910edff 100644
--- a/media/libmediaplayer2/JAudioTrack.cpp
+++ b/media/libmediaplayer2/JAudioTrack.cpp
@@ -571,8 +571,8 @@
}
void JAudioTrack::registerRoutingDelegates(
- std::vector<std::pair<jobject, jobject>>& routingDelegates) {
- for (std::vector<std::pair<jobject, jobject>>::iterator it = routingDelegates.begin();
+ Vector<std::pair<jobject, jobject>>& routingDelegates) {
+ for (Vector<std::pair<jobject, jobject>>::iterator it = routingDelegates.begin();
it != routingDelegates.end(); it++) {
addAudioDeviceCallback(it->second, getHandler(it->second));
}
@@ -597,23 +597,9 @@
return env->CallObjectMethod(routingDelegateObj, jGetHandler);
}
-jobject JAudioTrack::addGlobalRef(const jobject obj) {
+jobject JAudioTrack::findByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key) {
JNIEnv *env = JavaVMHelper::getJNIEnv();
- return reinterpret_cast<jobject>(env->NewGlobalRef(obj));
-}
-
-status_t JAudioTrack::removeGlobalRef(const jobject obj) {
- if (obj == NULL) {
- return BAD_VALUE;
- }
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- env->DeleteGlobalRef(obj);
- return NO_ERROR;
-}
-
-jobject JAudioTrack::findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
- JNIEnv *env = JavaVMHelper::getJNIEnv();
- for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+ for (Vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
if (env->IsSameObject(it->first, key)) {
return it->second;
}
@@ -621,9 +607,9 @@
return nullptr;
}
-void JAudioTrack::eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key) {
+void JAudioTrack::eraseByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key) {
JNIEnv *env = JavaVMHelper::getJNIEnv();
- for (std::vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
+ for (Vector<std::pair<jobject, jobject>>::iterator it = mp.begin(); it != mp.end(); it++) {
if (env->IsSameObject(it->first, key)) {
mp.erase(it);
return;
diff --git a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
index 4de92ad..7c3063d 100644
--- a/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
+++ b/media/libmediaplayer2/MediaPlayer2AudioOutput.cpp
@@ -85,9 +85,6 @@
}
MediaPlayer2AudioOutput::~MediaPlayer2AudioOutput() {
- for (auto routingDelegate : mRoutingDelegates) {
- JAudioTrack::removeGlobalRef(routingDelegate.second);
- }
close();
delete mCallbackData;
}
@@ -524,13 +521,16 @@
status_t MediaPlayer2AudioOutput::addAudioDeviceCallback(jobject jRoutingDelegate) {
ALOGV("addAudioDeviceCallback");
Mutex::Autolock lock(mLock);
- jobject listener = JAudioTrack::getListener(jRoutingDelegate);
- if (mJAudioTrack != nullptr &&
- JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
- jobject handler = JAudioTrack::getHandler(jRoutingDelegate);
- jobject routingDelegate = JAudioTrack::addGlobalRef(jRoutingDelegate);
+ jobject listener = (new JObjectHolder(
+ JAudioTrack::getListener(jRoutingDelegate)))->getJObject();
+ if (JAudioTrack::findByKey(mRoutingDelegates, listener) == nullptr) {
+ jobject handler = (new JObjectHolder(
+ JAudioTrack::getHandler(jRoutingDelegate)))->getJObject();
+ jobject routingDelegate = (new JObjectHolder(jRoutingDelegate))->getJObject();
mRoutingDelegates.push_back(std::pair<jobject, jobject>(listener, routingDelegate));
- return mJAudioTrack->addAudioDeviceCallback(routingDelegate, handler);
+ if (mJAudioTrack != nullptr) {
+ return mJAudioTrack->addAudioDeviceCallback(routingDelegate, handler);
+ }
}
return NO_ERROR;
}
@@ -539,13 +539,11 @@
ALOGV("removeAudioDeviceCallback");
Mutex::Autolock lock(mLock);
jobject routingDelegate = nullptr;
- if (mJAudioTrack != nullptr &&
- (routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
- mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
- JAudioTrack::eraseByKey(mRoutingDelegates, listener);
- if (JAudioTrack::removeGlobalRef(routingDelegate) != NO_ERROR) {
- return BAD_VALUE;
+ if ((routingDelegate = JAudioTrack::findByKey(mRoutingDelegates, listener)) != nullptr) {
+ if (mJAudioTrack != nullptr) {
+ mJAudioTrack->removeAudioDeviceCallback(routingDelegate);
}
+ JAudioTrack::eraseByKey(mRoutingDelegates, listener);
}
return NO_ERROR;
}
diff --git a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
index 87dc889..7381286 100644
--- a/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
+++ b/media/libmediaplayer2/include/mediaplayer2/JAudioTrack.h
@@ -17,7 +17,6 @@
#ifndef ANDROID_JAUDIOTRACK_H
#define ANDROID_JAUDIOTRACK_H
-#include <vector>
#include <utility>
#include <jni.h>
#include <media/AudioResamplerPublic.h>
@@ -25,6 +24,7 @@
#include <media/VolumeShaper.h>
#include <system/audio.h>
#include <utils/Errors.h>
+#include <utils/Vector.h>
#include <mediaplayer2/JObjectHolder.h>
#include <media/AudioTimestamp.h> // It has dependency on audio.h/Errors.h, but doesn't
// include them in it. Therefore it is included here at last.
@@ -405,7 +405,7 @@
* routingDelegates: backed-up routing delegates
*
*/
- void registerRoutingDelegates(std::vector<std::pair<jobject, jobject>>& routingDelegates);
+ void registerRoutingDelegates(Vector<std::pair<jobject, jobject>>& routingDelegates);
/* get listener from RoutingDelegate object
*/
@@ -415,17 +415,6 @@
*/
static jobject getHandler(const jobject routingDelegateObj);
- /* convert local reference to global reference.
- */
- static jobject addGlobalRef(const jobject obj);
-
- /* erase global reference.
- *
- * Returns NO_ERROR if succeeds
- * BAD_VALUE if obj is NULL
- */
- static status_t removeGlobalRef(const jobject obj);
-
/*
* Parameters:
* map and key
@@ -433,13 +422,13 @@
* Returns value if key is in the map
* nullptr if key is not in the map
*/
- static jobject findByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
+ static jobject findByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key);
/*
* Parameters:
* map and key
*/
- static void eraseByKey(std::vector<std::pair<jobject, jobject>>& mp, const jobject key);
+ static void eraseByKey(Vector<std::pair<jobject, jobject>>& mp, const jobject key);
private:
audio_output_flags_t mFlags;
diff --git a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
index bda4f61..1b3f2dc 100644
--- a/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
+++ b/media/libmediaplayer2/include/mediaplayer2/MediaPlayer2AudioOutput.h
@@ -22,7 +22,6 @@
#include <mediaplayer2/JAudioTrack.h>
#include <mediaplayer2/JObjectHolder.h>
-#include <vector>
#include <utility>
#include <utils/String16.h>
#include <utils/Vector.h>
@@ -125,7 +124,7 @@
audio_output_flags_t mFlags;
sp<JObjectHolder> mPreferredDevice;
mutable Mutex mLock;
- std::vector<std::pair<jobject, jobject>> mRoutingDelegates; // <listener, routingDelegate>
+ Vector<std::pair<jobject, jobject>> mRoutingDelegates; // <listener, routingDelegate>
// static variables below not protected by mutex
static bool mIsOnEmulator;
diff --git a/media/libmediaplayerservice/Android.bp b/media/libmediaplayerservice/Android.bp
index 22fa495..46a1c24 100644
--- a/media/libmediaplayerservice/Android.bp
+++ b/media/libmediaplayerservice/Android.bp
@@ -64,8 +64,6 @@
name: "libmediaplayerservice",
- compile_multilib: "32",
-
sanitize: {
cfi: true,
},
diff --git a/media/libmediaplayerservice/MediaRecorderClient.cpp b/media/libmediaplayerservice/MediaRecorderClient.cpp
index 3fa8e3f..d6628d9 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.cpp
+++ b/media/libmediaplayerservice/MediaRecorderClient.cpp
@@ -538,6 +538,22 @@
return NO_INIT;
}
+status_t MediaRecorderClient::setMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setMicrophoneDirection(%d)", direction);
+ if (mRecorder != NULL) {
+ return mRecorder->setMicrophoneDirection(direction);
+ }
+ return NO_INIT;
+}
+
+status_t MediaRecorderClient::setMicrophoneFieldDimension(float zoom) {
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ if (mRecorder != NULL) {
+ return mRecorder->setMicrophoneFieldDimension(zoom);
+ }
+ return NO_INIT;
+}
+
status_t MediaRecorderClient::getPortId(audio_port_handle_t *portId) {
ALOGV("getPortId");
Mutex::Autolock lock(mLock);
diff --git a/media/libmediaplayerservice/MediaRecorderClient.h b/media/libmediaplayerservice/MediaRecorderClient.h
index 303cefc..8da718f 100644
--- a/media/libmediaplayerservice/MediaRecorderClient.h
+++ b/media/libmediaplayerservice/MediaRecorderClient.h
@@ -109,6 +109,8 @@
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(
std::vector<media::MicrophoneInfo>* activeMicrophones);
+ virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) override;
private:
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index 51bd093..624f596 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -118,7 +118,9 @@
mVideoSource(VIDEO_SOURCE_LIST_END),
mStarted(false),
mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mDeviceCallbackEnabled(false) {
+ mDeviceCallbackEnabled(false),
+ mSelectedMicDirection(MIC_DIRECTION_UNSPECIFIED),
+ mSelectedMicFieldDimension(MIC_FIELD_DIMENSION_NORMAL) {
ALOGV("Constructor");
@@ -1089,7 +1091,9 @@
mSampleRate,
mClientUid,
mClientPid,
- mSelectedDeviceId);
+ mSelectedDeviceId,
+ mSelectedMicDirection,
+ mSelectedMicFieldDimension);
status_t err = audioSource->initCheck();
@@ -2268,6 +2272,24 @@
return NO_INIT;
}
+status_t StagefrightRecorder::setMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setMicrophoneDirection(%d)", direction);
+ mSelectedMicDirection = direction;
+ if (mAudioSourceNode != 0) {
+ return mAudioSourceNode->setMicrophoneDirection(direction);
+ }
+ return NO_INIT;
+}
+
+status_t StagefrightRecorder::setMicrophoneFieldDimension(float zoom) {
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ mSelectedMicFieldDimension = zoom;
+ if (mAudioSourceNode != 0) {
+ return mAudioSourceNode->setMicrophoneFieldDimension(zoom);
+ }
+ return NO_INIT;
+}
+
status_t StagefrightRecorder::getPortId(audio_port_handle_t *portId) const {
if (mAudioSourceNode != 0) {
return mAudioSourceNode->getPortId(portId);
diff --git a/media/libmediaplayerservice/StagefrightRecorder.h b/media/libmediaplayerservice/StagefrightRecorder.h
index a292e58..236b19e 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.h
+++ b/media/libmediaplayerservice/StagefrightRecorder.h
@@ -77,6 +77,8 @@
virtual void setAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
virtual status_t enableAudioDeviceCallback(bool enabled);
virtual status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ virtual status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ virtual status_t setMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const override;
private:
@@ -159,6 +161,9 @@
bool mDeviceCallbackEnabled;
wp<AudioSystem::AudioDeviceCallback> mAudioDeviceCallback;
+ audio_microphone_direction_t mSelectedMicDirection;
+ float mSelectedMicFieldDimension;
+
static const int kMaxHighSpeedFps = 1000;
status_t prepareInternal();
diff --git a/media/libmediaplayerservice/tests/Android.bp b/media/libmediaplayerservice/tests/Android.bp
index 4749a8b..f8c89e5 100644
--- a/media/libmediaplayerservice/tests/Android.bp
+++ b/media/libmediaplayerservice/tests/Android.bp
@@ -14,8 +14,6 @@
"android.hardware.drm@1.2",
],
- compile_multilib: "32",
-
cflags: [
"-Werror",
"-Wall",
diff --git a/media/libstagefright/AACWriter.cpp b/media/libstagefright/AACWriter.cpp
index 2ea5fcd..e173974 100644
--- a/media/libstagefright/AACWriter.cpp
+++ b/media/libstagefright/AACWriter.cpp
@@ -85,7 +85,7 @@
CHECK(!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC));
CHECK(meta->findInt32(kKeyChannelCount, &mChannelCount));
CHECK(meta->findInt32(kKeySampleRate, &mSampleRate));
- CHECK(mChannelCount >= 1 && mChannelCount <= 2);
+ CHECK(mChannelCount >= 1 && mChannelCount <= 7);
// Optionally, we want to check whether AACProfile is also set.
if (meta->findInt32(kKeyAACProfile, &mAACProfile)) {
@@ -154,11 +154,11 @@
mDone = true;
void *dummy;
+ status_t status = mSource->stop();
pthread_join(mThread, &dummy);
status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
{
- status_t status = mSource->stop();
if (err == OK &&
(status != OK && status != ERROR_END_OF_STREAM)) {
err = status;
diff --git a/media/libstagefright/ACodec.cpp b/media/libstagefright/ACodec.cpp
index 9d3338b..f00c895 100644
--- a/media/libstagefright/ACodec.cpp
+++ b/media/libstagefright/ACodec.cpp
@@ -37,6 +37,7 @@
#include <media/stagefright/BufferProducerWrapper.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/PersistentSurface.h>
@@ -1817,20 +1818,19 @@
}
if (!msg->findInt64(
- "repeat-previous-frame-after",
- &mRepeatFrameDelayUs)) {
+ KEY_REPEAT_PREVIOUS_FRAME_AFTER, &mRepeatFrameDelayUs)) {
mRepeatFrameDelayUs = -1LL;
}
// only allow 32-bit value, since we pass it as U32 to OMX.
- if (!msg->findInt64("max-pts-gap-to-encoder", &mMaxPtsGapUs)) {
+ if (!msg->findInt64(KEY_MAX_PTS_GAP_TO_ENCODER, &mMaxPtsGapUs)) {
mMaxPtsGapUs = 0LL;
} else if (mMaxPtsGapUs > INT32_MAX || mMaxPtsGapUs < INT32_MIN) {
ALOGW("Unsupported value for max pts gap %lld", (long long) mMaxPtsGapUs);
mMaxPtsGapUs = 0LL;
}
- if (!msg->findFloat("max-fps-to-encoder", &mMaxFps)) {
+ if (!msg->findFloat(KEY_MAX_FPS_TO_ENCODER, &mMaxFps)) {
mMaxFps = -1;
}
@@ -1844,8 +1844,8 @@
}
if (!msg->findInt32(
- "create-input-buffers-suspended",
- (int32_t*)&mCreateInputBuffersSuspended)) {
+ KEY_CREATE_INPUT_SURFACE_SUSPENDED,
+ (int32_t*)&mCreateInputBuffersSuspended)) {
mCreateInputBuffersSuspended = false;
}
}
@@ -6667,7 +6667,7 @@
return false;
}
- mDeathNotifier = new DeathNotifier(notify);
+ mDeathNotifier = new DeathNotifier(new AMessage(kWhatOMXDied, mCodec));
auto tOmxNode = omxNode->getHalInterface<IOmxNode>();
if (tOmxNode && !tOmxNode->linkToDeath(mDeathNotifier, 0)) {
mDeathNotifier.clear();
@@ -7431,7 +7431,7 @@
}
int64_t timeOffsetUs;
- if (params->findInt64("time-offset-us", &timeOffsetUs)) {
+ if (params->findInt64(PARAMETER_KEY_OFFSET_TIME, &timeOffsetUs)) {
if (mGraphicBufferSource == NULL) {
ALOGE("[%s] Invalid to set input buffer time offset without surface",
mComponentName.c_str());
@@ -7467,7 +7467,7 @@
}
int32_t dropInputFrames;
- if (params->findInt32("drop-input-frames", &dropInputFrames)) {
+ if (params->findInt32(PARAMETER_KEY_SUSPEND, &dropInputFrames)) {
if (mGraphicBufferSource == NULL) {
ALOGE("[%s] Invalid to set suspend without surface",
mComponentName.c_str());
@@ -7475,7 +7475,7 @@
}
int64_t suspendStartTimeUs = -1;
- (void) params->findInt64("drop-start-time-us", &suspendStartTimeUs);
+ (void) params->findInt64(PARAMETER_KEY_SUSPEND_TIME, &suspendStartTimeUs);
status_t err = statusFromBinderStatus(
mGraphicBufferSource->setSuspend(dropInputFrames != 0, suspendStartTimeUs));
diff --git a/media/libstagefright/AMRWriter.cpp b/media/libstagefright/AMRWriter.cpp
index 41106a1..24f6f0b 100644
--- a/media/libstagefright/AMRWriter.cpp
+++ b/media/libstagefright/AMRWriter.cpp
@@ -149,11 +149,11 @@
mDone = true;
void *dummy;
+ status_t status = mSource->stop();
pthread_join(mThread, &dummy);
status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
{
- status_t status = mSource->stop();
if (err == OK &&
(status != OK && status != ERROR_END_OF_STREAM)) {
err = status;
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 488890d..d8b825d 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -199,7 +199,6 @@
"libhidlallocatorutils",
"libhidlbase",
"libhidlmemory",
- "libziparchive",
"android.hidl.allocator@1.0",
"android.hardware.cas.native@1.0",
"android.hardware.media.omx@1.0",
@@ -265,9 +264,11 @@
srcs: [
"ClearFileSource.cpp",
"DataURISource.cpp",
+ "DataSourceBase.cpp",
"HTTPBase.cpp",
"HevcUtils.cpp",
"MediaClock.cpp",
+ "MediaSource.cpp",
"NdkUtils.cpp",
"Utils.cpp",
"VideoFrameSchedulerBase.cpp",
diff --git a/media/libstagefright/AudioSource.cpp b/media/libstagefright/AudioSource.cpp
index 5027303..5f86bd3 100644
--- a/media/libstagefright/AudioSource.cpp
+++ b/media/libstagefright/AudioSource.cpp
@@ -52,7 +52,9 @@
AudioSource::AudioSource(
audio_source_t inputSource, const String16 &opPackageName,
uint32_t sampleRate, uint32_t channelCount, uint32_t outSampleRate,
- uid_t uid, pid_t pid, audio_port_handle_t selectedDeviceId)
+ uid_t uid, pid_t pid, audio_port_handle_t selectedDeviceId,
+ audio_microphone_direction_t selectedMicDirection,
+ float selectedMicFieldDimension)
: mStarted(false),
mSampleRate(sampleRate),
mOutSampleRate(outSampleRate > 0 ? outSampleRate : sampleRate),
@@ -103,7 +105,9 @@
uid,
pid,
NULL /*pAttributes*/,
- selectedDeviceId);
+ selectedDeviceId,
+ selectedMicDirection,
+ selectedMicFieldDimension);
mInitCheck = mRecord->initCheck();
if (mInitCheck != OK) {
mRecord.clear();
@@ -506,6 +510,22 @@
return NO_INIT;
}
+status_t AudioSource::setMicrophoneDirection(audio_microphone_direction_t direction) {
+ ALOGV("setMicrophoneDirection(%d)", direction);
+ if (mRecord != 0) {
+ return mRecord->setMicrophoneDirection(direction);
+ }
+ return NO_INIT;
+}
+
+status_t AudioSource::setMicrophoneFieldDimension(float zoom) {
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
+ if (mRecord != 0) {
+ return mRecord->setMicrophoneFieldDimension(zoom);
+ }
+ return NO_INIT;
+}
+
status_t AudioSource::getPortId(audio_port_handle_t *portId) const {
if (mRecord != 0) {
*portId = mRecord->getPortId();
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 0c38f2e..482a1a7 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -312,14 +312,23 @@
for (uint32_t j = 0; j < numPics; ++j) {
skipUE(&reader); // delta_poc_s0|1_minus1[i]
reader.skipBits(1); // used_by_curr_pic_s0|1_flag[i]
+ if (reader.overRead()) {
+ return ERROR_MALFORMED;
+ }
}
}
+ if (reader.overRead()) {
+ return ERROR_MALFORMED;
+ }
}
if (reader.getBitsWithFallback(1, 0)) { // long_term_ref_pics_present_flag
uint32_t numLongTermRefPicSps = parseUEWithFallback(&reader, 0);
for (uint32_t i = 0; i < numLongTermRefPicSps; ++i) {
reader.skipBits(log2MaxPicOrderCntLsb); // lt_ref_pic_poc_lsb_sps[i]
reader.skipBits(1); // used_by_curr_pic_lt_sps_flag[i]
+ if (reader.overRead()) {
+ return ERROR_MALFORMED;
+ }
}
}
reader.skipBits(1); // sps_temporal_mvp_enabled_flag
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 72cdd05..a52da45 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -3132,8 +3132,8 @@
if (!mIsHeic) {
if (mStszTableEntries->count() == 0) {
mFirstSampleTimeRealUs = systemTime() / 1000;
+ mOwner->setStartTimestampUs(timestampUs);
mStartTimestampUs = timestampUs;
- mOwner->setStartTimestampUs(mStartTimestampUs);
previousPausedDurationUs = mStartTimestampUs;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 9c58e05..d4e4000 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -1973,10 +1973,11 @@
case kWhatComponentConfigured:
{
- if (mState == UNINITIALIZED || mState == INITIALIZED) {
- // In case a kWhatError message came in and replied with error,
+ if (mState == RELEASING || mState == UNINITIALIZED || mState == INITIALIZED) {
+ // In case a kWhatError or kWhatRelease message came in and replied,
// we log a warning and ignore.
- ALOGW("configure interrupted by error, current state %d", mState);
+ ALOGW("configure interrupted by error or release, current state %d",
+ mState);
break;
}
CHECK_EQ(mState, CONFIGURING);
@@ -2067,6 +2068,13 @@
case kWhatStartCompleted:
{
+ if (mState == RELEASING || mState == UNINITIALIZED) {
+ // In case a kWhatRelease message came in and replied,
+ // we log a warning and ignore.
+ ALOGW("start interrupted by release, current state %d", mState);
+ break;
+ }
+
CHECK_EQ(mState, STARTING);
if (mIsVideo) {
addResource(
@@ -2632,11 +2640,12 @@
break;
}
- // If we're flushing, or we're stopping but received a release
- // request, post the reply for the pending call first, and consider
- // it done. The reply token will be replaced after this, and we'll
- // no longer be able to reply.
- if (mState == FLUSHING || mState == STOPPING) {
+ // If we're flushing, stopping, configuring or starting but
+ // received a release request, post the reply for the pending call
+ // first, and consider it done. The reply token will be replaced
+ // after this, and we'll no longer be able to reply.
+ if (mState == FLUSHING || mState == STOPPING
+ || mState == CONFIGURING || mState == STARTING) {
(new AMessage)->postReply(mReplyID);
}
diff --git a/media/libstagefright/MediaCodecSource.cpp b/media/libstagefright/MediaCodecSource.cpp
index 5d2291f..c3d85ee 100644
--- a/media/libstagefright/MediaCodecSource.cpp
+++ b/media/libstagefright/MediaCodecSource.cpp
@@ -32,6 +32,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaCodecList.h>
#include <media/stagefright/MediaCodecSource.h>
#include <media/stagefright/MediaErrors.h>
@@ -362,7 +363,7 @@
status_t MediaCodecSource::setInputBufferTimeOffset(int64_t timeOffsetUs) {
sp<AMessage> msg = new AMessage(kWhatSetInputBufferTimeOffset, mReflector);
- msg->setInt64("time-offset-us", timeOffsetUs);
+ msg->setInt64(PARAMETER_KEY_OFFSET_TIME, timeOffsetUs);
return postSynchronouslyAndReturnError(msg);
}
@@ -490,7 +491,7 @@
mCodecLooper->start();
if (mFlags & FLAG_USE_SURFACE_INPUT) {
- mOutputFormat->setInt32("create-input-buffers-suspended", 1);
+ mOutputFormat->setInt32(KEY_CREATE_INPUT_SURFACE_SUSPENDED, 1);
}
AString outputMIME;
@@ -677,9 +678,9 @@
CHECK(mFlags & FLAG_USE_SURFACE_INPUT);
if (mEncoder != NULL) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", false);
+ params->setInt32(PARAMETER_KEY_SUSPEND, false);
if (resumeStartTimeUs > 0) {
- params->setInt64("drop-start-time-us", resumeStartTimeUs);
+ params->setInt64(PARAMETER_KEY_SUSPEND_TIME, resumeStartTimeUs);
}
mEncoder->setParameters(params);
}
@@ -799,7 +800,7 @@
if (mFlags & FLAG_USE_SURFACE_INPUT) {
if (mEncoder != NULL) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", false);
+ params->setInt32(PARAMETER_KEY_SUSPEND, false);
if (startTimeUs >= 0) {
params->setInt64("skip-frames-before", startTimeUs);
}
@@ -832,8 +833,8 @@
void MediaCodecSource::onPause(int64_t pauseStartTimeUs) {
if ((mFlags & FLAG_USE_SURFACE_INPUT) && (mEncoder != NULL)) {
sp<AMessage> params = new AMessage;
- params->setInt32("drop-input-frames", true);
- params->setInt64("drop-start-time-us", pauseStartTimeUs);
+ params->setInt32(PARAMETER_KEY_SUSPEND, true);
+ params->setInt64(PARAMETER_KEY_SUSPEND_TIME, pauseStartTimeUs);
mEncoder->setParameters(params);
} else {
CHECK(mPuller != NULL);
@@ -1096,12 +1097,12 @@
sp<AReplyToken> replyID;
CHECK(msg->senderAwaitsResponse(&replyID));
status_t err = OK;
- CHECK(msg->findInt64("time-offset-us", &mInputBufferTimeOffsetUs));
+ CHECK(msg->findInt64(PARAMETER_KEY_OFFSET_TIME, &mInputBufferTimeOffsetUs));
// Propagate the timestamp offset to GraphicBufferSource.
if (mFlags & FLAG_USE_SURFACE_INPUT) {
sp<AMessage> params = new AMessage;
- params->setInt64("time-offset-us", mInputBufferTimeOffsetUs);
+ params->setInt64(PARAMETER_KEY_OFFSET_TIME, mInputBufferTimeOffsetUs);
err = mEncoder->setParameters(params);
}
diff --git a/media/libstagefright/MediaExtractorFactory.cpp b/media/libstagefright/MediaExtractorFactory.cpp
index a938d51..a309ee4 100644
--- a/media/libstagefright/MediaExtractorFactory.cpp
+++ b/media/libstagefright/MediaExtractorFactory.cpp
@@ -23,8 +23,6 @@
#include <binder/PermissionCache.h>
#include <binder/IServiceManager.h>
#include <media/DataSource.h>
-#include <media/MediaAnalyticsItem.h>
-#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/InterfaceUtils.h>
#include <media/stagefright/MediaExtractor.h>
#include <media/stagefright/MediaExtractorFactory.h>
@@ -34,7 +32,6 @@
#include <private/android_filesystem_config.h>
#include <cutils/properties.h>
#include <utils/String8.h>
-#include <ziparchive/zip_archive.h>
#include <dirent.h>
#include <dlfcn.h>
@@ -130,13 +127,6 @@
std::shared_ptr<std::list<sp<ExtractorPlugin>>> MediaExtractorFactory::gPlugins;
bool MediaExtractorFactory::gPluginsRegistered = false;
bool MediaExtractorFactory::gIgnoreVersion = false;
-std::string MediaExtractorFactory::gLinkedLibraries;
-
-// static
-void MediaExtractorFactory::SetLinkedLibraries(const std::string& linkedLibraries) {
- Mutex::Autolock autoLock(gPluginMutex);
- gLinkedLibraries = linkedLibraries;
-}
// static
void *MediaExtractorFactory::sniff(
@@ -235,9 +225,11 @@
}
//static
-void MediaExtractorFactory::RegisterExtractorsInSystem(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
+void MediaExtractorFactory::RegisterExtractors(
+ const char *libDirPath, const android_dlextinfo* dlextinfo,
+ std::list<sp<ExtractorPlugin>> &pluginList) {
ALOGV("search for plugins at %s", libDirPath);
+
DIR *libDir = opendir(libDirPath);
if (libDir) {
struct dirent* libEntry;
@@ -246,73 +238,12 @@
continue;
}
String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
- void *libHandle = dlopen(libPath.string(), RTLD_NOW | RTLD_LOCAL);
- if (libHandle) {
- GetExtractorDef getDef =
- (GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
- if (getDef) {
- ALOGV("registering sniffer for %s", libPath.string());
- RegisterExtractor(
- new ExtractorPlugin(getDef(), libHandle, libPath), pluginList);
- } else {
- ALOGW("%s does not contain sniffer", libPath.string());
- dlclose(libHandle);
- }
- } else {
- ALOGW("couldn't dlopen(%s) %s", libPath.string(), strerror(errno));
- }
- }
-
- closedir(libDir);
- } else {
- ALOGE("couldn't opendir(%s)", libDirPath);
- }
-}
-
-//static
-void MediaExtractorFactory::RegisterExtractorsInApex(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList) {
- ALOGV("search for plugins at %s", libDirPath);
- ALOGV("linked libs %s", gLinkedLibraries.c_str());
-
- std::string libDirPathEx = libDirPath;
- libDirPathEx += "/extractors";
- android_namespace_t *extractorNs = android_create_namespace("extractor",
- nullptr, // ld_library_path
- libDirPath, // default_library_path
- ANDROID_NAMESPACE_TYPE_ISOLATED,
- libDirPathEx.c_str(), // permitted_when_isolated_path
- nullptr); // parent
- if (!android_link_namespaces(extractorNs, nullptr, gLinkedLibraries.c_str())) {
- ALOGE("Failed to link namespace. Failed to load extractor plug-ins in apex.");
- return;
- }
- const android_dlextinfo dlextinfo = {
- .flags = ANDROID_DLEXT_USE_NAMESPACE,
- .library_namespace = extractorNs,
- };
-
- // try extractors subfolder first
- DIR *libDir = opendir(libDirPathEx.c_str());
-
- if (libDir) {
- libDirPath = libDirPathEx.c_str();
- } else {
- libDir = opendir(libDirPath);
- }
- if (libDir) {
- struct dirent* libEntry;
- while ((libEntry = readdir(libDir))) {
- if (libEntry->d_name[0] == '.') {
- continue;
- }
- String8 libPath = String8(libDirPath) + "/" + libEntry->d_name;
if (!libPath.contains("extractor.so")) {
continue;
}
void *libHandle = android_dlopen_ext(
libPath.string(),
- RTLD_NOW | RTLD_LOCAL, &dlextinfo);
+ RTLD_NOW | RTLD_LOCAL, dlextinfo);
if (libHandle) {
GetExtractorDef getDef =
(GetExtractorDef) dlsym(libHandle, "GETEXTRACTORDEF");
@@ -352,17 +283,27 @@
std::shared_ptr<std::list<sp<ExtractorPlugin>>> newList(new std::list<sp<ExtractorPlugin>>());
- RegisterExtractorsInApex("/apex/com.android.media/lib"
+ android_namespace_t *mediaNs = android_get_exported_namespace("media");
+ if (mediaNs != NULL) {
+ const android_dlextinfo dlextinfo = {
+ .flags = ANDROID_DLEXT_USE_NAMESPACE,
+ .library_namespace = mediaNs,
+ };
+ RegisterExtractors("/apex/com.android.media/lib"
#ifdef __LP64__
- "64"
+ "64"
#endif
- , *newList);
+ "/extractors", &dlextinfo, *newList);
- RegisterExtractorsInSystem("/system/lib"
+ } else {
+ ALOGE("couldn't find media namespace.");
+ }
+
+ RegisterExtractors("/system/lib"
#ifdef __LP64__
"64"
#endif
- "/extractors", *newList);
+ "/extractors", NULL, *newList);
newList->sort(compareFunc);
gPlugins = newList;
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 5c13983..cb87b55 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -295,6 +295,18 @@
mEstimatedSizeBytes, mMaxFileSizeLimitBytes);
break;
}
+
+ int32_t isCodecSpecific;
+ if ((buffer->meta_data().findInt32(kKeyIsCodecConfig, &isCodecSpecific)
+ && isCodecSpecific)
+ || IsOpusHeader((uint8_t*)buffer->data() + buffer->range_offset(),
+ buffer->range_length())) {
+ ALOGV("Drop codec specific info buffer");
+ buffer->release();
+ buffer = nullptr;
+ continue;
+ }
+
int64_t timestampUs;
CHECK(buffer->meta_data().findInt64(kKeyTime, &timestampUs));
if (timestampUs > mEstimatedDurationUs) {
diff --git a/media/libstagefright/VideoFrameScheduler2.cpp b/media/libstagefright/VideoFrameScheduler2.cpp
index fc76904..23671f2 100644
--- a/media/libstagefright/VideoFrameScheduler2.cpp
+++ b/media/libstagefright/VideoFrameScheduler2.cpp
@@ -160,13 +160,13 @@
mPhase = (long) (atan2(sampleAvgY, sampleAvgX) / scale);
}
-static void frameCallback(long frameTimeNanos, void* data) {
+static void frameCallback(int64_t frameTimeNanos, void* data) {
if (data == NULL) {
return;
}
sp<VsyncTracker> vsyncTracker(static_cast<VsyncTracker*>(data));
vsyncTracker->addSample(frameTimeNanos);
- AChoreographer_postFrameCallback(AChoreographer_getInstance(),
+ AChoreographer_postFrameCallback64(AChoreographer_getInstance(),
frameCallback, static_cast<void*>(vsyncTracker.get()));
}
@@ -247,7 +247,7 @@
if (AChoreographer_getInstance() == NULL) {
return NO_INIT;
}
- AChoreographer_postFrameCallback(AChoreographer_getInstance(), frameCallback, mData);
+ AChoreographer_postFrameCallback64(AChoreographer_getInstance(), frameCallback, mData);
return OK;
}
diff --git a/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
index db2c61a..5554ebd 100644
--- a/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
+++ b/media/libstagefright/codecs/m4v_h263/enc/test/m4v_h263_enc_test.cpp
@@ -183,6 +183,10 @@
// Initialize the encoder.
if (!PVInitVideoEncoder(&handle, &encParams)) {
fprintf(stderr, "Failed to initialize the encoder\n");
+ fclose(fpInput);
+ fclose(fpOutput);
+ free(inputBuf);
+ free(outputBuf);
return EXIT_FAILURE;
}
@@ -190,6 +194,10 @@
int32_t headerLength = kOutputBufferSize;
if (!PVGetVolHeader(&handle, outputBuf, &headerLength, 0)) {
fprintf(stderr, "Failed to get VOL header\n");
+ fclose(fpInput);
+ fclose(fpOutput);
+ free(inputBuf);
+ free(outputBuf);
return EXIT_FAILURE;
}
fwrite(outputBuf, 1, headerLength, fpOutput);
diff --git a/media/libstagefright/codecs/raw/SoftRaw.cpp b/media/libstagefright/codecs/raw/SoftRaw.cpp
index 1a527b3..0e31804 100644
--- a/media/libstagefright/codecs/raw/SoftRaw.cpp
+++ b/media/libstagefright/codecs/raw/SoftRaw.cpp
@@ -60,7 +60,7 @@
def.eDir = OMX_DirInput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 64 * 1024;
+ def.nBufferSize = 192 * 1024;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
@@ -78,7 +78,7 @@
def.eDir = OMX_DirOutput;
def.nBufferCountMin = kNumBuffers;
def.nBufferCountActual = def.nBufferCountMin;
- def.nBufferSize = 64 * 1024;
+ def.nBufferSize = 192 * 1024;
def.bEnabled = OMX_TRUE;
def.bPopulated = OMX_FALSE;
def.eDomain = OMX_PortDomainAudio;
diff --git a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
index 8a86a0d..da86758 100644
--- a/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
+++ b/media/libstagefright/codecs/xaacdec/SoftXAAC.cpp
@@ -1178,6 +1178,8 @@
int i_target_loudness;
unsigned int i_sbr_mode;
int i;
+ int ui_proc_mem_tabs_size = 0;
+ pVOID pv_alloc_ptr = NULL;
#ifdef ENABLE_MPEG_D_DRC
{
@@ -1228,6 +1230,29 @@
IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE, &i_sbr_mode);
RETURN_IF_FATAL(err_code, "IA_ENHAACPLUS_DEC_CONFIG_PARAM_SBR_MODE");
+ /* Get memory info tables size */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_GET_MEMTABS_SIZE, 0,
+ &ui_proc_mem_tabs_size);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_GET_MEMTABS_SIZE");
+
+ pv_alloc_ptr = memalign(4, ui_proc_mem_tabs_size);
+
+ if (pv_alloc_ptr == NULL) {
+ ALOGE("Cannot create requested memory %d", ui_proc_mem_tabs_size);
+ return IA_FATAL_ERROR;
+ }
+
+ memset(pv_alloc_ptr, 0, ui_proc_mem_tabs_size);
+
+ mMemoryVec.push(pv_alloc_ptr);
+
+ /* Set pointer for process memory tables */
+ err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_SET_MEMTABS_PTR, 0,
+ pv_alloc_ptr);
+
+ RETURN_IF_FATAL(err_code, "IA_API_CMD_SET_MEMTABS_PTR");
+
err_code = ia_drc_dec_api(mMpegDDrcHandle, IA_API_CMD_INIT,
IA_CMD_TYPE_INIT_API_POST_CONFIG_PARAMS, NULL);
diff --git a/media/libstagefright/foundation/MediaDefs.cpp b/media/libstagefright/foundation/MediaDefs.cpp
index 9d1ec1f..52b2765 100644
--- a/media/libstagefright/foundation/MediaDefs.cpp
+++ b/media/libstagefright/foundation/MediaDefs.cpp
@@ -32,6 +32,10 @@
const char *MEDIA_MIMETYPE_VIDEO_RAW = "video/raw";
const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION = "video/dolby-vision";
const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED = "video/scrambled";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX = "video/divx";
+const char *MEDIA_MIMETYPE_VIDEO_DIVX3 = "video/divx3";
+const char *MEDIA_MIMETYPE_VIDEO_XVID = "video/xvid";
+const char *MEDIA_MIMETYPE_VIDEO_MJPEG = "video/x-motion-jpeg";
const char *MEDIA_MIMETYPE_AUDIO_AMR_NB = "audio/3gpp";
const char *MEDIA_MIMETYPE_AUDIO_AMR_WB = "audio/amr-wb";
diff --git a/media/libstagefright/foundation/OpusHeader.cpp b/media/libstagefright/foundation/OpusHeader.cpp
index 9faede1..acb9ccf 100644
--- a/media/libstagefright/foundation/OpusHeader.cpp
+++ b/media/libstagefright/foundation/OpusHeader.cpp
@@ -15,9 +15,9 @@
*/
//#define LOG_NDEBUG 0
-#define LOG_TAG "SoftOpus"
-#include <algorithm>
+#define LOG_TAG "OpusHeader"
#include <cstring>
+#include <inttypes.h>
#include <stdint.h>
#include <log/log.h>
@@ -91,6 +91,9 @@
// Parses Opus Header. Header spec: http://wiki.xiph.org/OggOpus#ID_Header
bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header) {
+ if (data == NULL) {
+ return false;
+ }
if (data_size < kOpusHeaderSize) {
ALOGV("Header size is too small.");
return false;
@@ -183,53 +186,88 @@
ALOGD("Buffer not large enough to hold unified OPUS CSD");
return -1;
}
+ int headerLen = 0;
- int headerLen = WriteOpusHeader(header, inputSampleRate, output,
+ // Add opus header
+ /*
+ Following is the CSD syntax for signalling OpusHeader
+ (http://wiki.xiph.org/OggOpus#ID_Header)
+
+ Marker (8 bytes) | Length (8 bytes) | OpusHeader
+
+ Markers supported:
+ AOPUS_CSD_OPUS_HEADER_MARKER - Signals Opus Header
+
+ Length should be a value within AOPUS_OPUSHEAD_MINSIZE and AOPUS_OPUSHEAD_MAXSIZE.
+ */
+
+ memcpy(output + headerLen, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE);
+ headerLen += AOPUS_MARKER_SIZE;
+
+ // Place holder for opusHeader Size
+ headerLen += AOPUS_LENGTH_SIZE;
+
+ int headerSize = WriteOpusHeader(header, inputSampleRate, output + headerLen,
outputSize);
- if (headerLen < 0) {
- ALOGD("WriteOpusHeader failed");
+ if (headerSize < 0) {
+ ALOGD("%s: WriteOpusHeader failed", __func__);
return -1;
}
- if (headerLen >= (outputSize - 2 * AOPUS_TOTAL_CSD_SIZE)) {
- ALOGD("Buffer not large enough to hold codec delay and seek pre roll");
- return -1;
- }
+ headerLen += headerSize;
- uint64_t length = AOPUS_LENGTH;
+ // Update opus headerSize after AOPUS_CSD_OPUS_HEADER_MARKER
+ uint64_t length = headerSize;
+ memcpy(output + AOPUS_MARKER_SIZE, &length, AOPUS_LENGTH_SIZE);
/*
Following is the CSD syntax for signalling codec delay and
seek pre-roll which is to be appended after OpusHeader
- Marker (8 bytes) | Length (8 bytes) | Samples (8 bytes)
+ Marker (8 bytes) | Length (8 bytes) | Samples in ns (8 bytes)
Markers supported:
- AOPUSDLY - Signals Codec Delay
- AOPUSPRL - Signals seek pre roll
+ AOPUS_CSD_CODEC_DELAY_MARKER - codec delay as samples in ns, represented in 8 bytes
+ AOPUS_CSD_SEEK_PREROLL_MARKER - preroll adjustment as samples in ns, represented in 8 bytes
- Length should be 8.
*/
-
+ length = sizeof(codecDelay);
+ if (headerLen > (outputSize - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE - length)) {
+ ALOGD("Buffer not large enough to hold codec delay");
+ return -1;
+ }
// Add codec delay
memcpy(output + headerLen, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE);
headerLen += AOPUS_MARKER_SIZE;
memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
headerLen += AOPUS_LENGTH_SIZE;
- memcpy(output + headerLen, &codecDelay, AOPUS_CSD_SIZE);
- headerLen += AOPUS_CSD_SIZE;
+ memcpy(output + headerLen, &codecDelay, length);
+ headerLen += length;
+ length = sizeof(seekPreRoll);
+ if (headerLen > (outputSize - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE - length)) {
+ ALOGD("Buffer not large enough to hold seek pre roll");
+ return -1;
+ }
// Add skip pre roll
memcpy(output + headerLen, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE);
headerLen += AOPUS_MARKER_SIZE;
memcpy(output + headerLen, &length, AOPUS_LENGTH_SIZE);
headerLen += AOPUS_LENGTH_SIZE;
- memcpy(output + headerLen, &seekPreRoll, AOPUS_CSD_SIZE);
- headerLen += AOPUS_CSD_SIZE;
+ memcpy(output + headerLen, &seekPreRoll, length);
+ headerLen += length;
return headerLen;
}
-void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+bool IsOpusHeader(const uint8_t *data, size_t data_size) {
+ if (data_size < AOPUS_MARKER_SIZE) {
+ return false;
+ }
+
+ return !memcmp(data, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE);
+}
+
+bool GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
void **opusHeadBuf, size_t *opusHeadSize,
void **codecDelayBuf, size_t *codecDelaySize,
void **seekPreRollBuf, size_t *seekPreRollSize) {
@@ -237,26 +275,77 @@
*codecDelaySize = 0;
*seekPreRollBuf = NULL;
*seekPreRollSize = 0;
- *opusHeadBuf = (void *)data;
- *opusHeadSize = data_size;
- if (data_size >= AOPUS_UNIFIED_CSD_MINSIZE) {
+ *opusHeadBuf = NULL;
+ *opusHeadSize = 0;
+
+    // AOPUS_MARKER_SIZE is 8; the legacy "OpusHead" marker is also 8 bytes.
+ if (data_size < 8)
+ return false;
+
+ // Check if the CSD is in legacy format
+ if (!memcmp("OpusHead", data, 8)) {
+ if (data_size < AOPUS_OPUSHEAD_MINSIZE || data_size > AOPUS_OPUSHEAD_MAXSIZE) {
+ ALOGD("Unexpected size for opusHeadSize %zu", data_size);
+ return false;
+ }
+ *opusHeadBuf = (void *)data;
+ *opusHeadSize = data_size;
+ return true;
+ } else if (memcmp(AOPUS_CSD_MARKER_PREFIX, data, AOPUS_CSD_MARKER_PREFIX_SIZE) == 0) {
size_t i = 0;
- while (i < data_size - AOPUS_TOTAL_CSD_SIZE) {
+ bool found = false;
+ while (i <= data_size - AOPUS_MARKER_SIZE - AOPUS_LENGTH_SIZE) {
uint8_t *csdBuf = (uint8_t *)data + i;
- if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
- *opusHeadSize = std::min(*opusHeadSize, i);
+ if (!memcmp(csdBuf, AOPUS_CSD_OPUS_HEADER_MARKER, AOPUS_MARKER_SIZE)) {
+ uint64_t value;
+ memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+ if (value < AOPUS_OPUSHEAD_MINSIZE || value > AOPUS_OPUSHEAD_MAXSIZE) {
+ ALOGD("Unexpected size for opusHeadSize %" PRIu64, value);
+ return false;
+ }
+ i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+ if (i > data_size) {
+ ALOGD("Marker signals a header that is larger than input");
+ return false;
+ }
+ *opusHeadBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
+ *opusHeadSize = value;
+ found = true;
+ } else if (!memcmp(csdBuf, AOPUS_CSD_CODEC_DELAY_MARKER, AOPUS_MARKER_SIZE)) {
+ uint64_t value;
+ memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+ if (value != sizeof(uint64_t)) {
+ ALOGD("Unexpected size for codecDelay %" PRIu64, value);
+ return false;
+ }
+ i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+ if (i > data_size) {
+ ALOGD("Marker signals a header that is larger than input");
+ return false;
+ }
*codecDelayBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
- *codecDelaySize = AOPUS_CSD_SIZE;
- i += AOPUS_TOTAL_CSD_SIZE;
+ *codecDelaySize = value;
} else if (!memcmp(csdBuf, AOPUS_CSD_SEEK_PREROLL_MARKER, AOPUS_MARKER_SIZE)) {
- *opusHeadSize = std::min(*opusHeadSize, i);
+ uint64_t value;
+ memcpy(&value, csdBuf + AOPUS_MARKER_SIZE, sizeof(value));
+ if (value != sizeof(uint64_t)) {
+ ALOGD("Unexpected size for seekPreRollSize %" PRIu64, value);
+ return false;
+ }
+ i += AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE + value;
+ if (i > data_size) {
+ ALOGD("Marker signals a header that is larger than input");
+ return false;
+ }
*seekPreRollBuf = csdBuf + AOPUS_MARKER_SIZE + AOPUS_LENGTH_SIZE;
- *seekPreRollSize = AOPUS_CSD_SIZE;
- i += AOPUS_TOTAL_CSD_SIZE;
+ *seekPreRollSize = value;
} else {
i++;
}
}
+ return found;
+ } else {
+ return false; // it isn't in either format
}
}
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
index e68852d..007a09b 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -34,6 +34,10 @@
extern const char *MEDIA_MIMETYPE_VIDEO_RAW;
extern const char *MEDIA_MIMETYPE_VIDEO_DOLBY_VISION;
extern const char *MEDIA_MIMETYPE_VIDEO_SCRAMBLED;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX;
+extern const char *MEDIA_MIMETYPE_VIDEO_DIVX3;
+extern const char *MEDIA_MIMETYPE_VIDEO_XVID;
+extern const char *MEDIA_MIMETYPE_VIDEO_MJPEG;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_NB;
extern const char *MEDIA_MIMETYPE_AUDIO_AMR_WB;
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
index 9bffccb..29037af 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/OpusHeader.h
@@ -25,22 +25,37 @@
namespace android {
/* Constants used for delimiting Opus CSD */
-#define AOPUS_CSD_CODEC_DELAY_MARKER "AOPUSDLY"
-#define AOPUS_CSD_SEEK_PREROLL_MARKER "AOPUSPRL"
-#define AOPUS_CSD_SIZE 8
-#define AOPUS_LENGTH 8
+#define AOPUS_CSD_MARKER_PREFIX "AOPUS"
+#define AOPUS_CSD_MARKER_PREFIX_SIZE (sizeof(AOPUS_CSD_MARKER_PREFIX) - 1)
+#define AOPUS_CSD_OPUS_HEADER_MARKER AOPUS_CSD_MARKER_PREFIX "HDR"
+#define AOPUS_CSD_CODEC_DELAY_MARKER AOPUS_CSD_MARKER_PREFIX "DLY"
+#define AOPUS_CSD_SEEK_PREROLL_MARKER AOPUS_CSD_MARKER_PREFIX "PRL"
#define AOPUS_MARKER_SIZE 8
-#define AOPUS_LENGTH_SIZE 8
-#define AOPUS_TOTAL_CSD_SIZE \
- ((AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_CSD_SIZE))
-#define AOPUS_CSD0_MINSIZE 19
-#define AOPUS_UNIFIED_CSD_MINSIZE \
- ((AOPUS_CSD0_MINSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+#define AOPUS_LENGTH_SIZE sizeof(uint64_t)
+#define AOPUS_CSD_CODEC_DELAY_SIZE \
+ (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + sizeof(uint64_t)
+#define AOPUS_CSD_SEEK_PREROLL_SIZE \
+ (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + sizeof(uint64_t)
-/* CSD0 at max can be 22 bytes + max number of channels (255) */
-#define AOPUS_CSD0_MAXSIZE 277
+/* OpusHead csd minimum size is 19 */
+#define AOPUS_OPUSHEAD_MINSIZE 19
+#define AOPUS_CSD_OPUSHEAD_MINSIZE \
+ (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_OPUSHEAD_MINSIZE)
+
+#define AOPUS_UNIFIED_CSD_MINSIZE \
+ ((AOPUS_CSD_OPUSHEAD_MINSIZE) + \
+ (AOPUS_CSD_CODEC_DELAY_SIZE) + \
+ (AOPUS_CSD_SEEK_PREROLL_SIZE))
+
+/* OpusHead csd at max can be AOPUS_CSD_OPUSHEAD_MINSIZE + 2 + max number of channels (255) */
+#define AOPUS_OPUSHEAD_MAXSIZE ((AOPUS_OPUSHEAD_MINSIZE) + 2 + 255)
+#define AOPUS_CSD_OPUSHEAD_MAXSIZE \
+ (AOPUS_MARKER_SIZE) + (AOPUS_LENGTH_SIZE) + (AOPUS_OPUSHEAD_MAXSIZE)
+
#define AOPUS_UNIFIED_CSD_MAXSIZE \
- ((AOPUS_CSD0_MAXSIZE) + 2 * (AOPUS_TOTAL_CSD_SIZE))
+ ((AOPUS_CSD_OPUSHEAD_MAXSIZE) + \
+ (AOPUS_CSD_CODEC_DELAY_SIZE) + \
+ (AOPUS_CSD_SEEK_PREROLL_SIZE))
struct OpusHeader {
int channels;
@@ -54,13 +69,14 @@
bool ParseOpusHeader(const uint8_t* data, size_t data_size, OpusHeader* header);
int WriteOpusHeader(const OpusHeader &header, int input_sample_rate, uint8_t* output, size_t output_size);
-void GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
+bool GetOpusHeaderBuffers(const uint8_t *data, size_t data_size,
void **opusHeadBuf, size_t *opusHeadSize,
void **codecDelayBuf, size_t *codecDelaySize,
void **seekPreRollBuf, size_t *seekPreRollSize);
int WriteOpusHeaders(const OpusHeader &header, int inputSampleRate,
uint8_t* output, size_t outputSize, uint64_t codecDelay,
uint64_t seekPreRoll);
+bool IsOpusHeader(const uint8_t *data, size_t data_size);
} // namespace android
#endif // OPUS_HEADER_H_
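With the marker scheme above, a writer emits one unified CSD blob and a reader splits it back into head, delay, and pre-roll. A round-trip sketch, assuming a populated OpusHeader; the delay and pre-roll values (in nanoseconds) are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <media/stagefright/foundation/OpusHeader.h>

using namespace android;

// Round-trip sketch over the unified CSD; the numeric values are made up.
static bool roundTripOpusCsd(const OpusHeader& header, int inputSampleRate) {
    uint8_t csd[AOPUS_UNIFIED_CSD_MAXSIZE];
    const uint64_t codecDelayNs = 6500000;    // illustrative
    const uint64_t seekPreRollNs = 80000000;  // illustrative
    int csdLen = WriteOpusHeaders(header, inputSampleRate, csd, sizeof(csd),
                                  codecDelayNs, seekPreRollNs);
    if (csdLen < 0) {
        return false;
    }
    void *head, *delay, *preRoll;
    size_t headSize, delaySize, preRollSize;
    // Accepts both the legacy "OpusHead" CSD and the unified
    // AOPUSHDR/AOPUSDLY/AOPUSPRL layout, and now reports success or failure.
    return GetOpusHeaderBuffers(csd, csdLen, &head, &headSize,
                                &delay, &delaySize, &preRoll, &preRollSize);
}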
diff --git a/media/libstagefright/include/media/stagefright/AudioSource.h b/media/libstagefright/include/media/stagefright/AudioSource.h
index b0e32d0..18e5f10 100644
--- a/media/libstagefright/include/media/stagefright/AudioSource.h
+++ b/media/libstagefright/include/media/stagefright/AudioSource.h
@@ -44,7 +44,9 @@
uint32_t outSampleRate = 0,
uid_t uid = -1,
pid_t pid = -1,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
+ audio_microphone_direction_t selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
+ float selectedMicFieldDimension = MIC_FIELD_DIMENSION_NORMAL);
status_t initCheck() const;
@@ -68,6 +70,8 @@
status_t removeAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback);
status_t getActiveMicrophones(std::vector<media::MicrophoneInfo>* activeMicrophones);
+ status_t setMicrophoneDirection(audio_microphone_direction_t direction);
+ status_t setMicrophoneFieldDimension(float zoom);
status_t getPortId(audio_port_handle_t *portId) const;
diff --git a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
index 4358aac..ea87948 100644
--- a/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
+++ b/media/libstagefright/include/media/stagefright/MediaExtractorFactory.h
@@ -21,6 +21,7 @@
#include <stdio.h>
#include <unordered_set>
+#include <android/dlext.h>
#include <media/IMediaExtractor.h>
namespace android {
@@ -36,19 +37,16 @@
const sp<DataSource> &source, const char *mime = NULL);
static status_t dump(int fd, const Vector<String16>& args);
static std::unordered_set<std::string> getSupportedTypes();
- static void SetLinkedLibraries(const std::string& linkedLibraries);
private:
static Mutex gPluginMutex;
static std::shared_ptr<std::list<sp<ExtractorPlugin>>> gPlugins;
static bool gPluginsRegistered;
static bool gIgnoreVersion;
- static std::string gLinkedLibraries;
- static void RegisterExtractorsInSystem(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
- static void RegisterExtractorsInApex(
- const char *libDirPath, std::list<sp<ExtractorPlugin>> &pluginList);
+ static void RegisterExtractors(
+ const char *libDirPath, const android_dlextinfo* dlextinfo,
+ std::list<sp<ExtractorPlugin>> &pluginList);
static void RegisterExtractor(
const sp<ExtractorPlugin> &plugin, std::list<sp<ExtractorPlugin>> &pluginList);
diff --git a/media/libstagefright/mpeg2ts/ESQueue.cpp b/media/libstagefright/mpeg2ts/ESQueue.cpp
index b91edcd..5af7b23 100644
--- a/media/libstagefright/mpeg2ts/ESQueue.cpp
+++ b/media/libstagefright/mpeg2ts/ESQueue.cpp
@@ -1526,6 +1526,7 @@
header, &frameSize, &samplingRate, &numChannels,
&bitrate, &numSamples)) {
ALOGE("Failed to get audio frame size");
+ mBuffer->setRange(0, 0);
return NULL;
}
@@ -1550,6 +1551,22 @@
return NULL;
}
+ if (mFormat != NULL) {
+ const char *mime;
+ if (mFormat->findCString(kKeyMIMEType, &mime)) {
+ if ((layer == 1) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_I)) {
+ ALOGE("Audio layer is not MPEG_LAYER_I");
+ return NULL;
+ } else if ((layer == 2) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)) {
+ ALOGE("Audio layer is not MPEG_LAYER_II");
+ return NULL;
+ } else if ((layer == 3) && strcmp (mime, MEDIA_MIMETYPE_AUDIO_MPEG)) {
+ ALOGE("Audio layer is not AUDIO_MPEG");
+ return NULL;
+ }
+ }
+ }
+
accessUnit->meta()->setInt64("timeUs", timeUs);
accessUnit->meta()->setInt32("isSync", 1);
diff --git a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
index 82a0631..4302aee 100644
--- a/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4AudioAssembler.cpp
@@ -423,6 +423,11 @@
CHECK_LE(offset + (mOtherDataLenBits / 8), buffer->size());
offset += mOtherDataLenBits / 8;
}
+
+ if (i < mNumSubFrames && offset >= buffer->size()) {
+ ALOGW("Skip subframes after %d, total %d", (int)i, (int)mNumSubFrames);
+ break;
+ }
}
if (offset < buffer->size()) {
diff --git a/media/libstagefright/rtsp/APacketSource.cpp b/media/libstagefright/rtsp/APacketSource.cpp
index b964bc0..1aa8a20 100644
--- a/media/libstagefright/rtsp/APacketSource.cpp
+++ b/media/libstagefright/rtsp/APacketSource.cpp
@@ -376,8 +376,8 @@
ALOGI("VOL dimensions = %dx%d", *width, *height);
size_t len1 = config->size() + GetSizeWidth(config->size()) + 1;
- size_t len2 = len1 + GetSizeWidth(len1) + 1 + 13;
- size_t len3 = len2 + GetSizeWidth(len2) + 1 + 3;
+ size_t len2 = len1 + GetSizeWidth(len1 + 13) + 1 + 13;
+ size_t len3 = len2 + GetSizeWidth(len2 + 3) + 1 + 3;
sp<ABuffer> csd = new ABuffer(len3);
uint8_t *dst = csd->data();
diff --git a/media/libstagefright/xmlparser/Android.bp b/media/libstagefright/xmlparser/Android.bp
index 819058c..bebfb3b 100644
--- a/media/libstagefright/xmlparser/Android.bp
+++ b/media/libstagefright/xmlparser/Android.bp
@@ -10,7 +10,6 @@
vndk: {
enabled: true,
},
- double_loadable: true,
srcs: [
"MediaCodecsXmlParser.cpp",
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index 16c7be9..8377723 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -34,6 +34,7 @@
"frameworks/av/services/mediaresourcemanager",
],
+ // back to 32-bit, b/126502613
compile_multilib: "32",
init_rc: ["mediaserver.rc"],
diff --git a/media/mtp/MtpFfsHandle.cpp b/media/mtp/MtpFfsHandle.cpp
index ad3c068..bd6a6c6 100644
--- a/media/mtp/MtpFfsHandle.cpp
+++ b/media/mtp/MtpFfsHandle.cpp
@@ -47,6 +47,9 @@
constexpr unsigned MAX_FILE_CHUNK_SIZE = AIO_BUFS_MAX * AIO_BUF_LEN;
constexpr uint32_t MAX_MTP_FILE_SIZE = 0xFFFFFFFF;
+// Note: a POLL_TIMEOUT_MS of 0 makes poll() return immediately (no sleep),
+// which leads to high CPU usage.
+constexpr int32_t POLL_TIMEOUT_MS = 500;
struct timespec ZERO_TIMEOUT = { 0, 0 };
@@ -305,7 +308,7 @@
int error = 0;
while (num_events < min_events) {
- if (poll(mPollFds, 2, 0) == -1) {
+ if (poll(mPollFds, 2, POLL_TIMEOUT_MS) == -1) {
PLOG(ERROR) << "Mtp error during poll()";
return -1;
}
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 339f622..f9f1acc 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -76,7 +76,7 @@
"libbinder",
"libmedia",
"libmedia_omx",
- "libmedia_jni",
+ "libmedia_jni_utils",
"libmediadrm",
"libstagefright",
"libstagefright_foundation",
@@ -84,8 +84,7 @@
"liblog",
"libutils",
"libcutils",
- "libandroid",
- "libandroid_runtime",
+ "libnativewindow",
"libbinder",
"libhidlbase",
"libgui",
@@ -94,6 +93,12 @@
"libmediandk_utils",
],
+ required: [
+ // libmediandk may be used by Java and non-Java things. When lower-level things use it,
+ // they shouldn't have to take on the cost of loading libandroid_runtime.
+ "libandroid_runtime",
+ ],
+
export_include_dirs: ["include"],
product_variables: {
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 20b1667..1883f63 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -23,7 +23,7 @@
#include "NdkImageReaderPriv.h"
#include <android_media_Utils.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <utils/Log.h>
#include "hardware/camera3.h"
@@ -190,7 +190,7 @@
auto lockedBuffer = std::make_unique<CpuConsumer::LockedBuffer>();
- uint64_t grallocUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ uint64_t grallocUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
status_t ret =
lockImageFromBuffer(mBuffer, grallocUsage, mBuffer->mFence->dup(), lockedBuffer.get());
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index c3eb437..bcc7ff3 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -25,8 +25,8 @@
#include <cutils/atomic.h>
#include <utils/Log.h>
#include <android_media_Utils.h>
-#include <android_runtime/android_view_Surface.h>
-#include <android_runtime/android_hardware_HardwareBuffer.h>
+#include <ui/PublicFormat.h>
+#include <private/android/AHardwareBufferHelpers.h>
#include <grallocusage/GrallocUsageConversion.h>
#include <media/stagefright/bqhelper/WGraphicBufferProducer.h>
@@ -70,6 +70,7 @@
case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
+ case AIMAGE_FORMAT_DEPTH_JPEG:
return true;
case AIMAGE_FORMAT_PRIVATE:
// For private format, cpu usage is prohibited.
@@ -98,6 +99,7 @@
case AIMAGE_FORMAT_DEPTH_POINT_CLOUD:
case AIMAGE_FORMAT_Y8:
case AIMAGE_FORMAT_HEIC:
+ case AIMAGE_FORMAT_DEPTH_JPEG:
return 1;
case AIMAGE_FORMAT_PRIVATE:
return 0;
@@ -270,9 +272,9 @@
media_status_t
AImageReader::init() {
PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
- mHalFormat = android_view_Surface_mapPublicFormatToHalFormat(publicFormat);
- mHalDataSpace = android_view_Surface_mapPublicFormatToHalDataspace(publicFormat);
- mHalUsage = android_hardware_HardwareBuffer_convertToGrallocUsageBits(mUsage);
+ mHalFormat = mapPublicFormatToHalFormat(publicFormat);
+ mHalDataSpace = mapPublicFormatToHalDataspace(publicFormat);
+ mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
sp<IGraphicBufferProducer> gbProducer;
sp<IGraphicBufferConsumer> gbConsumer;
diff --git a/media/ndk/NdkMediaCrypto.cpp b/media/ndk/NdkMediaCrypto.cpp
index b8af5ff..ce2c660 100644
--- a/media/ndk/NdkMediaCrypto.cpp
+++ b/media/ndk/NdkMediaCrypto.cpp
@@ -29,7 +29,6 @@
#include <binder/IServiceManager.h>
#include <media/ICrypto.h>
#include <media/IMediaDrmService.h>
-#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
#include <jni.h>
diff --git a/media/ndk/NdkMediaDataSource.cpp b/media/ndk/NdkMediaDataSource.cpp
index 1abee93..0891f2a 100644
--- a/media/ndk/NdkMediaDataSource.cpp
+++ b/media/ndk/NdkMediaDataSource.cpp
@@ -23,9 +23,7 @@
#include <jni.h>
#include <unistd.h>
-#include <android_runtime/AndroidRuntime.h>
-#include <android_util_Binder.h>
-#include <binder/IServiceManager.h>
+#include <binder/IBinder.h>
#include <cutils/properties.h>
#include <utils/Log.h>
#include <utils/StrongPointer.h>
@@ -41,8 +39,67 @@
#include "../../libstagefright/include/NuCachedSource2.h"
#include "NdkMediaDataSourceCallbacksPriv.h"
+#include <mutex> // std::call_once,once_flag
+#include <dlfcn.h> // dlopen
+
using namespace android;
+// load libandroid_runtime.so lazily.
+// A vendor process may use libmediandk but should not depend on libandroid_runtime.
+// TODO(jooyung): remove duplicate (b/125550121)
+// frameworks/native/libs/binder/ndk/ibinder_jni.cpp
+namespace {
+
+typedef JNIEnv* (*getJNIEnv_t)();
+typedef sp<IBinder> (*ibinderForJavaObject_t)(JNIEnv* env, jobject obj);
+
+getJNIEnv_t getJNIEnv_;
+ibinderForJavaObject_t ibinderForJavaObject_;
+
+std::once_flag mLoadFlag;
+
+void load() {
+ std::call_once(mLoadFlag, []() {
+ void* handle = dlopen("libandroid_runtime.so", RTLD_LAZY);
+ if (handle == nullptr) {
+ ALOGE("Could not open libandroid_runtime.");
+ return;
+ }
+
+ getJNIEnv_ = reinterpret_cast<getJNIEnv_t>(
+ dlsym(handle, "_ZN7android14AndroidRuntime9getJNIEnvEv"));
+ if (getJNIEnv_ == nullptr) {
+ ALOGE("Could not find AndroidRuntime::getJNIEnv.");
+ // no return
+ }
+
+ ibinderForJavaObject_ = reinterpret_cast<ibinderForJavaObject_t>(
+ dlsym(handle, "_ZN7android20ibinderForJavaObjectEP7_JNIEnvP8_jobject"));
+ if (ibinderForJavaObject_ == nullptr) {
+ ALOGE("Could not find ibinderForJavaObject.");
+ // no return
+ }
+ });
+}
+
+JNIEnv* getJNIEnv() {
+ load();
+ if (getJNIEnv_ == nullptr) {
+ return nullptr;
+ }
+ return (getJNIEnv_)();
+}
+
+sp<IBinder> ibinderForJavaObject(JNIEnv* env, jobject obj) {
+ load();
+ if (ibinderForJavaObject_ == nullptr) {
+ return nullptr;
+ }
+ return (ibinderForJavaObject_)(env, obj);
+}
+
+} // namespace
+
struct AMediaDataSource {
void *userdata;
AMediaDataSourceReadAt readAt;
@@ -124,9 +181,14 @@
if (obj == NULL) {
return NULL;
}
+ sp<IBinder> binder;
switch (version) {
case 1:
- return interface_cast<IMediaHTTPService>(ibinderForJavaObject(env, obj));
+ binder = ibinderForJavaObject(env, obj);
+ if (binder == NULL) {
+ return NULL;
+ }
+ return interface_cast<IMediaHTTPService>(binder);
case 2:
return new JMedia2HTTPService(env, obj);
default:
@@ -179,7 +241,7 @@
switch (version) {
case 1:
- env = AndroidRuntime::getJNIEnv();
+ env = getJNIEnv();
clazz = "android/media/MediaHTTPService";
method = "createHttpServiceBinderIfNecessary";
signature = "(Ljava/lang/String;)Landroid/os/IBinder;";
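The block above lazily resolves AndroidRuntime::getJNIEnv and ibinderForJavaObject with dlopen/dlsym so that a vendor process linking libmediandk never takes a load-time dependency on libandroid_runtime. A minimal, self-contained sketch of the same once-only lazy-binding pattern follows; the library name libexample.so and the symbol example_compute are hypothetical placeholders, not part of this change.

// Sketch of the lazy dlopen/dlsym binding used above (hypothetical library and symbol).
#include <dlfcn.h>
#include <mutex>

namespace {

typedef int (*compute_t)(int);   // placeholder signature
compute_t gCompute = nullptr;    // resolved at most once
std::once_flag gLoadFlag;

void loadOnce() {
    std::call_once(gLoadFlag, [] {
        // RTLD_LAZY defers relocation; the handle is intentionally kept for the process lifetime.
        void* handle = dlopen("libexample.so", RTLD_LAZY);
        if (handle == nullptr) {
            return;  // callers must tolerate unresolved symbols
        }
        gCompute = reinterpret_cast<compute_t>(dlsym(handle, "example_compute"));
    });
}

int computeOrDefault(int value, int fallback) {
    loadOnce();
    return gCompute != nullptr ? gCompute(value) : fallback;  // degrade gracefully if unresolved
}

}  // namespace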
diff --git a/media/ndk/NdkMediaExtractor.cpp b/media/ndk/NdkMediaExtractor.cpp
index 28e4f12..c83b255 100644
--- a/media/ndk/NdkMediaExtractor.cpp
+++ b/media/ndk/NdkMediaExtractor.cpp
@@ -34,7 +34,6 @@
#include <media/stagefright/MetaData.h>
#include <media/stagefright/NuMediaExtractor.h>
#include <media/IMediaHTTPService.h>
-#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
#include <jni.h>
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 26a6238..768a7a9 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -26,7 +26,6 @@
#include <utils/StrongPointer.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/AMessage.h>
-#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
#include <jni.h>
@@ -344,8 +343,10 @@
EXPORT const char* AMEDIAFORMAT_KEY_LYRICIST = "lyricist";
EXPORT const char* AMEDIAFORMAT_KEY_MANUFACTURER = "manufacturer";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE = "max-bitrate";
+EXPORT const char* AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER = "max-fps-to-encoder";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_HEIGHT = "max-height";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_INPUT_SIZE = "max-input-size";
+EXPORT const char* AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER = "max-pts-gap-to-encoder";
EXPORT const char* AMEDIAFORMAT_KEY_MAX_WIDTH = "max-width";
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
diff --git a/media/ndk/NdkMediaMuxer.cpp b/media/ndk/NdkMediaMuxer.cpp
index e79926d..d1992bf 100644
--- a/media/ndk/NdkMediaMuxer.cpp
+++ b/media/ndk/NdkMediaMuxer.cpp
@@ -30,7 +30,6 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/MediaMuxer.h>
#include <media/IMediaHTTPService.h>
-#include <android_runtime/AndroidRuntime.h>
#include <android_util_Binder.h>
#include <jni.h>
diff --git a/media/ndk/OWNERS b/media/ndk/OWNERS
index 11e8340..9dc441e 100644
--- a/media/ndk/OWNERS
+++ b/media/ndk/OWNERS
@@ -1,5 +1,3 @@
marcone@google.com
# For AImage/AImageReader
-etalvala@google.com
-yinchiayeh@google.com
-zhijunhe@google.com
+include platform/frameworks/av:/camera/OWNERS
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 14d88cb..3e60de0 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -535,6 +535,15 @@
* Format as described in ISO/IEC 23008-12.</p>
*/
AIMAGE_FORMAT_HEIC = 0x48454946,
+
+ /**
+ * Depth augmented compressed JPEG format.
+ *
+ * <p>JPEG compressed main image along with XMP embedded depth metadata
+ * following ISO 16684-1:2011(E).</p>
+ */
+ AIMAGE_FORMAT_DEPTH_JPEG = 0x69656963,
+
};
/**
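For context, the new constant is consumed through the existing NDK reader API. The sketch below assumes AIMAGE_FORMAT_DEPTH_JPEG behaves like the other compressed one-plane, CPU-readable formats, as the switch statements earlier in this change indicate; the resolution and image count are arbitrary.

// Hedged usage sketch: create an AImageReader for the new depth-augmented JPEG format.
#include <media/NdkImageReader.h>

AImageReader* createDepthJpegReader() {
    AImageReader* reader = nullptr;
    const media_status_t status = AImageReader_new(
            /*width*/ 640, /*height*/ 480, AIMAGE_FORMAT_DEPTH_JPEG,
            /*maxImages*/ 2, &reader);
    return status == AMEDIA_OK ? reader : nullptr;
}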
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index ddf5291..56bcaab 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -216,6 +216,8 @@
extern const char* AMEDIAFORMAT_KEY_LYRICIST __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MANUFACTURER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MAX_BIT_RATE __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER __INTRODUCED_IN(29);
+extern const char* AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN __INTRODUCED_IN(29);
extern const char* AMEDIAFORMAT_KEY_PSSH __INTRODUCED_IN(29);
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7bdd3ad..9756926 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -107,8 +107,10 @@
AMEDIAFORMAT_KEY_LYRICIST; # var introduced=29
AMEDIAFORMAT_KEY_MANUFACTURER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_BIT_RATE; # var introduced=29
+ AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_HEIGHT; # var introduced=21
AMEDIAFORMAT_KEY_MAX_INPUT_SIZE; # var introduced=21
+ AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER; # var introduced=29
AMEDIAFORMAT_KEY_MAX_WIDTH; # var introduced=21
AMEDIAFORMAT_KEY_MIME; # var introduced=21
AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
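The two newly exported keys mirror the SDK MediaFormat constants used for surface-input encoding. A hedged sketch of setting them from native code follows; the value types (a float fps cap and an int64 pts gap in microseconds) are assumed from the SDK counterparts, and the concrete numbers are illustrative.

// Sketch only: configure the new encoder limits on an existing AMediaFormat.
#include <media/NdkMediaFormat.h>

void configureSurfaceInputLimits(AMediaFormat* format) {
    // Cap the frame rate fed from a surface input to the encoder (assumed float).
    AMediaFormat_setFloat(format, AMEDIAFORMAT_KEY_MAX_FPS_TO_ENCODER, 30.0f);
    // Bound the PTS gap between consecutive frames (assumed int64, microseconds).
    AMediaFormat_setInt64(format, AMEDIAFORMAT_KEY_MAX_PTS_GAP_TO_ENCODER, 1000000);
}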
diff --git a/media/utils/ServiceUtilities.cpp b/media/utils/ServiceUtilities.cpp
index 599c446..2fb24f5 100644
--- a/media/utils/ServiceUtilities.cpp
+++ b/media/utils/ServiceUtilities.cpp
@@ -22,6 +22,9 @@
#include <binder/PermissionCache.h>
#include "mediautils/ServiceUtilities.h"
+#include <iterator>
+#include <algorithm>
+
/* When performing permission checks we do not use permission cache for
* runtime permissions (protection level dangerous) as they may change at
* runtime. All other permissions (protection level normal and dangerous)
@@ -220,4 +223,85 @@
return NO_ERROR;
}
+sp<content::pm::IPackageManagerNative> MediaPackageManager::retreivePackageManager() {
+ const sp<IServiceManager> sm = defaultServiceManager();
+ if (sm == nullptr) {
+ ALOGW("%s: failed to retrieve defaultServiceManager", __func__);
+ return nullptr;
+ }
+ sp<IBinder> packageManager = sm->checkService(String16(nativePackageManagerName));
+ if (packageManager == nullptr) {
+ ALOGW("%s: failed to retrieve native package manager", __func__);
+ return nullptr;
+ }
+ return interface_cast<content::pm::IPackageManagerNative>(packageManager);
+}
+
+std::optional<bool> MediaPackageManager::doIsAllowed(uid_t uid) {
+ if (mPackageManager == nullptr) {
+ /** Cannot fetch the package manager at construction time, as it may not yet be registered. */
+ mPackageManager = retreivePackageManager();
+ if (mPackageManager == nullptr) {
+ ALOGW("%s: Playback capture is denied as package manager is not reachable", __func__);
+ return std::nullopt;
+ }
+ }
+
+ std::vector<std::string> packageNames;
+ auto status = mPackageManager->getNamesForUids({(int32_t)uid}, &packageNames);
+ if (!status.isOk()) {
+ ALOGW("%s: Playback capture is denied for uid %u as the package names could not be "
+ "retrieved from the package manager: %s", __func__, uid, status.toString8().c_str());
+ return std::nullopt;
+ }
+ if (packageNames.empty()) {
+ ALOGW("%s: Playback capture for uid %u is denied as no package name could be retrieved "
+ "from the package manager: %s", __func__, uid, status.toString8().c_str());
+ return std::nullopt;
+ }
+ std::vector<bool> isAllowed;
+ status = mPackageManager->isAudioPlaybackCaptureAllowed(packageNames, &isAllowed);
+ if (!status.isOk()) {
+ ALOGW("%s: Playback capture is denied for uid %u as the manifest property could not be "
+ "retrieved from the package manager: %s", __func__, uid, status.toString8().c_str());
+ return std::nullopt;
+ }
+ if (packageNames.size() != isAllowed.size()) {
+ ALOGW("%s: Playback capture is denied for uid %u as the package manager returned incoherent"
+ " response size: %zu != %zu", __func__, uid, packageNames.size(), isAllowed.size());
+ return std::nullopt;
+ }
+
+ // Zip together packageNames and isAllowed for debug logs
+ Packages& packages = mDebugLog[uid];
+ packages.resize(packageNames.size()); // Reuse all objects
+ std::transform(begin(packageNames), end(packageNames), begin(isAllowed),
+ begin(packages), [] (auto& name, bool isAllowed) -> Package {
+ return {std::move(name), isAllowed};
+ });
+
+ // Only allow playback capture if all packages in this UID allow it
+ bool playbackCaptureAllowed = std::all_of(begin(isAllowed), end(isAllowed),
+ [](bool b) { return b; });
+
+ return playbackCaptureAllowed;
+}
+
+void MediaPackageManager::dump(int fd, int spaces) const {
+ dprintf(fd, "%*sAllow playback capture log:\n", spaces, "");
+ if (mPackageManager == nullptr) {
+ dprintf(fd, "%*sNo package manager\n", spaces + 2, "");
+ }
+ dprintf(fd, "%*sPackage manager errors: %u\n", spaces + 2, "", mPackageManagerErrors);
+
+ for (const auto& uidCache : mDebugLog) {
+ for (const auto& package : std::get<Packages>(uidCache)) {
+ dprintf(fd, "%*s- uid=%5u, allowPlaybackCapture=%s, packageName=%s\n", spaces + 2, "",
+ std::get<const uid_t>(uidCache),
+ package.playbackCaptureAllowed ? "true " : "false",
+ package.name.c_str());
+ }
+ }
+}
+
} // namespace android
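The decision logic above reduces to: any package-manager error denies capture, and capture is allowed only when every package sharing the UID opts in. A standalone restatement of that aggregation step, under the same assumptions (not the AudioFlinger code itself):

// Sketch of the per-UID aggregation rule.
#include <algorithm>
#include <optional>
#include <string>
#include <vector>

std::optional<bool> aggregateCaptureOptIn(const std::vector<std::string>& packageNames,
                                          const std::vector<bool>& isAllowed) {
    if (packageNames.empty() || packageNames.size() != isAllowed.size()) {
        return std::nullopt;  // incoherent response: the caller treats this as a denial
    }
    // Allow only if every package of the UID opted in.
    return std::all_of(isAllowed.begin(), isAllowed.end(), [](bool b) { return b; });
}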
diff --git a/media/utils/include/mediautils/ServiceUtilities.h b/media/utils/include/mediautils/ServiceUtilities.h
index 98f54c2..94370ee 100644
--- a/media/utils/include/mediautils/ServiceUtilities.h
+++ b/media/utils/include/mediautils/ServiceUtilities.h
@@ -14,13 +14,22 @@
* limitations under the License.
*/
+#ifndef ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
+#define ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
+
#include <unistd.h>
+#include <android/content/pm/IPackageManagerNative.h>
#include <binder/IMemory.h>
#include <binder/PermissionController.h>
#include <cutils/multiuser.h>
#include <private/android_filesystem_config.h>
+#include <map>
+#include <optional>
+#include <string>
+#include <vector>
+
namespace android {
// Audio permission utilities
@@ -72,4 +81,31 @@
bool dumpAllowed();
bool modifyPhoneStateAllowed(pid_t pid, uid_t uid);
status_t checkIMemory(const sp<IMemory>& iMemory);
+
+class MediaPackageManager {
+public:
+ /** Query the PackageManager to check if all apps of a UID allow playback capture. */
+ bool allowPlaybackCapture(uid_t uid) {
+ auto result = doIsAllowed(uid);
+ if (!result) {
+ mPackageManagerErrors++;
+ }
+ return result.value_or(false);
+ }
+ void dump(int fd, int spaces = 0) const;
+private:
+ static constexpr const char* nativePackageManagerName = "package_native";
+ std::optional<bool> doIsAllowed(uid_t uid);
+ sp<content::pm::IPackageManagerNative> retreivePackageManager();
+ sp<content::pm::IPackageManagerNative> mPackageManager; // To check apps manifest
+ uint_t mPackageManagerErrors = 0;
+ struct Package {
+ std::string name;
+ bool playbackCaptureAllowed = false;
+ };
+ using Packages = std::vector<Package>;
+ std::map<uid_t, Packages> mDebugLog;
+};
}
+
+#endif // ANDROID_MEDIAUTILS_SERVICEUTILITIES_H
diff --git a/services/OWNERS b/services/OWNERS
index d5d00da..66a4bcb 100644
--- a/services/OWNERS
+++ b/services/OWNERS
@@ -1,4 +1,7 @@
+chz@google.com
elaurent@google.com
+essick@google.com
etalvala@google.com
gkasten@google.com
hunga@google.com
+marcone@google.com
diff --git a/services/audioflinger/Android.mk b/services/audioflinger/Android.mk
index 91b7587..40980a6 100644
--- a/services/audioflinger/Android.mk
+++ b/services/audioflinger/Android.mk
@@ -32,6 +32,7 @@
libbinder \
libaudioclient \
libmedialogservice \
+ libmediametrics \
libmediautils \
libnbaio \
libnblog \
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 4033247..213c9c3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -836,8 +836,10 @@
}
teePatches.push_back({patchRecord, patchTrack});
secondaryThread->addPatchTrack(patchTrack);
- patchTrack->setPeerProxy(patchRecord.get());
- patchRecord->setPeerProxy(patchTrack.get());
+ // In case the downstream patchTrack on the secondaryThread temporarily outlives
+ // our created track, ensure the corresponding patchRecord is still alive.
+ patchTrack->setPeerProxy(patchRecord, true /* holdReference */);
+ patchRecord->setPeerProxy(patchTrack, false /* holdReference */);
}
track->setTeePatches(std::move(teePatches));
}
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 8455e54..ecaeb52 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1500,6 +1500,11 @@
write(fd, result.string(), result.length());
+ if (mEffectInterface != 0) {
+ dprintf(fd, "\tEffect ID %d HAL dump:\n", mId);
+ (void)mEffectInterface->dump(fd);
+ }
+
if (locked) {
mLock.unlock();
}
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index 676a575..a210a6d 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -527,8 +527,8 @@
}
// tie playback and record tracks together
- mRecord.setTrackAndPeer(tempRecordTrack, tempPatchTrack.get());
- mPlayback.setTrackAndPeer(tempPatchTrack, tempRecordTrack.get());
+ mRecord.setTrackAndPeer(tempRecordTrack, tempPatchTrack);
+ mPlayback.setTrackAndPeer(tempPatchTrack, tempRecordTrack);
// start capture and playback
mRecord.track()->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
@@ -543,6 +543,7 @@
__func__, mRecord.handle(), mPlayback.handle());
mRecord.stopTrack();
mPlayback.stopTrack();
+ mRecord.clearTrackPeer(); // mRecord stop is synchronous. Break PeerProxy sp<> cycle.
mRecord.closeConnections(panel);
mPlayback.closeConnections(panel);
}
diff --git a/services/audioflinger/PatchPanel.h b/services/audioflinger/PatchPanel.h
index 612855f..181e27c 100644
--- a/services/audioflinger/PatchPanel.h
+++ b/services/audioflinger/PatchPanel.h
@@ -122,12 +122,13 @@
mThread = thread;
mCloseThread = closeThread;
}
- void setTrackAndPeer(const sp<TrackType>& track,
- ThreadBase::PatchProxyBufferProvider *peer) {
+ template <typename T>
+ void setTrackAndPeer(const sp<TrackType>& track, const sp<T> &peer) {
mTrack = track;
mThread->addPatchTrack(mTrack);
- mTrack->setPeerProxy(peer);
+ mTrack->setPeerProxy(peer, true /* holdReference */);
}
+ void clearTrackPeer() { if (mTrack) mTrack->clearPeerProxy(); }
void stopTrack() { if (mTrack) mTrack->stop(); }
void swap(Endpoint &other) noexcept {
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a8c4bd1..49f74a2 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -457,52 +457,6 @@
}
}
-std::string devicesToString(audio_devices_t devices)
-{
- std::string result;
- if (devices & AUDIO_DEVICE_BIT_IN) {
- InputDeviceConverter::maskToString(devices, result);
- } else {
- OutputDeviceConverter::maskToString(devices, result);
- }
- return result;
-}
-
-std::string inputFlagsToString(audio_input_flags_t flags)
-{
- std::string result;
- InputFlagConverter::maskToString(flags, result);
- return result;
-}
-
-std::string outputFlagsToString(audio_output_flags_t flags)
-{
- std::string result;
- OutputFlagConverter::maskToString(flags, result);
- return result;
-}
-
-const char *sourceToString(audio_source_t source)
-{
- switch (source) {
- case AUDIO_SOURCE_DEFAULT: return "default";
- case AUDIO_SOURCE_MIC: return "mic";
- case AUDIO_SOURCE_VOICE_UPLINK: return "voice uplink";
- case AUDIO_SOURCE_VOICE_DOWNLINK: return "voice downlink";
- case AUDIO_SOURCE_VOICE_CALL: return "voice call";
- case AUDIO_SOURCE_CAMCORDER: return "camcorder";
- case AUDIO_SOURCE_VOICE_RECOGNITION: return "voice recognition";
- case AUDIO_SOURCE_VOICE_COMMUNICATION: return "voice communication";
- case AUDIO_SOURCE_REMOTE_SUBMIX: return "remote submix";
- case AUDIO_SOURCE_UNPROCESSED: return "unprocessed";
- case AUDIO_SOURCE_VOICE_PERFORMANCE: return "voice performance";
- case AUDIO_SOURCE_ECHO_REFERENCE: return "echo reference";
- case AUDIO_SOURCE_FM_TUNER: return "FM tuner";
- case AUDIO_SOURCE_HOTWORD: return "hotword";
- default: return "unknown";
- }
-}
-
AudioFlinger::ThreadBase::ThreadBase(const sp<AudioFlinger>& audioFlinger, audio_io_handle_t id,
audio_devices_t outDevice, audio_devices_t inDevice, type_t type, bool systemReady)
: Thread(false /*canCallJava*/),
@@ -534,6 +488,8 @@
sp<IBinder> binder = IInterface::asBinder(mPowerManager);
binder->unlinkToDeath(mDeathRecipient);
}
+
+ sendStatistics(true /* force */);
}
status_t AudioFlinger::ThreadBase::readyToRun()
@@ -617,6 +573,15 @@
// sendIoConfigEvent_l() must be called with ThreadBase::mLock held
void AudioFlinger::ThreadBase::sendIoConfigEvent_l(audio_io_config_event event, pid_t pid)
{
+ // The audio statistics history is exponentially weighted to forget events
+ // about five or more seconds in the past. In order to have
+ // crisper statistics for mediametrics, we reset the statistics on
+ // an IoConfigEvent, to reflect different properties for a new device.
+ mIoJitterMs.reset();
+ mLatencyMs.reset();
+ mProcessTimeMs.reset();
+ mTimestampVerifier.discontinuity();
+
sp<ConfigEvent> configEvent = (ConfigEvent *)new IoConfigEvent(event, pid);
sendConfigEvent_l(configEvent);
}
@@ -717,8 +682,8 @@
event->mStatus = createAudioPatch_l(&data->mPatch, &data->mHandle);
const audio_devices_t newDevice = getDevice();
mLocalLog.log("CFG_EVENT_CREATE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
- (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
- (unsigned)newDevice, devicesToString(newDevice).c_str());
+ (unsigned)oldDevice, toString(oldDevice).c_str(),
+ (unsigned)newDevice, toString(newDevice).c_str());
} break;
case CFG_EVENT_RELEASE_AUDIO_PATCH: {
const audio_devices_t oldDevice = getDevice();
@@ -727,8 +692,8 @@
event->mStatus = releaseAudioPatch_l(data->mHandle);
const audio_devices_t newDevice = getDevice();
mLocalLog.log("CFG_EVENT_RELEASE_AUDIO_PATCH: old device %#x (%s) new device %#x (%s)",
- (unsigned)oldDevice, devicesToString(oldDevice).c_str(),
- (unsigned)newDevice, devicesToString(newDevice).c_str());
+ (unsigned)oldDevice, toString(oldDevice).c_str(),
+ (unsigned)newDevice, toString(newDevice).c_str());
} break;
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
@@ -858,9 +823,9 @@
dprintf(fd, " none\n");
}
// Note: output device may be used by capture threads for effects such as AEC.
- dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, devicesToString(mOutDevice).c_str());
- dprintf(fd, " Input device: %#x (%s)\n", mInDevice, devicesToString(mInDevice).c_str());
- dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, sourceToString(mAudioSource));
+ dprintf(fd, " Output device: %#x (%s)\n", mOutDevice, toString(mOutDevice).c_str());
+ dprintf(fd, " Input device: %#x (%s)\n", mInDevice, toString(mInDevice).c_str());
+ dprintf(fd, " Audio source: %d (%s)\n", mAudioSource, toString(mAudioSource).c_str());
// Dump timestamp statistics for the Thread types that support it.
if (mType == RECORD
@@ -872,6 +837,28 @@
dprintf(fd, " Timestamp corrected: %s\n", isTimestampCorrectionEnabled() ? "yes" : "no");
}
+ if (mLastIoBeginNs > 0) { // MMAP may not set this
+ dprintf(fd, " Last %s occurred (msecs): %lld\n",
+ isOutput() ? "write" : "read",
+ (long long) (systemTime() - mLastIoBeginNs) / NANOS_PER_MILLISECOND);
+ }
+
+ if (mProcessTimeMs.getN() > 0) {
+ dprintf(fd, " Process time ms stats: %s\n", mProcessTimeMs.toString().c_str());
+ }
+
+ if (mIoJitterMs.getN() > 0) {
+ dprintf(fd, " Hal %s jitter ms stats: %s\n",
+ isOutput() ? "write" : "read",
+ mIoJitterMs.toString().c_str());
+ }
+
+ if (mLatencyMs.getN() > 0) {
+ dprintf(fd, " Threadloop %s latency stats: %s\n",
+ isOutput() ? "write" : "read",
+ mLatencyMs.toString().c_str());
+ }
+
if (locked) {
mLock.unlock();
}
@@ -1675,6 +1662,65 @@
mWaitWorkCV.broadcast();
}
+// Call only from threadLoop() or when it is idle.
+// Do not call from high-performance code, as this may make a binder RPC to the MediaMetrics service.
+void AudioFlinger::ThreadBase::sendStatistics(bool force)
+{
+ // Do not log if we have no stats.
+ // We choose the timestamp verifier because it is the most likely item to be present.
+ const int64_t nstats = mTimestampVerifier.getN() - mLastRecordedTimestampVerifierN;
+ if (nstats == 0) {
+ return;
+ }
+
+ // Don't log more frequently than once per 12 hours.
+ // We use BOOTTIME to include suspend time.
+ const int64_t timeNs = systemTime(SYSTEM_TIME_BOOTTIME);
+ const int64_t sinceNs = timeNs - mLastRecordedTimeNs; // ok if mLastRecordedTimeNs = 0
+ if (!force && sinceNs <= 12 * NANOS_PER_HOUR) {
+ return;
+ }
+
+ mLastRecordedTimestampVerifierN = mTimestampVerifier.getN();
+ mLastRecordedTimeNs = timeNs;
+
+ std::unique_ptr<MediaAnalyticsItem> item(MediaAnalyticsItem::create("audiothread"));
+
+#define MM_PREFIX "android.media.audiothread." // avoid cut-n-paste errors.
+
+ // thread configuration
+ item->setInt32(MM_PREFIX "id", (int32_t)mId); // IO handle
+ // item->setInt32(MM_PREFIX "portId", (int32_t)mPortId);
+ item->setCString(MM_PREFIX "type", threadTypeToString(mType));
+ item->setInt32(MM_PREFIX "sampleRate", (int32_t)mSampleRate);
+ item->setInt64(MM_PREFIX "channelMask", (int64_t)mChannelMask);
+ item->setCString(MM_PREFIX "encoding", toString(mFormat).c_str());
+ item->setInt32(MM_PREFIX "frameCount", (int32_t)mFrameCount);
+ item->setCString(MM_PREFIX "outDevice", toString(mOutDevice).c_str());
+ item->setCString(MM_PREFIX "inDevice", toString(mInDevice).c_str());
+
+ // thread statistics
+ if (mIoJitterMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "ioJitterMs.mean", mIoJitterMs.getMean());
+ item->setDouble(MM_PREFIX "ioJitterMs.std", mIoJitterMs.getStdDev());
+ }
+ if (mProcessTimeMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "processTimeMs.mean", mProcessTimeMs.getMean());
+ item->setDouble(MM_PREFIX "processTimeMs.std", mProcessTimeMs.getStdDev());
+ }
+ const auto tsjitter = mTimestampVerifier.getJitterMs();
+ if (tsjitter.getN() > 0) {
+ item->setDouble(MM_PREFIX "timestampJitterMs.mean", tsjitter.getMean());
+ item->setDouble(MM_PREFIX "timestampJitterMs.std", tsjitter.getStdDev());
+ }
+ if (mLatencyMs.getN() > 0) {
+ item->setDouble(MM_PREFIX "latencyMs.mean", mLatencyMs.getMean());
+ item->setDouble(MM_PREFIX "latencyMs.std", mLatencyMs.getStdDev());
+ }
+
+ item->selfrecord();
+}
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -1704,7 +1750,7 @@
// mStreamTypes[] initialized in constructor body
mTracks(type == MIXER),
mOutput(output),
- mLastWriteTime(-1), mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
+ mNumWrites(0), mNumDelayedWrites(0), mInWrite(false),
mMixerStatus(MIXER_IDLE),
mMixerStatusIgnoringFastTracks(MIXER_IDLE),
mStandbyDelayNs(AudioFlinger::mStandbyTimeInNsecs),
@@ -1857,8 +1903,6 @@
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
}
dprintf(fd, " Normal frame count: %zu\n", mNormalFrameCount);
- dprintf(fd, " Last write occurred (msecs): %llu\n",
- (unsigned long long) ns2ms(systemTime() - mLastWriteTime));
dprintf(fd, " Total writes: %d\n", mNumWrites);
dprintf(fd, " Delayed writes: %d\n", mNumDelayedWrites);
dprintf(fd, " Blocked in write: %s\n", mInWrite ? "yes" : "no");
@@ -1871,7 +1915,7 @@
AudioStreamOut *output = mOutput;
audio_output_flags_t flags = output != NULL ? output->flags : AUDIO_OUTPUT_FLAG_NONE;
dprintf(fd, " AudioStreamOut: %p flags %#x (%s)\n",
- output, flags, outputFlagsToString(flags).c_str());
+ output, flags, toString(flags).c_str());
dprintf(fd, " Frames written: %lld\n", (long long)mFramesWritten);
dprintf(fd, " Suspended frames: %lld\n", (long long)mSuspendedFrames);
if (mPipeSink.get() != nullptr) {
@@ -3189,8 +3233,8 @@
Vector< sp<Track> > tracksToRemove;
mStandbyTimeNs = systemTime();
- nsecs_t lastWriteFinished = -1; // time last server write completed
- int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
+ int64_t lastLoopCountWritten = -2; // never matches "previous" loop, when loopCount = 0.
+ int64_t lastFramesWritten = -1; // track changes in timestamp server frames written
// MIXER
nsecs_t lastWarning = 0;
@@ -3236,7 +3280,8 @@
}
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
- while (!exitPending())
+ // loopCount is used for statistics and diagnostics.
+ for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
// Log merge requests are performed during AudioFlinger binder transactions, but
// that does not cover audio playback. It's requested here for that reason.
@@ -3394,11 +3439,11 @@
// use the time before we called the HAL write - it is a bit more accurate
// to when the server last read data than the current time here.
//
- // If we haven't written anything, mLastWriteTime will be -1
+ // If we haven't written anything, mLastIoBeginNs will be -1
// and we use systemTime().
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] = mFramesWritten;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastWriteTime == -1
- ? systemTime() : mLastWriteTime;
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = mLastIoBeginNs == -1
+ ? systemTime() : mLastIoBeginNs;
}
for (const sp<Track> &t : mActiveTracks) {
@@ -3411,6 +3456,14 @@
}
}
}
+
+ if (audio_has_proportional_frames(mFormat)) {
+ const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
+ if (latencyMs != 0.) { // note 0. means timestamp is empty.
+ mLatencyMs.add(latencyMs);
+ }
+ }
+
} // if (mType ... ) { // no indentation
#if 0
// logFormat example
@@ -3464,6 +3517,7 @@
LOG_AUDIO_STATE();
}
mStandby = true;
+ sendStatistics(false /* force */);
}
if (mActiveTracks.isEmpty() && mConfigEvents.isEmpty()) {
@@ -3635,43 +3689,68 @@
// mSleepTimeUs == 0 means we must write to audio hardware
if (mSleepTimeUs == 0) {
ssize_t ret = 0;
- // We save lastWriteFinished here, as previousLastWriteFinished,
- // for throttling. On thread start, previousLastWriteFinished will be
- // set to -1, which properly results in no throttling after the first write.
- nsecs_t previousLastWriteFinished = lastWriteFinished;
- nsecs_t delta = 0;
+ // writePeriodNs is updated >= 0 when ret > 0.
+ int64_t writePeriodNs = -1;
if (mBytesRemaining) {
// FIXME rewrite to reduce number of system calls
- mLastWriteTime = systemTime(); // also used for dumpsys
+ const int64_t lastIoBeginNs = systemTime();
ret = threadLoop_write();
- lastWriteFinished = systemTime();
- delta = lastWriteFinished - mLastWriteTime;
+ const int64_t lastIoEndNs = systemTime();
if (ret < 0) {
mBytesRemaining = 0;
- } else {
+ } else if (ret > 0) {
mBytesWritten += ret;
mBytesRemaining -= ret;
- mFramesWritten += ret / mFrameSize;
+ const int64_t frames = ret / mFrameSize;
+ mFramesWritten += frames;
+
+ writePeriodNs = lastIoEndNs - mLastIoEndNs;
+ // process information relating to write time.
+ if (audio_has_proportional_frames(mFormat)) {
+ // we are in a continuous mixing cycle
+ if (mMixerStatus == MIXER_TRACKS_READY &&
+ loopCount == lastLoopCountWritten + 1) {
+
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {frames, writePeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs =
+ (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+
+ // write blocked detection
+ const int64_t deltaWriteNs = lastIoEndNs - lastIoBeginNs;
+ if (mType == MIXER && deltaWriteNs > maxPeriod) {
+ mNumDelayedWrites++;
+ if ((lastIoEndNs - lastWarning) > kWarningThrottleNs) {
+ ATRACE_NAME("underrun");
+ ALOGW("write blocked for %lld msecs, "
+ "%d delayed writes, thread %d",
+ (long long)deltaWriteNs / NANOS_PER_MILLISECOND,
+ mNumDelayedWrites, mId);
+ lastWarning = lastIoEndNs;
+ }
+ }
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountWritten = loopCount;
}
} else if ((mMixerStatus == MIXER_DRAIN_TRACK) ||
(mMixerStatus == MIXER_DRAIN_ALL)) {
threadLoop_drain();
}
if (mType == MIXER && !mStandby) {
- // write blocked detection
- if (delta > maxPeriod) {
- mNumDelayedWrites++;
- if ((lastWriteFinished - lastWarning) > kWarningThrottleNs) {
- ATRACE_NAME("underrun");
- ALOGW("write blocked for %llu msecs, %d delayed writes, thread %p",
- (unsigned long long) ns2ms(delta), mNumDelayedWrites, this);
- lastWarning = lastWriteFinished;
- }
- }
if (mThreadThrottle
&& mMixerStatus == MIXER_TRACKS_READY // we are mixing (active tracks)
- && ret > 0) { // we wrote something
+ && writePeriodNs > 0) { // we have write period info
// Limit MixerThread data processing to no more than twice the
// expected processing rate.
//
@@ -3690,12 +3769,9 @@
// 2. threadLoop_mix (significant for heavy mixing, especially
// on low tier processors)
- // it's OK if deltaMs (and deltaNs) is an overestimate.
- nsecs_t deltaNs;
- // deltaNs = lastWriteFinished - previousLastWriteFinished;
- __builtin_sub_overflow(
- lastWriteFinished,previousLastWriteFinished, &deltaNs);
- const int32_t deltaMs = deltaNs / 1000000;
+ // it's OK if deltaMs is an overestimate.
+
+ const int32_t deltaMs = writePeriodNs / NANOS_PER_MILLISECOND;
const int32_t throttleMs = (int32_t)mHalfBufferMs - deltaMs;
if ((signed)mHalfBufferMs >= throttleMs && throttleMs > 0) {
@@ -3708,7 +3784,8 @@
mThreadThrottleTimeMs += throttleMs;
// Throttle must be attributed to the previous mixer loop's write time
// to allow back-to-back throttling.
- lastWriteFinished += throttleMs * 1000000;
+ // This also ensures proper timing statistics.
+ mLastIoEndNs = systemTime(); // we fetch the write end time again.
} else {
uint32_t diff = mThreadThrottleTimeMs - mThreadThrottleEndMs;
if (diff > 0) {
@@ -5304,13 +5381,6 @@
dprintf(fd, " Master balance: %f (%s)\n", mMasterBalance.load(),
(hasFastMixer() ? std::to_string(mFastMixer->getMasterBalance())
: mBalance.toString()).c_str());
- const double latencyMs = mTimestamp.getOutputServerLatencyMs(mSampleRate);
- if (latencyMs != 0.) {
- dprintf(fd, " NormalMixer latency ms: %.2lf\n", latencyMs);
- } else {
- dprintf(fd, " NormalMixer latency ms: unavail\n");
- }
-
if (hasFastMixer()) {
dprintf(fd, " FastMixer thread %p tid=%d", mFastMixer.get(), mFastMixer->getTid());
@@ -6751,8 +6821,10 @@
// used to request a deferred sleep, to be executed later while mutex is unlocked
uint32_t sleepUs = 0;
+ int64_t lastLoopCountRead = -2; // never matches "previous" loop, when loopCount = 0.
+
// loop while there is work to do
- for (;;) {
+ for (int64_t loopCount = 0;; ++loopCount) { // loopCount used for statistics tracking
Vector< sp<EffectChain> > effectChains;
// activeTracks accumulates a copy of a subset of mActiveTracks
@@ -6951,6 +7023,7 @@
int32_t rear = mRsmpInRear & (mRsmpInFramesP2 - 1);
ssize_t framesRead;
+ const int64_t lastIoBeginNs = systemTime(); // start IO timing
// If an NBAIO source is present, use it to read the normal capture's data
if (mPipeSource != 0) {
@@ -7008,10 +7081,12 @@
}
}
+ const int64_t lastIoEndNs = systemTime(); // end IO timing
+
// Update server timestamp with server stats
// systemTime() is optional if the hardware supports timestamps.
mTimestamp.mPosition[ExtendedTimestamp::LOCATION_SERVER] += framesRead;
- mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = systemTime();
+ mTimestamp.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] = lastIoEndNs;
// Update server timestamp with kernel stats
if (mPipeSource.get() == nullptr /* don't obtain for FastCapture, could block */) {
@@ -7045,6 +7120,15 @@
mTimestampVerifier.error();
}
}
+
+ // From the timestamp, input read latency is negative output write latency.
+ const audio_input_flags_t flags = mInput != NULL ? mInput->flags : AUDIO_INPUT_FLAG_NONE;
+ const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
+ ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
+ if (latencyMs != 0.) { // note 0. means timestamp is empty.
+ mLatencyMs.add(latencyMs);
+ }
+
// Use this to track timestamp information
// ALOGD("%s", mTimestamp.toString().c_str());
@@ -7060,6 +7144,24 @@
ALOG_ASSERT(framesRead > 0);
mFramesRead += framesRead;
+ if (audio_has_proportional_frames(mFormat)
+ && loopCount == lastLoopCountRead + 1) {
+ const int64_t readPeriodNs = lastIoEndNs - mLastIoEndNs;
+ const double jitterMs =
+ TimestampVerifier<int64_t, int64_t>::computeJitterMs(
+ {framesRead, readPeriodNs},
+ {0, 0} /* lastTimestamp */, mSampleRate);
+ const double processMs = (lastIoBeginNs - mLastIoEndNs) * 1e-6;
+
+ Mutex::Autolock _l(mLock);
+ mIoJitterMs.add(jitterMs);
+ mProcessTimeMs.add(processMs);
+ }
+ // update timing info.
+ mLastIoBeginNs = lastIoBeginNs;
+ mLastIoEndNs = lastIoEndNs;
+ lastLoopCountRead = loopCount;
+
#ifdef TEE_SINK
(void)mTee.write((uint8_t*)mRsmpInBuffer + rear * mFrameSize, framesRead);
#endif
@@ -7581,6 +7683,8 @@
// note that threadLoop may still be processing the track at this point [without lock]
recordTrack->mState = TrackBase::PAUSING;
+ // NOTE: Waiting here is important to keep stop synchronous.
+ // This is needed for proper patchRecord peer release.
while (recordTrack->mState == TrackBase::PAUSING && !recordTrack->isInvalid()) {
mWaitWorkCV.broadcast(); // signal thread to stop
mStartStopCond.wait(mLock);
@@ -7638,14 +7742,14 @@
status_t AudioFlinger::RecordThread::setMicrophoneDirection(audio_microphone_direction_t direction)
{
- ALOGV("RecordThread::setMicrophoneDirection");
+ ALOGV("setMicrophoneDirection(%d)", direction);
AutoMutex _l(mLock);
return mInput->stream->setMicrophoneDirection(direction);
}
status_t AudioFlinger::RecordThread::setMicrophoneFieldDimension(float zoom)
{
- ALOGV("RecordThread::setMicrophoneFieldDimension");
+ ALOGV("setMicrophoneFieldDimension(%f)", zoom);
AutoMutex _l(mLock);
return mInput->stream->setMicrophoneFieldDimension(zoom);
}
@@ -7708,7 +7812,7 @@
AudioStreamIn *input = mInput;
audio_input_flags_t flags = input != NULL ? input->flags : AUDIO_INPUT_FLAG_NONE;
dprintf(fd, " AudioStreamIn: %p flags %#x (%s)\n",
- input, flags, inputFlagsToString(flags).c_str());
+ input, flags, toString(flags).c_str());
dprintf(fd, " Frames read: %lld\n", (long long)mFramesRead);
if (mActiveTracks.isEmpty()) {
dprintf(fd, " No active record clients\n");
@@ -7719,14 +7823,6 @@
(void)input->stream->dump(fd);
}
- const double latencyMs = RecordTrack::checkServerLatencySupported(mFormat, flags)
- ? - mTimestamp.getOutputServerLatencyMs(mSampleRate) : 0.;
- if (latencyMs != 0.) {
- dprintf(fd, " NormalRecord latency ms: %.2lf\n", latencyMs);
- } else {
- dprintf(fd, " NormalRecord latency ms: unavail\n");
- }
-
dprintf(fd, " Fast capture thread: %s\n", hasFastCapture() ? "yes" : "no");
dprintf(fd, " Fast track available: %s\n", mFastTrackAvail ? "yes" : "no");
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 1131b26..97aa9f0 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -399,6 +399,9 @@
virtual void dump(int fd, const Vector<String16>& args) = 0;
+ // deliver stats to mediametrics.
+ void sendStatistics(bool force);
+
mutable Mutex mLock;
protected:
@@ -512,6 +515,20 @@
TimestampVerifier< // For timestamp statistics.
int64_t /* frame count */, int64_t /* time ns */> mTimestampVerifier;
audio_devices_t mTimestampCorrectedDevices = AUDIO_DEVICE_NONE;
+
+ // ThreadLoop statistics per iteration.
+ int64_t mLastIoBeginNs = -1;
+ int64_t mLastIoEndNs = -1;
+
+ // This should be read under ThreadBase lock (if not on the threadLoop thread).
+ audio_utils::Statistics<double> mIoJitterMs{0.995 /* alpha */};
+ audio_utils::Statistics<double> mProcessTimeMs{0.995 /* alpha */};
+ audio_utils::Statistics<double> mLatencyMs{0.995 /* alpha */};
+
+ // Save the last count when we delivered statistics to mediametrics.
+ int64_t mLastRecordedTimestampVerifierN = 0;
+ int64_t mLastRecordedTimeNs = 0; // BOOTTIME to include suspend.
+
bool mIsMsdDevice = false;
// A condition that must be evaluated by the thread loop has changed and
// we must not wait for async write callback in the thread loop before evaluating it
@@ -1030,7 +1047,6 @@
float mMasterVolume;
std::atomic<float> mMasterBalance{};
audio_utils::Balance mBalance;
- nsecs_t mLastWriteTime;
int mNumWrites;
int mNumDelayedWrites;
bool mInWrite;
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 0ba0ab4..e23173f 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -337,10 +337,19 @@
PatchTrackBase(sp<ClientProxy> proxy, const ThreadBase& thread,
const Timeout& timeout);
void setPeerTimeout(std::chrono::nanoseconds timeout);
- void setPeerProxy(PatchProxyBufferProvider *proxy) { mPeerProxy = proxy; }
+ template <typename T>
+ void setPeerProxy(const sp<T> &proxy, bool holdReference) {
+ mPeerReferenceHold = holdReference ? proxy : nullptr;
+ mPeerProxy = proxy.get();
+ }
+ void clearPeerProxy() {
+ mPeerReferenceHold.clear();
+ mPeerProxy = nullptr;
+ }
protected:
const sp<ClientProxy> mProxy;
+ sp<RefBase> mPeerReferenceHold; // keeps mPeerProxy alive during access.
PatchProxyBufferProvider* mPeerProxy = nullptr;
struct timespec mPeerTimeout{};
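The setPeerProxy/clearPeerProxy change above separates "who may be called" (the raw PatchProxyBufferProvider pointer) from "who keeps the peer alive" (an optional strong reference). A minimal illustration of the same pattern with placeholder types; Peer and Holder are not AudioFlinger classes:

// Sketch: optionally pin the peer's lifetime while calling through a raw pointer.
#include <utils/RefBase.h>
#include <utils/StrongPointer.h>

struct Peer : public android::RefBase {
    void provide() {}
};

class Holder {
public:
    void setPeer(const android::sp<Peer>& peer, bool holdReference) {
        mPeerHold = holdReference ? peer : nullptr;  // strong ref held on one side only
        mPeer = peer.get();                          // raw pointer actually used for calls
    }
    void clearPeer() {
        mPeerHold.clear();
        mPeer = nullptr;
    }
private:
    android::sp<android::RefBase> mPeerHold;  // pins the peer while in use
    Peer* mPeer = nullptr;
};

Holding the strong reference on only one side (the patch track holds the record, not vice versa) is what lets PatchPanel's clearTrackPeer() above break the reference cycle at stop time.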
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 65f799e..5a43696 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -700,8 +700,13 @@
const AudioBufferProvider::Buffer& sourceBuffer) {
auto start = std::chrono::steady_clock::now();
const size_t frameCount = sourceBuffer.frameCount;
- for (auto& sink : mTeePatches) {
- RecordThread::PatchRecord* patchRecord = sink.patchRecord.get();
+ if (frameCount == 0) {
+ return; // No audio to intercept.
+ // Additionally, PatchProxyBufferProvider::obtainBuffer (called by PatchTrack::getNextBuffer)
+ // does not allow a 0-frame request, contrary to getNextBuffer.
+ }
+ for (auto& teePatch : mTeePatches) {
+ RecordThread::PatchRecord* patchRecord = teePatch.patchRecord.get();
size_t framesWritten = writeFrames(patchRecord, sourceBuffer.i8, frameCount);
// On buffer wrap, the buffer frame count will be less than requested,
@@ -1718,6 +1723,7 @@
AudioFlinger::PlaybackThread::PatchTrack::~PatchTrack()
{
+ ALOGV("%s(%d)", __func__, mId);
}
status_t AudioFlinger::PlaybackThread::PatchTrack::start(AudioSystem::sync_event_t event,
@@ -2188,6 +2194,7 @@
AudioFlinger::RecordThread::PatchRecord::~PatchRecord()
{
+ ALOGV("%s(%d)", __func__, mId);
}
// AudioBufferProvider interface
diff --git a/services/audiopolicy/Android.mk b/services/audiopolicy/Android.mk
index f72f44a..9e4eebc 100644
--- a/services/audiopolicy/Android.mk
+++ b/services/audiopolicy/Android.mk
@@ -90,7 +90,7 @@
LOCAL_SHARED_LIBRARIES += libmedia_helper
LOCAL_SHARED_LIBRARIES += libmediametrics
-LOCAL_SHARED_LIBRARIES += libhidlbase libxml2
+LOCAL_SHARED_LIBRARIES += libbinder libhidlbase libxml2
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
LOCAL_CFLAGS += -DUSE_XML_AUDIO_POLICY_CONF
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index bb5441d..acbfc9e 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -58,10 +58,12 @@
typedef enum {
API_INPUT_INVALID = -1,
API_INPUT_LEGACY = 0,// e.g. audio recording from a microphone
- API_INPUT_MIX_CAPTURE,// used for "remote submix", capture of the media to play it remotely
+ API_INPUT_MIX_CAPTURE,// used for "remote submix" legacy mode (no DAP),
+ // capture of the media to play it remotely
API_INPUT_MIX_EXT_POLICY_REROUTE,// used for platform audio rerouting, where mixes are
// handled by external and dynamically installed
// policies which reroute audio mixes
+ API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK, // used for playback capture with a MediaProjection
API_INPUT_TELEPHONY_RX, // used for capture from telephony RX path
} input_type_t;
@@ -159,6 +161,19 @@
int *index,
audio_devices_t device) = 0;
+ virtual status_t setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device) = 0;
+ virtual status_t getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device) = 0;
+
+ virtual status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index) = 0;
+
+ virtual status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index) = 0;
+
// return the strategy corresponding to a given stream type
virtual uint32_t getStrategyForStream(audio_stream_type_t stream) = 0;
@@ -244,7 +259,13 @@
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) = 0;
- virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa) = 0;
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy) = 0;
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) = 0;
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup) = 0;
};
@@ -341,6 +362,8 @@
virtual void onAudioPatchListUpdate() = 0;
+ virtual void onAudioVolumeGroupChanged(volume_group_t group, int flags) = 0;
+
virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use) = 0;
virtual void onDynamicPolicyMixStateUpdate(String8 regId, int32_t state) = 0;
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index a3b6b36..561f100 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -16,18 +16,22 @@
#pragma once
+#include <media/AudioCommonTypes.h>
#include <system/audio.h>
#include <utils/Log.h>
#include <math.h>
namespace android {
+
+
/**
* VolumeSource is the discriminent for volume management on an output.
* It used to be the stream type by legacy, it may be host volume group or a volume curves if
- * we allow to have more than one curve per volume group.
+ * we allow to have more than one curve per volume group (mandatory to get rid of AudioServer
+ * stream aliases).
*/
-enum VolumeSource : std::underlying_type<audio_stream_type_t>::type;
-static const VolumeSource VOLUME_SOURCE_NONE = static_cast<VolumeSource>(AUDIO_STREAM_DEFAULT);
+enum VolumeSource : std::underlying_type<volume_group_t>::type;
+static const VolumeSource VOLUME_SOURCE_NONE = static_cast<VolumeSource>(VOLUME_GROUP_NONE);
static inline VolumeSource streamToVolumeSource(audio_stream_type_t stream) {
return static_cast<VolumeSource>(stream);
diff --git a/services/audiopolicy/common/managerdefinitions/Android.bp b/services/audiopolicy/common/managerdefinitions/Android.bp
index e5ebab7..c9037a1 100644
--- a/services/audiopolicy/common/managerdefinitions/Android.bp
+++ b/services/audiopolicy/common/managerdefinitions/Android.bp
@@ -19,7 +19,6 @@
"src/Serializer.cpp",
"src/SoundTriggerSession.cpp",
"src/TypeConverter.cpp",
- "src/VolumeCurve.cpp",
],
shared_libs: [
"libcutils",
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
index 996347b..4af93e1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioGain.h
@@ -20,6 +20,7 @@
#include <utils/RefBase.h>
#include <utils/String8.h>
#include <system/audio.h>
+#include <vector>
namespace android {
@@ -59,12 +60,36 @@
void getDefaultConfig(struct audio_gain_config *config);
status_t checkConfig(const struct audio_gain_config *config);
+ void setUseForVolume(bool canUseForVolume) { mUseForVolume = canUseForVolume; }
+ bool canUseForVolume() const { return mUseForVolume; }
+
const struct audio_gain &getGain() const { return mGain; }
private:
int mIndex;
struct audio_gain mGain;
bool mUseInChannelMask;
+ bool mUseForVolume = false;
+};
+
+class AudioGains : public std::vector<sp<AudioGain> >
+{
+public:
+ bool canUseForVolume() const
+ {
+ for (const auto &gain: *this) {
+ if (gain->canUseForVolume()) {
+ return true;
+ }
+ }
+ return false;
+ }
+
+ int32_t add(const sp<AudioGain> gain)
+ {
+ push_back(gain);
+ return 0;
+ }
};
} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
index 803cfac..e071fe0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioInputDescriptor.h
@@ -29,7 +29,7 @@
namespace android {
-class AudioMix;
+class AudioPolicyMix;
class AudioPolicyClientInterface;
// descriptor for audio inputs. Used to maintain current configuration of each opened audio input
@@ -53,7 +53,7 @@
void dump(String8 *dst) const override;
audio_io_handle_t mIoHandle = AUDIO_IO_HANDLE_NONE; // input handle
- AudioMix *mPolicyMix = nullptr; // non NULL when used by a dynamic policy
+ wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
const sp<IOProfile> mProfile; // I/O profile this output derives from
virtual void toAudioPortConfig(struct audio_port_config *dstConfig,
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index cf9519b..cd54085 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -34,7 +34,7 @@
namespace android {
class IOProfile;
-class AudioMix;
+class AudioPolicyMix;
class AudioPolicyClientInterface;
class ActivityTracking
@@ -98,7 +98,7 @@
ActivityTracking::dump(dst, spaces);
dst->appendFormat(", Volume: %.03f, MuteCount: %02d\n", mCurVolumeDb, mMuteCount);
}
- void setVolume(float volume) { mCurVolumeDb = volume; }
+ void setVolume(float volumeDb) { mCurVolumeDb = volumeDb; }
float getVolume() const { return mCurVolumeDb; }
private:
@@ -107,7 +107,7 @@
};
/**
* Note: volume activities shall be indexed by CurvesId if we want to allow multiple
- * curves per volume group, inferring a mute management or volume balancing between HW and SW is
+ * curves per volume source, inferring a mute management or volume balancing between HW and SW is
* done
*/
using VolumeActivities = std::map<VolumeSource, VolumeActivity>;
@@ -156,8 +156,8 @@
virtual bool isDuplicated() const { return false; }
virtual uint32_t latency() { return 0; }
virtual bool isFixedVolume(audio_devices_t device);
- virtual bool setVolume(float volume,
- audio_stream_type_t stream,
+ virtual bool setVolume(float volumeDb,
+ VolumeSource volumeSource, const StreamTypeVector &streams,
audio_devices_t device,
uint32_t delayMs,
bool force);
@@ -219,10 +219,10 @@
{
return mVolumeActivities[vs].decMuteCount();
}
- void setCurVolume(VolumeSource vs, float volume)
+ void setCurVolume(VolumeSource vs, float volumeDb)
{
- // Even if not activity for this group registered, need to create anyway
- mVolumeActivities[vs].setVolume(volume);
+ // Even if no activity is registered for this source, we need to create one anyway
+ mVolumeActivities[vs].setVolume(volumeDb);
}
float getCurVolume(VolumeSource vs) const
{
@@ -280,8 +280,13 @@
return mActiveClients;
}
+ bool useHwGain() const
+ {
+ return !devices().isEmpty() ? devices().itemAt(0)->hasGainController() : false;
+ }
+
DeviceVector mDevices; /**< current devices this output is routed to */
- AudioMix *mPolicyMix = nullptr; // non NULL when used by a dynamic policy
+ wp<AudioPolicyMix> mPolicyMix; // non NULL when used by a dynamic policy
protected:
const sp<AudioPort> mPort;
@@ -327,8 +332,8 @@
setClientActive(client, false);
}
}
- virtual bool setVolume(float volume,
- audio_stream_type_t stream,
+ virtual bool setVolume(float volumeDb,
+ VolumeSource volumeSource, const StreamTypeVector &streams,
audio_devices_t device,
uint32_t delayMs,
bool force);
@@ -401,8 +406,8 @@
void dump(String8 *dst) const override;
- virtual bool setVolume(float volume,
- audio_stream_type_t stream,
+ virtual bool setVolume(float volumeDb,
+ VolumeSource volumeSource, const StreamTypeVector &streams,
audio_devices_t device,
uint32_t delayMs,
bool force);
@@ -422,7 +427,7 @@
bool isActive(VolumeSource volumeSource, uint32_t inPastMs = 0) const;
/**
- * return whether any source contributing to VolumeSource is playing remotely, override
+ * return whether any source contributing to VolumeSource is playing remotely, override
* to change the definition of
* local/remote playback, used for instance by notification manager to not make
* media players lose audio focus when not playing locally
@@ -488,8 +493,8 @@
/**
* @brief isAnyOutputActive checks if any output is active (aka playing) except the one(s) that
* hold the volume source to be ignored
- * @param volumeSourceToIgnore source not considered in the activity detection
- * @return true if any output is active for any source except the one to be ignored
+ * @param volumeSourceToIgnore source not to be considered in the activity detection
+ * @return true if any output is active for any volume source except the one to be ignored
*/
bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
{
@@ -518,8 +523,8 @@
/**
* @brief isAnyOutputActive checks if any output is active (aka playing) except the one(s) that
* hold the volume source to be ignored
- * @param volumeSourceToIgnore source not considered in the activity detection
- * @return true if any output is active for any source except the one to be ignored
+ * @param volumeSourceToIgnore source not to be considered in the activity detection
+ * @return true if any output is active for any volume source except the one to be ignored
*/
bool isAnyOutputActive(VolumeSource volumeSourceToIgnore) const
{
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index d52eb3d..2264d8f 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -20,7 +20,6 @@
#include <unordered_set>
#include <AudioGain.h>
-#include <VolumeCurve.h>
#include <AudioPort.h>
#include <AudioPatch.h>
#include <DeviceDescriptor.h>
@@ -40,13 +39,11 @@
AudioPolicyConfig(HwModuleCollection &hwModules,
DeviceVector &availableOutputDevices,
DeviceVector &availableInputDevices,
- sp<DeviceDescriptor> &defaultOutputDevice,
- VolumeCurvesCollection *volumes = nullptr)
+ sp<DeviceDescriptor> &defaultOutputDevice)
: mHwModules(hwModules),
mAvailableOutputDevices(availableOutputDevices),
mAvailableInputDevices(availableInputDevices),
mDefaultOutputDevice(defaultOutputDevice),
- mVolumeCurves(volumes),
mIsSpeakerDrcEnabled(false)
{}
@@ -58,13 +55,6 @@
mSource = file;
}
- void setVolumes(const VolumeCurvesCollection &volumes)
- {
- if (mVolumeCurves != nullptr) {
- *mVolumeCurves = volumes;
- }
- }
-
void setHwModules(const HwModuleCollection &hwModules)
{
mHwModules = hwModules;
@@ -182,7 +172,6 @@
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
sp<DeviceDescriptor> &mDefaultOutputDevice;
- VolumeCurvesCollection *mVolumeCurves;
// TODO: remove when legacy conf file is removed. true on devices that use DRC on the
// DEVICE_CATEGORY_SPEAKER path to boost soft sounds, used to adjust volume curves accordingly.
// Note: remove also speaker_drc_enabled from global configuration of XML config file.
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index d6f24b2..7a9c26e 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -31,24 +31,19 @@
/**
* custom mix entry in mPolicyMixes
*/
-class AudioPolicyMix : public RefBase {
+class AudioPolicyMix : public AudioMix, public RefBase {
public:
- AudioPolicyMix() {}
+ AudioPolicyMix(const AudioMix &mix) : AudioMix(mix) {}
+ AudioPolicyMix(const AudioPolicyMix&) = delete;
+ AudioPolicyMix& operator=(const AudioPolicyMix&) = delete;
- const sp<SwAudioOutputDescriptor> &getOutput() const;
-
- void setOutput(sp<SwAudioOutputDescriptor> &output);
-
- void clearOutput();
-
- android::AudioMix *getMix();
-
- void setMix(const AudioMix &mix);
+ const sp<SwAudioOutputDescriptor> &getOutput() const { return mOutput; }
+ void setOutput(const sp<SwAudioOutputDescriptor> &output) { mOutput = output; }
+ void clearOutput() { mOutput.clear(); }
void dump(String8 *dst, int spaces, int index) const;
private:
- AudioMix mMix; // Audio policy mix descriptor
sp<SwAudioOutputDescriptor> mOutput; // Corresponding output stream
};
@@ -77,21 +72,19 @@
sp<DeviceDescriptor> getDeviceAndMixForInputSource(audio_source_t inputSource,
const DeviceVector &availableDeviceTypes,
- AudioMix **policyMix) const;
+ sp<AudioPolicyMix> *policyMix) const;
/**
* @brief try to find a matching mix for a given output descriptor and returns the associated
* output device.
* @param output to be considered
* @param availableOutputDevices list of output devices currently reachable
- * @param policyMix to be returned if any mix matching ouput descriptor
* @return device selected from the mix attached to the output, null pointer otherwise
*/
sp<DeviceDescriptor> getDeviceAndMixForOutput(const sp<SwAudioOutputDescriptor> &output,
- const DeviceVector &availableOutputDevices,
- AudioMix **policyMix = nullptr);
+ const DeviceVector &availableOutputDevices);
- status_t getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix);
+ status_t getInputMixForAttr(audio_attributes_t attr, sp<AudioPolicyMix> *policyMix);
status_t setUidDeviceAffinities(uid_t uid, const Vector<AudioDeviceTypeAddr>& devices);
status_t removeUidDeviceAffinities(uid_t uid);
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
index 1b5a2d6..2d182bd 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPort.h
@@ -18,6 +18,7 @@
#include "AudioCollections.h"
#include "AudioProfile.h"
+#include "AudioGain.h"
#include "HandleGenerator.h"
#include <utils/String8.h>
#include <utils/Vector.h>
@@ -29,9 +30,7 @@
namespace android {
class HwModule;
-class AudioGain;
class AudioRoute;
-typedef Vector<sp<AudioGain> > AudioGainCollection;
class AudioPort : public virtual RefBase, private HandleGenerator<audio_port_handle_t>
{
@@ -49,8 +48,8 @@
virtual const String8 getTagName() const = 0;
- void setGains(const AudioGainCollection &gains) { mGains = gains; }
- const AudioGainCollection &getGains() const { return mGains; }
+ void setGains(const AudioGains &gains) { mGains = gains; }
+ const AudioGains &getGains() const { return mGains; }
virtual void setFlags(uint32_t flags)
{
@@ -138,7 +137,7 @@
void log(const char* indent) const;
- AudioGainCollection mGains; // gain controllers
+ AudioGains mGains; // gain controllers
private:
void pickChannelMask(audio_channel_mask_t &channelMask, const ChannelsVector &channelMasks) const;
@@ -165,6 +164,8 @@
return (other != 0) && (other->getAudioPort() != 0) && (getAudioPort() != 0) &&
(other->getAudioPort()->getModuleHandle() == getAudioPort()->getModuleHandle());
}
+ bool hasGainController(bool canUseForVolume = false) const;
+
unsigned int mSamplingRate = 0u;
audio_format_t mFormat = AUDIO_FORMAT_INVALID;
audio_channel_mask_t mChannelMask = AUDIO_CHANNEL_NONE;
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
new file mode 100644
index 0000000..d408446
--- /dev/null
+++ b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurves.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <system/audio.h>
+#include <Volume.h>
+#include <utils/Errors.h>
+#include <utils/String8.h>
+#include <vector>
+
+namespace android {
+
+class IVolumeCurves
+{
+public:
+ virtual ~IVolumeCurves() = default;
+
+ virtual void clearCurrentVolumeIndex() = 0;
+ virtual void addCurrentVolumeIndex(audio_devices_t device, int index) = 0;
+ virtual bool canBeMuted() const = 0;
+ virtual int getVolumeIndexMin() const = 0;
+ virtual int getVolumeIndex(audio_devices_t device) const = 0;
+ virtual int getVolumeIndexMax() const = 0;
+ virtual float volIndexToDb(device_category device, int indexInUi) const = 0;
+ virtual bool hasVolumeIndexForDevice(audio_devices_t device) const = 0;
+ virtual status_t initVolume(int indexMin, int indexMax) = 0;
+ virtual std::vector<audio_attributes_t> getAttributes() const = 0;
+ virtual std::vector<audio_stream_type_t> getStreamTypes() const = 0;
+ virtual void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const = 0;
+};
+
+} // namespace android
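
The new IVolumeCurves interface scopes the old per-stream collection API down to a single volume group: index range, mutability and the index-to-dB conversion are queried directly on the curves object, with no audio_stream_type_t parameter. A minimal usage sketch, assuming a curves reference obtained from the engine (for example via the getVolumeCurvesForVolumeGroup() accessor introduced later in this patch) and using DEVICE_CATEGORY_SPEAKER purely for illustration:

    // Usage sketch, not part of the patch.
    #include <algorithm>
    #include "IVolumeCurves.h"

    float uiIndexToDb(const android::IVolumeCurves &curves, int indexInUi) {
        // Clamp the UI index to the configured range before converting.
        const int index = std::max(curves.getVolumeIndexMin(),
                                   std::min(indexInUi, curves.getVolumeIndexMax()));
        return curves.volIndexToDb(DEVICE_CATEGORY_SPEAKER, index);
    }
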
diff --git a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h b/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
deleted file mode 100644
index 750da55..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/IVolumeCurvesCollection.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include <system/audio.h>
-#include <Volume.h>
-#include <utils/Errors.h>
-#include <utils/String8.h>
-
-namespace android {
-
-class IVolumeCurvesCollection
-{
-public:
- virtual ~IVolumeCurvesCollection() = default;
-
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream) = 0;
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device,
- int index) = 0;
- virtual bool canBeMuted(audio_stream_type_t stream) = 0;
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const = 0;
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device) = 0;
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const = 0;
- virtual float volIndexToDb(audio_stream_type_t stream, device_category device,
- int indexInUi) const = 0;
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax) = 0;
-
- virtual void initializeVolumeCurves(bool /*isSpeakerDrcEnabled*/) {}
- virtual void switchVolumeCurve(audio_stream_type_t src, audio_stream_type_t dst) = 0;
- virtual void restoreOriginVolumeCurve(audio_stream_type_t stream)
- {
- switchVolumeCurve(stream, stream);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const = 0;
-
- virtual void dump(String8 *dst) const = 0;
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h b/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
deleted file mode 100644
index 76ec198..0000000
--- a/services/audiopolicy/common/managerdefinitions/include/VolumeCurve.h
+++ /dev/null
@@ -1,239 +0,0 @@
-/*
- * Copyright (C) 2015 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#include "IVolumeCurvesCollection.h"
-#include <policy.h>
-#include <utils/RefBase.h>
-#include <utils/String8.h>
-#include <utils/SortedVector.h>
-#include <utils/KeyedVector.h>
-#include <system/audio.h>
-#include <cutils/config_utils.h>
-#include <string>
-#include <utility>
-
-namespace android {
-
-struct CurvePoint
-{
- CurvePoint() {}
- CurvePoint(int index, int attenuationInMb) :
- mIndex(index), mAttenuationInMb(attenuationInMb) {}
- uint32_t mIndex;
- int mAttenuationInMb;
-};
-
-inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
-{
- return lhs.mIndex < rhs.mIndex;
-}
-
-// A volume curve for a given use case and device category
-// It contains of list of points of this curve expressing the attenuation in Millibels for
-// a given volume index from 0 to 100
-class VolumeCurve : public RefBase
-{
-public:
- VolumeCurve(device_category device, audio_stream_type_t stream) :
- mDeviceCategory(device), mStreamType(stream) {}
-
- device_category getDeviceCategory() const { return mDeviceCategory; }
- audio_stream_type_t getStreamType() const { return mStreamType; }
-
- void add(const CurvePoint &point) { mCurvePoints.add(point); }
-
- float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
-
- void dump(String8 *result) const;
-
-private:
- SortedVector<CurvePoint> mCurvePoints;
- device_category mDeviceCategory;
- audio_stream_type_t mStreamType;
-};
-
-// Volume Curves for a given use case indexed by device category
-class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
-{
-public:
- VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
- {
- mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
- }
-
- sp<VolumeCurve> getCurvesFor(device_category device) const
- {
- if (indexOfKey(device) < 0) {
- return 0;
- }
- return valueFor(device);
- }
-
- int getVolumeIndex(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
- if (mIndexCur.indexOfKey(device) < 0) {
- device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
- }
- return mIndexCur.valueFor(device);
- }
-
- bool canBeMuted() const { return mCanBeMuted; }
- void clearCurrentVolumeIndex() { mIndexCur.clear(); }
- void addCurrentVolumeIndex(audio_devices_t device, int index) { mIndexCur.add(device, index); }
-
- void setVolumeIndexMin(int volIndexMin) { mIndexMin = volIndexMin; }
- int getVolumeIndexMin() const { return mIndexMin; }
-
- void setVolumeIndexMax(int volIndexMax) { mIndexMax = volIndexMax; }
- int getVolumeIndexMax() const { return mIndexMax; }
-
- bool hasVolumeIndexForDevice(audio_devices_t device) const
- {
- device = Volume::getDeviceForVolume(device);
- return mIndexCur.indexOfKey(device) >= 0;
- }
-
- const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
- {
- ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
- return mOriginVolumeCurves.valueFor(deviceCategory);
- }
- void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
- {
- ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
- replaceValueFor(deviceCategory, volumeCurve);
- }
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- device_category deviceCategory = volumeCurve->getDeviceCategory();
- ssize_t index = indexOfKey(deviceCategory);
- if (index < 0) {
- // Keep track of original Volume Curves per device category in order to switch curves.
- mOriginVolumeCurves.add(deviceCategory, volumeCurve);
- return KeyedVector::add(deviceCategory, volumeCurve);
- }
- return index;
- }
-
- float volIndexToDb(device_category deviceCat, int indexInUi) const
- {
- sp<VolumeCurve> vc = getCurvesFor(deviceCat);
- if (vc != 0) {
- return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
- } else {
- ALOGE("Invalid device category %d for Volume Curve", deviceCat);
- return 0.0f;
- }
- }
-
- void dump(String8 *dst, int spaces, bool curvePoints = false) const;
-
-private:
- KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
- KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
- int mIndexMin; /**< min volume index. */
- int mIndexMax; /**< max volume index. */
- bool mCanBeMuted; /**< true is the stream can be muted. */
-};
-
-// Collection of Volume Curves indexed by use case
-class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
- public IVolumeCurvesCollection
-{
-public:
- VolumeCurvesCollection()
- {
- // Create an empty collection of curves
- for (ssize_t i = 0 ; i < AUDIO_STREAM_CNT; i++) {
- audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
- KeyedVector::add(stream, VolumeCurvesForStream());
- }
- }
-
- // Once XML has been parsed, must be call first to sanity check table and initialize indexes
- virtual status_t initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
- {
- editValueAt(stream).setVolumeIndexMin(indexMin);
- editValueAt(stream).setVolumeIndexMax(indexMax);
- return NO_ERROR;
- }
- virtual void clearCurrentVolumeIndex(audio_stream_type_t stream)
- {
- editCurvesFor(stream).clearCurrentVolumeIndex();
- }
- virtual void addCurrentVolumeIndex(audio_stream_type_t stream, audio_devices_t device, int index)
- {
- editCurvesFor(stream).addCurrentVolumeIndex(device, index);
- }
- virtual bool canBeMuted(audio_stream_type_t stream) { return getCurvesFor(stream).canBeMuted(); }
-
- virtual int getVolumeIndexMin(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMin();
- }
- virtual int getVolumeIndexMax(audio_stream_type_t stream) const
- {
- return getCurvesFor(stream).getVolumeIndexMax();
- }
- virtual int getVolumeIndex(audio_stream_type_t stream, audio_devices_t device)
- {
- return getCurvesFor(stream).getVolumeIndex(device);
- }
- virtual void switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
- {
- const VolumeCurvesForStream &sourceCurves = getCurvesFor(streamSrc);
- VolumeCurvesForStream &dstCurves = editCurvesFor(streamDst);
- ALOG_ASSERT(sourceCurves.size() == dstCurves.size(), "device category not aligned");
- for (size_t index = 0; index < sourceCurves.size(); index++) {
- device_category cat = sourceCurves.keyAt(index);
- dstCurves.setVolumeCurve(cat, sourceCurves.getOriginVolumeCurve(cat));
- }
- }
- virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
- {
- return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
- }
- virtual bool hasVolumeIndexForDevice(audio_stream_type_t stream,
- audio_devices_t device) const
- {
- return getCurvesFor(stream).hasVolumeIndexForDevice(device);
- }
-
- void dump(String8 *dst) const override;
-
- ssize_t add(const sp<VolumeCurve> &volumeCurve)
- {
- audio_stream_type_t streamType = volumeCurve->getStreamType();
- return editCurvesFor(streamType).add(volumeCurve);
- }
- VolumeCurvesForStream &editCurvesFor(audio_stream_type_t stream)
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return editValueAt(stream);
- }
- const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
- {
- ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
- return valueFor(stream);
- }
-};
-
-} // namespace android
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
index 1fa1123..635de6f 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioInputDescriptor.cpp
@@ -22,6 +22,7 @@
#include <AudioPolicyInterface.h>
#include "AudioInputDescriptor.h"
#include "AudioGain.h"
+#include "AudioPolicyMix.h"
#include "HwModule.h"
namespace android {
@@ -308,16 +309,17 @@
const int delta = active ? 1 : -1;
mGlobalActiveCount += delta;
+ sp<AudioPolicyMix> policyMix = mPolicyMix.promote();
if ((oldGlobalActiveCount == 0) && (mGlobalActiveCount > 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+ if ((policyMix != NULL) && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
{
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ mClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
} else if ((oldGlobalActiveCount > 0) && (mGlobalActiveCount == 0)) {
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
+ if ((policyMix != NULL) && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0))
{
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ mClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
}
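
Both descriptor changes in this file follow the same pattern: judging from the promote() call, mPolicyMix is no longer a raw AudioMix pointer but a weak reference to the refcounted AudioPolicyMix, so it must be promoted to a strong pointer before being dereferenced, and the activity callback is skipped if the mix has already been destroyed. A generic sketch of that sp/wp pattern, independent of the audio policy classes:

    // Sketch of the wp<> -> sp<> promotion used above; Mix is a stand-in type.
    #include <cstdint>
    #include <utils/RefBase.h>
    #include <utils/StrongPointer.h>

    struct Mix : public android::RefBase {
        uint32_t mCbFlags = 0;
    };

    void notifyIfStillAlive(const android::wp<Mix> &weakMix) {
        android::sp<Mix> mix = weakMix.promote(); // nullptr if the Mix was destroyed
        if (mix != nullptr) {
            // Holding the sp keeps the object alive for the duration of this scope.
            (void) mix->mCbFlags;
        }
    }
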
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 7293bc4..97b7a01 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -19,10 +19,12 @@
#include <AudioPolicyInterface.h>
#include "AudioOutputDescriptor.h"
+#include "AudioPolicyMix.h"
#include "IOProfile.h"
#include "AudioGain.h"
#include "Volume.h"
#include "HwModule.h"
+#include "TypeConverter.h"
#include <media/AudioParameter.h>
#include <media/AudioPolicy.h>
@@ -111,9 +113,10 @@
}
mGlobalActiveCount += delta;
- if ((mPolicyMix != NULL) && ((mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ sp<AudioPolicyMix> policyMix = mPolicyMix.promote();
+ if ((policyMix != NULL) && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
if ((oldGlobalActiveCount == 0) || (mGlobalActiveCount == 0)) {
- mClientInterface->onDynamicPolicyMixStateUpdate(mPolicyMix->mDeviceAddress,
+ mClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress,
mGlobalActiveCount > 0 ? MIX_STATE_MIXING : MIX_STATE_IDLE);
}
}
@@ -149,18 +152,19 @@
return false;
}
-bool AudioOutputDescriptor::setVolume(float volume,
- audio_stream_type_t stream,
- audio_devices_t device __unused,
+bool AudioOutputDescriptor::setVolume(float volumeDb,
+ VolumeSource volumeSource,
+ const StreamTypeVector &/*streams*/,
+ audio_devices_t /*device*/,
uint32_t delayMs,
bool force)
{
// We actually change the volume if:
// - the float value returned by computeVolume() changed
// - the force flag is set
- if (volume != getCurVolume(static_cast<VolumeSource>(stream)) || force) {
- ALOGV("setVolume() for stream %d, volume %f, delay %d", stream, volume, delayMs);
- setCurVolume(static_cast<VolumeSource>(stream), volume);
+ if (volumeDb != getCurVolume(volumeSource) || force) {
+ ALOGV("%s for volumeSrc %d, volume %f, delay %d", __func__, volumeSource, volumeDb, delayMs);
+ setCurVolume(volumeSource, volumeDb);
return true;
}
return false;
@@ -388,21 +392,59 @@
mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL;
}
-bool SwAudioOutputDescriptor::setVolume(float volume,
- audio_stream_type_t stream,
+bool SwAudioOutputDescriptor::setVolume(float volumeDb,
+ VolumeSource vs, const StreamTypeVector &streamTypes,
audio_devices_t device,
uint32_t delayMs,
bool force)
{
- if (!AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force)) {
+ StreamTypeVector streams = streamTypes;
+ if (!AudioOutputDescriptor::setVolume(volumeDb, vs, streamTypes, device, delayMs, force)) {
return false;
}
+ if (streams.empty()) {
+ streams.push_back(AUDIO_STREAM_MUSIC);
+ }
+ for (const auto& devicePort : devices()) {
+ // APM loops on all group, so filter on active group to set the port gain,
+ // let the other groups set the stream volume as per legacy
+ // TODO: Pass in the device address and check against it.
+ if (device == devicePort->type() &&
+ devicePort->hasGainController(true) && isActive(vs)) {
+ ALOGV("%s: device %s has gain controller", __func__, devicePort->toString().c_str());
+ // @todo: this may misbehave if the SwOutput has several active clients with
+ // different volume sources (or if several curves are allowed within the same volume group)
+ //
+ // @todo: default the stream volume to max (0 dB) when using the HW port gain?
+ float volumeAmpl = Volume::DbToAmpl(0);
+ for (const auto &stream : streams) {
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ }
+
+ AudioGains gains = devicePort->getGains();
+ int gainMinValueInMb = gains[0]->getMinValueInMb();
+ int gainMaxValueInMb = gains[0]->getMaxValueInMb();
+ int gainStepValueInMb = gains[0]->getStepValueInMb();
+ int gainValueMb = ((volumeDb * 100)/ gainStepValueInMb) * gainStepValueInMb;
+ gainValueMb = std::max(gainMinValueInMb, std::min(gainValueMb, gainMaxValueInMb));
+
+ audio_port_config config = {};
+ devicePort->toAudioPortConfig(&config);
+ config.config_mask = AUDIO_PORT_CONFIG_GAIN;
+ config.gain.values[0] = gainValueMb;
+ return mClientInterface->setAudioPortConfig(&config, 0) == NO_ERROR;
+ }
+ }
// Force VOICE_CALL to track BLUETOOTH_SCO stream volume when bluetooth audio is enabled
- float volumeAmpl = Volume::DbToAmpl(getCurVolume(static_cast<VolumeSource>(stream)));
- if (stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+ float volumeAmpl = Volume::DbToAmpl(getCurVolume(vs));
+ if (hasStream(streams, AUDIO_STREAM_BLUETOOTH_SCO)) {
mClientInterface->setStreamVolume(AUDIO_STREAM_VOICE_CALL, volumeAmpl, mIoHandle, delayMs);
}
- mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ for (const auto &stream : streams) {
+ ALOGV("%s output %d for volumeSource %d, volume %f, delay %d stream=%s", __func__,
+ mIoHandle, vs, volumeDb, delayMs, toString(stream).c_str());
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ }
return true;
}
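
When the selected device port exposes a gain flagged for volume, the hunk above forces the stream volume to full scale and instead converts the curve output from dB to millibels, aligns it with the port gain's step and clamps it to the gain's range before applying it through setAudioPortConfig(). A standalone numeric sketch of that mapping, using the example Speaker gain declared later in this patch (-8400..4000 mB, 100 mB step) and integer arithmetic to make the step alignment explicit:

    // Standalone sketch; the gain range values are taken from the Speaker
    // devicePort example added elsewhere in this patch.
    #include <algorithm>
    #include <cstdio>

    int main() {
        const int minMb = -8400, maxMb = 4000, stepMb = 100;
        const float volumeDb = -27.5f;                       // value produced by the volume curve
        int gainMb = (static_cast<int>(volumeDb * 100) / stepMb) * stepMb; // dB -> mB, aligned to step
        gainMb = std::max(minMb, std::min(gainMb, maxMb));   // clamp to the port gain range
        std::printf("requested port gain: %d mB\n", gainMb); // prints -2700
        return 0;
    }
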
@@ -591,13 +633,14 @@
}
-bool HwAudioOutputDescriptor::setVolume(float volume,
- audio_stream_type_t stream,
+bool HwAudioOutputDescriptor::setVolume(float volumeDb,
+ VolumeSource volumeSource, const StreamTypeVector &streams,
audio_devices_t device,
uint32_t delayMs,
bool force)
{
- bool changed = AudioOutputDescriptor::setVolume(volume, stream, device, delayMs, force);
+ bool changed =
+ AudioOutputDescriptor::setVolume(volumeDb, volumeSource, streams, device, delayMs, force);
if (changed) {
// TODO: use gain controller on source device if any to adjust volume
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
index cd1c2f2..3a4db90 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPatch.cpp
@@ -41,9 +41,7 @@
const audio_port_config &cfg = cfgs[i];
dst->appendFormat("%*s [%s %d] ", spaces, "", prefix, i + 1);
if (cfg.type == AUDIO_PORT_TYPE_DEVICE) {
- std::string device;
- deviceToString(cfg.ext.device.type, device);
- dst->appendFormat("Device ID %d %s", cfg.id, device.c_str());
+ dst->appendFormat("Device ID %d %s", cfg.id, toString(cfg.ext.device.type).c_str());
} else {
dst->appendFormat("Mix ID %d I/O handle %d", cfg.id, cfg.ext.mix.handle);
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 2c4695d..f7289ca 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -27,53 +27,26 @@
namespace android {
-void AudioPolicyMix::setOutput(sp<SwAudioOutputDescriptor> &output)
-{
- mOutput = output;
-}
-
-const sp<SwAudioOutputDescriptor> &AudioPolicyMix::getOutput() const
-{
- return mOutput;
-}
-
-void AudioPolicyMix::clearOutput()
-{
- mOutput.clear();
-}
-
-void AudioPolicyMix::setMix(const AudioMix &mix)
-{
- mMix = mix;
-}
-
-android::AudioMix *AudioPolicyMix::getMix()
-{
- return &mMix;
-}
-
void AudioPolicyMix::dump(String8 *dst, int spaces, int index) const
{
dst->appendFormat("%*sAudio Policy Mix %d:\n", spaces, "", index + 1);
std::string mixTypeLiteral;
- if (!MixTypeConverter::toString(mMix.mMixType, mixTypeLiteral)) {
- ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMix.mMixType);
+ if (!MixTypeConverter::toString(mMixType, mixTypeLiteral)) {
+ ALOGE("%s: failed to convert mix type %d", __FUNCTION__, mMixType);
return;
}
dst->appendFormat("%*s- mix type: %s\n", spaces, "", mixTypeLiteral.c_str());
std::string routeFlagLiteral;
- RouteFlagTypeConverter::maskToString(mMix.mRouteFlags, routeFlagLiteral);
+ RouteFlagTypeConverter::maskToString(mRouteFlags, routeFlagLiteral);
dst->appendFormat("%*s- Route Flags: %s\n", spaces, "", routeFlagLiteral.c_str());
- std::string deviceLiteral;
- deviceToString(mMix.mDeviceType, deviceLiteral);
- dst->appendFormat("%*s- device type: %s\n", spaces, "", deviceLiteral.c_str());
+ dst->appendFormat("%*s- device type: %s\n", spaces, "", toString(mDeviceType).c_str());
- dst->appendFormat("%*s- device address: %s\n", spaces, "", mMix.mDeviceAddress.string());
+ dst->appendFormat("%*s- device address: %s\n", spaces, "", mDeviceAddress.string());
int indexCriterion = 0;
- for (const auto &criterion : mMix.mCriteria) {
+ for (const auto &criterion : mCriteria) {
dst->appendFormat("%*s- Criterion %d:\n", spaces + 2, "", indexCriterion++);
std::string usageLiteral;
@@ -83,7 +56,7 @@
}
dst->appendFormat("%*s- Usage:%s\n", spaces + 4, "", usageLiteral.c_str());
- if (mMix.mMixType == MIX_TYPE_RECORDERS) {
+ if (mMixType == MIX_TYPE_RECORDERS) {
std::string sourceLiteral;
if (!SourceTypeConverter::toString(criterion.mValue.mSource, sourceLiteral)) {
ALOGE("%s: failed to convert source %d", __FUNCTION__, criterion.mValue.mSource);
@@ -111,12 +84,11 @@
ALOGE("registerPolicyMixes(): mix for address %s already registered", address.string());
return BAD_VALUE;
}
- sp<AudioPolicyMix> policyMix = new AudioPolicyMix();
- policyMix->setMix(mix);
+ sp<AudioPolicyMix> policyMix = new AudioPolicyMix(mix);
add(address, policyMix);
if (desc != 0) {
- desc->mPolicyMix = policyMix->getMix();
+ desc->mPolicyMix = policyMix;
policyMix->setOutput(desc);
}
return NO_ERROR;
@@ -170,15 +142,14 @@
continue;
}
- AudioMix *mix = policyMix->getMix();
- const bool primaryOutputMix = !is_mix_loopback_render(mix->mRouteFlags);
+ const bool primaryOutputMix = !is_mix_loopback_render(policyMix->mRouteFlags);
if (primaryOutputMix && primaryDesc != 0) {
ALOGV("%s: Skiping %zu: Primary output already found", __func__, i);
continue; // Primary output already found
}
- switch (mixMatch(mix, i, attributes, uid)) {
+ switch (mixMatch(policyMix.get(), i, attributes, uid)) {
case MixMatchStatus::INVALID_MIX: return BAD_VALUE; // TODO: Do we really want to abort?
case MixMatchStatus::NO_MATCH:
ALOGV("%s: Mix %zu: does not match", __func__, i);
@@ -186,7 +157,7 @@
case MixMatchStatus::MATCH:;
}
- policyDesc->mPolicyMix = mix;
+ policyDesc->mPolicyMix = policyMix;
if (primaryOutputMix) {
primaryDesc = policyDesc;
ALOGV("%s: Mix %zu: set primary desc", __func__, i);
@@ -329,17 +300,13 @@
sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForOutput(
const sp<SwAudioOutputDescriptor> &output,
- const DeviceVector &availableOutputDevices,
- AudioMix **policyMix)
+ const DeviceVector &availableOutputDevices)
{
for (size_t i = 0; i < size(); i++) {
if (valueAt(i)->getOutput() == output) {
- AudioMix *mix = valueAt(i)->getMix();
- if (policyMix != nullptr)
- *policyMix = mix;
// This Desc is involved in a Mix, which has the highest prio
- audio_devices_t deviceType = mix->mDeviceType;
- String8 address = mix->mDeviceAddress;
+ audio_devices_t deviceType = valueAt(i)->mDeviceType;
+ String8 address = valueAt(i)->mDeviceAddress;
ALOGV("%s: device (0x%x, addr=%s) forced by mix",
__FUNCTION__, deviceType, address.c_str());
return availableOutputDevices.getDevice(deviceType, address, AUDIO_FORMAT_DEFAULT);
@@ -349,10 +316,12 @@
}
sp<DeviceDescriptor> AudioPolicyMixCollection::getDeviceAndMixForInputSource(
- audio_source_t inputSource, const DeviceVector &availDevices, AudioMix **policyMix) const
+ audio_source_t inputSource,
+ const DeviceVector &availDevices,
+ sp<AudioPolicyMix> *policyMix) const
{
for (size_t i = 0; i < size(); i++) {
- AudioMix *mix = valueAt(i)->getMix();
+ AudioPolicyMix *mix = valueAt(i).get();
if (mix->mMixType != MIX_TYPE_RECORDERS) {
continue;
}
@@ -367,7 +336,7 @@
auto mixDevice =
availDevices.getDevice(device, mix->mDeviceAddress, AUDIO_FORMAT_DEFAULT);
if (mixDevice != nullptr) {
- if (policyMix != NULL) {
+ if (policyMix != nullptr) {
*policyMix = mix;
}
return mixDevice;
@@ -379,7 +348,8 @@
return nullptr;
}
-status_t AudioPolicyMixCollection::getInputMixForAttr(audio_attributes_t attr, AudioMix **policyMix)
+status_t AudioPolicyMixCollection::getInputMixForAttr(
+ audio_attributes_t attr, sp<AudioPolicyMix> *policyMix)
{
if (strncmp(attr.tags, "addr=", strlen("addr=")) != 0) {
return BAD_VALUE;
@@ -389,9 +359,8 @@
#ifdef LOG_NDEBUG
ALOGV("getInputMixForAttr looking for address %s\n mixes available:", address.string());
for (size_t i = 0; i < size(); i++) {
- sp<AudioPolicyMix> policyMix = valueAt(i);
- const AudioMix *mix = policyMix->getMix();
- ALOGV("\tmix %zu address=%s", i, mix->mDeviceAddress.string());
+ sp<AudioPolicyMix> audioPolicyMix = valueAt(i);
+ ALOGV("\tmix %zu address=%s", i, audioPolicyMix->mDeviceAddress.string());
}
#endif
@@ -401,13 +370,14 @@
return BAD_VALUE;
}
sp<AudioPolicyMix> audioPolicyMix = valueAt(index);
- AudioMix *mix = audioPolicyMix->getMix();
- if (mix->mMixType != MIX_TYPE_PLAYERS) {
+ if (audioPolicyMix->mMixType != MIX_TYPE_PLAYERS) {
ALOGW("getInputMixForAttr() bad policy mix type for address %s", address.string());
return BAD_VALUE;
}
- *policyMix = mix;
+ if (policyMix != nullptr) {
+ *policyMix = audioPolicyMix;
+ }
return NO_ERROR;
}
@@ -418,7 +388,7 @@
// for each player mix: add a rule to match or exclude the uid based on the device
for (size_t i = 0; i < size(); i++) {
- const AudioMix *mix = valueAt(i)->getMix();
+ const AudioPolicyMix *mix = valueAt(i).get();
if (mix->mMixType != MIX_TYPE_PLAYERS) {
continue;
}
@@ -447,7 +417,7 @@
// for each player mix: remove existing rules that match or exclude this uid
for (size_t i = 0; i < size(); i++) {
bool foundUidRule = false;
- const AudioMix *mix = valueAt(i)->getMix();
+ const AudioPolicyMix *mix = valueAt(i).get();
if (mix->mMixType != MIX_TYPE_PLAYERS) {
continue;
}
@@ -475,7 +445,7 @@
// for each player mix: find rules that don't exclude this uid, and add the device to the list
for (size_t i = 0; i < size(); i++) {
bool ruleAllowsUid = true;
- const AudioMix *mix = valueAt(i)->getMix();
+ const AudioPolicyMix *mix = valueAt(i).get();
if (mix->mMixType != MIX_TYPE_PLAYERS) {
continue;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
index 9fcf5e7..a66c695 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPort.cpp
@@ -479,4 +479,14 @@
dstConfig, srcConfig, AUDIO_PORT_CONFIG_FLAGS, { AUDIO_INPUT_FLAG_NONE });
}
+bool AudioPortConfig::hasGainController(bool canUseForVolume) const
+{
+ sp<AudioPort> audioport = getAudioPort();
+ if (audioport == nullptr) {
+ return false;
+ }
+ return canUseForVolume ? audioport->getGains().canUseForVolume()
+ : audioport->getGains().size() > 0;
+}
+
} // namespace android
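
hasGainController(true) delegates to AudioGains::canUseForVolume(), whose implementation is not part of this section; presumably it scans the port's gains for the new useForVolume flag parsed in Serializer.cpp below. A hypothetical sketch of such a helper, assuming AudioGains is a collection of sp<AudioGain> and that AudioGain exposes a getter paired with the setUseForVolume() setter shown in the serializer change (the getter name used here is an assumption):

    // Hypothetical sketch only; neither this helper nor the getter name is
    // taken from this patch section.
    #include "AudioGain.h"

    bool anyGainUsableForVolume(const android::AudioGains &gains) {
        for (const auto &gain : gains) {
            if (gain->canUseForVolume()) {  // assumed getter for the parsed useForVolume flag
                return true;
            }
        }
        return false;
    }
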
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index a3121d1..91961d0 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <audio_utils/string.h>
+#include <media/TypeConverter.h>
#include <set>
#include "DeviceDescriptor.h"
#include "TypeConverter.h"
@@ -346,10 +347,9 @@
if (!mTagName.isEmpty()) {
dst->appendFormat("%*s- tag name: %s\n", spaces, "", mTagName.string());
}
- std::string deviceLiteral;
- if (deviceToString(mDeviceType, deviceLiteral)) {
- dst->appendFormat("%*s- type: %-48s\n", spaces, "", deviceLiteral.c_str());
- }
+
+ dst->appendFormat("%*s- type: %-48s\n", spaces, "", ::android::toString(mDeviceType).c_str());
+
if (mAddress.size() != 0) {
dst->appendFormat("%*s- address: %-32s\n", spaces, "", mAddress.string());
}
@@ -401,9 +401,8 @@
void DeviceDescriptor::log() const
{
- std::string device;
- deviceToString(mDeviceType, device);
- ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType, device.c_str(),
+ ALOGI("Device id:%d type:0x%08X:%s, addr:%s", mId, mDeviceType,
+ ::android::toString(mDeviceType).c_str(),
mAddress.string());
AudioPort::log(" ");
diff --git a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
index e0b233d..ec7ff57 100644
--- a/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/HwModule.cpp
@@ -341,7 +341,7 @@
}
}
if (!allowToCreate) {
- ALOGE("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
+ ALOGV("%s: could not find HW module for device %s %04x address %s", __FUNCTION__,
name, deviceType, address);
return nullptr;
}
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 98d375c..5f820c2 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -64,7 +64,7 @@
}
};
-struct AudioGainTraits : public AndroidCollectionTraits<AudioGain, AudioGainCollection>
+struct AudioGainTraits : public AndroidCollectionTraits<AudioGain, AudioGains>
{
static constexpr const char *tag = "gain";
static constexpr const char *collectionTag = "gains";
@@ -84,6 +84,9 @@
static constexpr const char *minRampMs = "minRampMs";
/** needed if mode AUDIO_GAIN_MODE_RAMP. */
static constexpr const char *maxRampMs = "maxRampMs";
+ /** needed to allow using setPortGain instead of setStreamVolume. */
+ static constexpr const char *useForVolume = "useForVolume";
+
};
static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
@@ -201,25 +204,6 @@
static status_t deserialize(const xmlNode *root, AudioPolicyConfig *config);
};
-struct VolumeTraits : public AndroidCollectionTraits<VolumeCurve, VolumeCurvesCollection>
-{
- static constexpr const char *tag = "volume";
- static constexpr const char *collectionTag = "volumes";
- static constexpr const char *volumePointTag = "point";
- static constexpr const char *referenceTag = "reference";
-
- struct Attributes
- {
- static constexpr const char *stream = "stream";
- static constexpr const char *deviceCategory = "deviceCategory";
- static constexpr const char *reference = "ref";
- static constexpr const char *referenceName = "name";
- };
-
- static Return<Element> deserialize(const xmlNode *cur, PtrSerializingCtx serializingContext);
- // No Children
-};
-
struct SurroundSoundTraits
{
static constexpr const char *tag = "surroundSound";
@@ -394,9 +378,14 @@
if (!maxRampMsLiteral.empty() && convertTo(maxRampMsLiteral, maxRampMs)) {
gain->setMaxRampInMs(maxRampMs);
}
- ALOGV("%s: adding new gain mode %08x channel mask %08x min mB %d max mB %d", __func__,
- gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
- gain->getMaxValueInMb());
+ std::string useForVolumeLiteral = getXmlAttribute(cur, Attributes::useForVolume);
+ bool useForVolume = false;
+ if (!useForVolumeLiteral.empty() && convertTo(useForVolumeLiteral, useForVolume)) {
+ gain->setUseForVolume(useForVolume);
+ }
+ ALOGV("%s: adding new gain mode %08x channel mask %08x min mB %d max mB %d UseForVolume: %d",
+ __func__, gain->getMode(), gain->getChannelMask(), gain->getMinValueInMb(),
+ gain->getMaxValueInMb(), useForVolume);
if (gain->getMode() != 0) {
return gain;
@@ -703,67 +692,6 @@
return NO_ERROR;
}
-Return<VolumeTraits::Element> VolumeTraits::deserialize(const xmlNode *cur,
- PtrSerializingCtx /*serializingContext*/)
-{
- std::string streamTypeLiteral = getXmlAttribute(cur, Attributes::stream);
- if (streamTypeLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- audio_stream_type_t streamType;
- if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
- ALOGE("%s: Invalid %s", __func__, Attributes::stream);
- return Status::fromStatusT(BAD_VALUE);
- }
- std::string deviceCategoryLiteral = getXmlAttribute(cur, Attributes::deviceCategory);
- if (deviceCategoryLiteral.empty()) {
- ALOGE("%s: No %s found", __func__, Attributes::deviceCategory);
- return Status::fromStatusT(BAD_VALUE);
- }
- device_category deviceCategory;
- if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
- ALOGE("%s: Invalid %s=%s", __func__, Attributes::deviceCategory,
- deviceCategoryLiteral.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
-
- std::string referenceName = getXmlAttribute(cur, Attributes::reference);
- const xmlNode *ref = NULL;
- if (!referenceName.empty()) {
- ref = getReference<VolumeTraits>(cur->parent, referenceName);
- if (ref == NULL) {
- ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
- return Status::fromStatusT(BAD_VALUE);
- }
- }
-
- Element volCurve = new VolumeCurve(deviceCategory, streamType);
-
- for (const xmlNode *child = referenceName.empty() ? cur->xmlChildrenNode : ref->xmlChildrenNode;
- child != NULL; child = child->next) {
- if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(volumePointTag))) {
- auto pointDefinition = make_xmlUnique(xmlNodeListGetString(
- child->doc, child->xmlChildrenNode, 1));
- if (pointDefinition == nullptr) {
- return Status::fromStatusT(BAD_VALUE);
- }
- ALOGV("%s: %s=%s",
- __func__, tag, reinterpret_cast<const char*>(pointDefinition.get()));
- std::vector<int32_t> point;
- collectionFromString<DefaultTraits<int32_t>>(
- reinterpret_cast<const char*>(pointDefinition.get()), point, ",");
- if (point.size() != 2) {
- ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
- reinterpret_cast<const char*>(pointDefinition.get()));
- return Status::fromStatusT(BAD_VALUE);
- }
- volCurve->add(CurvePoint(point[0], point[1]));
- }
- }
- return volCurve;
-}
-
status_t SurroundSoundTraits::deserialize(const xmlNode *root, AudioPolicyConfig *config)
{
config->setDefaultSurroundFormats();
@@ -851,14 +779,6 @@
}
config->setHwModules(modules);
- // deserialize volume section
- VolumeTraits::Collection volumes;
- status = deserializeCollection<VolumeTraits>(root, &volumes, config);
- if (status != NO_ERROR) {
- return status;
- }
- config->setVolumes(volumes);
-
// Global Configuration
GlobalConfigTraits::deserialize(root, config);
diff --git a/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
new file mode 100644
index 0000000..57bd4f8
--- /dev/null
+++ b/services/audiopolicy/config/a2dp_in_audio_policy_configuration.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Input Audio HAL Audio Policy Configuration file -->
+<module name="a2dp" halVersion="2.0">
+ <mixPorts>
+ <mixPort name="a2dp input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <devicePort tagName="BT A2DP In" type="AUDIO_DEVICE_IN_BLUETOOTH_A2DP" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO"/>
+ </devicePort>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="a2dp input"
+ sources="BT A2DP In"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/config/audio_policy_configuration.xml b/services/audiopolicy/config/audio_policy_configuration.xml
index 42c52de..b28381b 100644
--- a/services/audiopolicy/config/audio_policy_configuration.xml
+++ b/services/audiopolicy/config/audio_policy_configuration.xml
@@ -1,5 +1,5 @@
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
-<!-- Copyright (C) 2015 The Android Open Source Project
+<!-- Copyright (C) 2019 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -173,8 +173,8 @@
</module>
- <!-- A2dp Audio HAL -->
- <xi:include href="a2dp_audio_policy_configuration.xml"/>
+ <!-- A2dp Input Audio HAL -->
+ <xi:include href="a2dp_in_audio_policy_configuration.xml"/>
<!-- Usb Audio HAL -->
<xi:include href="usb_audio_policy_configuration.xml"/>
@@ -182,8 +182,8 @@
<!-- Remote Submix Audio HAL -->
<xi:include href="r_submix_audio_policy_configuration.xml"/>
- <!-- Hearing aid Audio HAL -->
- <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+ <!-- Bluetooth Audio HAL -->
+ <xi:include href="bluetooth_audio_policy_configuration.xml"/>
<!-- MSD Audio HAL (optional) -->
<xi:include href="msd_audio_policy_configuration.xml"/>
@@ -191,7 +191,11 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to the engine configuration.
+ They are kept here for legacy support only.
+ The engine will fall back to these files if the engine configuration does not provide any.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
new file mode 100644
index 0000000..b4cc1d3
--- /dev/null
+++ b/services/audiopolicy/config/audio_policy_configuration_bluetooth_legacy_hal.xml
@@ -0,0 +1,211 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+
+<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
+ <!-- version section contains a “version” tag in the form “major.minor”, e.g. version=”1.0” -->
+
+ <!-- Global configuration Declaration -->
+ <globalConfiguration speaker_drc_enabled="true"/>
+
+
+ <!-- Modules section:
+ There is one section per audio HW module present on the platform.
+ Each module section will contain two mandatory tags for the audio HAL: “halVersion” and “name”.
+ The module names are the same as in current .conf file:
+ “primary”, “A2DP”, “remote_submix”, “USB”
+ Each module will contain the following sections:
+ “devicePorts”: a list of device descriptors for all input and output devices accessible via this
+ module.
+ This contains both permanently attached devices and removable devices.
+ “mixPorts”: listing all output and input streams exposed by the audio HAL
+ “routes”: list of possible connections between input and output devices or between stream and
+ devices.
+ "route": is defined by an attribute:
+ -"type": <mux|mix> means all sources are mutually exclusive (mux) or can be mixed (mix)
+ -"sink": the sink involved in this route
+ -"sources": all the sources that can be connected to the sink via this route
+ “attachedDevices”: permanently attached devices.
+ The attachedDevices section is a list of devices names. The names correspond to device names
+ defined in <devicePorts> section.
+ “defaultOutputDevice”: device to be used by default when no policy rule applies
+ -->
+ <modules>
+ <!-- Primary Audio HAL -->
+ <module name="primary" halVersion="3.0">
+ <attachedDevices>
+ <item>Speaker</item>
+ <item>Built-In Mic</item>
+ <item>Built-In Back Mic</item>
+ </attachedDevices>
+ <defaultOutputDevice>Speaker</defaultOutputDevice>
+ <mixPorts>
+ <mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="deep_buffer" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ <mixPort name="compressed_offload" role="source"
+ flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
+ <profile name="" format="AUDIO_FORMAT_MP3"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ <profile name="" format="AUDIO_FORMAT_AAC_LC"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="voice_tx" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </mixPort>
+ <mixPort name="primary input" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </mixPort>
+ <mixPort name="voice_rx" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- Output devices declaration, i.e. Sink DEVICE PORT -->
+ <devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ <gains>
+ <gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
+ minValueMB="-8400"
+ maxValueMB="4000"
+ defaultValueMB="0"
+ stepValueMB="100"/>
+ </gains>
+ </devicePort>
+ <devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
+ </devicePort>
+
+ <devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
+ channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
+ </devicePort>
+ <devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ <devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
+ </devicePort>
+ </devicePorts>
+ <!-- route declaration, i.e. list all available sources for a given sink -->
+ <routes>
+ <route type="mix" sink="Earpiece"
+ sources="primary output,deep_buffer,BT SCO Headset Mic"/>
+ <route type="mix" sink="Speaker"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headset"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="Wired Headphones"
+ sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
+ <route type="mix" sink="primary input"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
+ <route type="mix" sink="Telephony Tx"
+ sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic, voice_tx"/>
+ <route type="mix" sink="voice_rx"
+ sources="Telephony Rx"/>
+ </routes>
+
+ </module>
+
+ <!-- A2dp Audio HAL -->
+ <xi:include href="a2dp_audio_policy_configuration.xml"/>
+
+ <!-- Usb Audio HAL -->
+ <xi:include href="usb_audio_policy_configuration.xml"/>
+
+ <!-- Remote Submix Audio HAL -->
+ <xi:include href="r_submix_audio_policy_configuration.xml"/>
+
+ <!-- Hearing aid Audio HAL -->
+ <xi:include href="hearing_aid_audio_policy_configuration.xml"/>
+
+ <!-- MSD Audio HAL (optional) -->
+ <xi:include href="msd_audio_policy_configuration.xml"/>
+
+ </modules>
+ <!-- End of Modules section -->
+
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to the engine configuration.
+ They are kept here for legacy support only.
+ The engine will fall back to these files if the engine configuration does not provide any.
+ -->
+
+ <xi:include href="audio_policy_volumes.xml"/>
+ <xi:include href="default_volume_tables.xml"/>
+
+ <!-- End of Volume section -->
+
+ <!-- Surround Sound configuration -->
+
+ <xi:include href="surround_sound_configuration_5_0.xml"/>
+
+ <!-- End of Surround Sound configuration -->
+
+</audioPolicyConfiguration>
diff --git a/services/audiopolicy/config/audio_policy_configuration_generic.xml b/services/audiopolicy/config/audio_policy_configuration_generic.xml
index 40dcc22..9ad609d 100644
--- a/services/audiopolicy/config/audio_policy_configuration_generic.xml
+++ b/services/audiopolicy/config/audio_policy_configuration_generic.xml
@@ -30,7 +30,11 @@
</modules>
<!-- End of Modules section -->
- <!-- Volume section -->
+ <!-- Volume section:
+ IMPORTANT NOTE: Volume tables have been moved to the engine configuration.
+ They are kept here for legacy support only.
+ The engine will fall back to these files if the engine configuration does not provide any.
+ -->
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
diff --git a/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
new file mode 100644
index 0000000..ce78eb0
--- /dev/null
+++ b/services/audiopolicy/config/bluetooth_audio_policy_configuration.xml
@@ -0,0 +1,44 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Bluetooth Audio HAL Audio Policy Configuration file -->
+<module name="bluetooth" halVersion="2.0">
+ <mixPorts>
+ <!-- A2DP Audio Ports -->
+ <mixPort name="a2dp output" role="source"/>
+ <!-- Hearing AIDs Audio Ports -->
+ <mixPort name="hearing aid output" role="source">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="24000,16000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </mixPort>
+ </mixPorts>
+ <devicePorts>
+ <!-- A2DP Audio Ports -->
+ <devicePort tagName="BT A2DP Out" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Headphones" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <devicePort tagName="BT A2DP Speaker" type="AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER" role="sink">
+ <profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
+ samplingRates="44100,48000,88200,96000"
+ channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
+ </devicePort>
+ <!-- Hearing AIDs Audio Ports -->
+ <devicePort tagName="BT Hearing Aid Out" type="AUDIO_DEVICE_OUT_HEARING_AID" role="sink"/>
+ </devicePorts>
+ <routes>
+ <route type="mix" sink="BT A2DP Out"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Headphones"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT A2DP Speaker"
+ sources="a2dp output"/>
+ <route type="mix" sink="BT Hearing Aid Out"
+ sources="hearing aid output"/>
+ </routes>
+</module>
diff --git a/services/audiopolicy/engine/common/Android.bp b/services/audiopolicy/engine/common/Android.bp
index e6ede07..d0775ad 100644
--- a/services/audiopolicy/engine/common/Android.bp
+++ b/services/audiopolicy/engine/common/Android.bp
@@ -17,3 +17,31 @@
host_supported: true,
export_include_dirs: ["include"],
}
+
+cc_library_static {
+ name: "libaudiopolicyengine_common",
+ srcs: [
+ "src/EngineBase.cpp",
+ "src/ProductStrategy.cpp",
+ "src/VolumeCurve.cpp",
+ "src/VolumeGroup.cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+ header_libs: [
+ "libbase_headers",
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_common_headers",
+ "libaudiopolicyengine_interface_headers",
+ ],
+ export_header_lib_headers: [
+ "libaudiopolicyengine_common_headers",
+ ],
+ static_libs: [
+ "libaudiopolicycomponents",
+ "libaudiopolicyengine_config",
+ ],
+}
diff --git a/services/audiopolicy/engine/common/include/EngineBase.h b/services/audiopolicy/engine/common/include/EngineBase.h
index 5c33fb3..cedc78f 100644
--- a/services/audiopolicy/engine/common/include/EngineBase.h
+++ b/services/audiopolicy/engine/common/include/EngineBase.h
@@ -19,6 +19,7 @@
#include <EngineConfig.h>
#include <AudioPolicyManagerInterface.h>
#include <ProductStrategy.h>
+#include <VolumeGroup.h>
namespace android {
namespace audio_policy {
@@ -67,6 +68,24 @@
status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const override;
+ VolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const override;
+
+ VolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const override;
+
+ IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const override
+ {
+ return mVolumeGroups.find(group) != end(mVolumeGroups) ?
+ mVolumeGroups.at(group)->getVolumeCurves() : nullptr;
+ }
+
+ VolumeGroupVector getVolumeGroups() const override;
+
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const override;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const override;
+
+ status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) const override;
+
void dump(String8 *dst) const override;
@@ -87,10 +106,20 @@
return is_state_in_call(getPhoneState());
}
-private:
+ VolumeSource toVolumeSource(audio_stream_type_t stream) const
+ {
+ return static_cast<VolumeSource>(getVolumeGroupForStreamType(stream));
+ }
+
+ status_t switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst);
+
+ status_t restoreOriginVolumeCurve(audio_stream_type_t stream);
+
+ private:
AudioPolicyManagerObserver *mApmObserver = nullptr;
ProductStrategyMap mProductStrategies;
+ VolumeGroupMap mVolumeGroups;
audio_mode_t mPhoneState = AUDIO_MODE_NORMAL; /**< current phone state. */
/** current forced use configuration. */
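
The new accessors tie the legacy stream types to volume groups: a stream resolves to a volume_group_t, which doubles as the VolumeSource used by the output descriptors, and the group in turn owns the curves. A usage sketch, assuming an EngineBase-derived engine instance:

    // Usage sketch, not part of the patch.
    #include <system/audio.h>
    #include <EngineBase.h>

    int musicVolumeIndexMax(const android::audio_policy::EngineBase &engine) {
        const auto group = engine.getVolumeGroupForStreamType(AUDIO_STREAM_MUSIC);
        auto *curves = engine.getVolumeCurvesForVolumeGroup(group);
        return curves != nullptr ? curves->getVolumeIndexMax() : -1;
    }
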
diff --git a/services/audiopolicy/engine/common/include/ProductStrategy.h b/services/audiopolicy/engine/common/include/ProductStrategy.h
index 72505b2..1a2a198 100644
--- a/services/audiopolicy/engine/common/include/ProductStrategy.h
+++ b/services/audiopolicy/engine/common/include/ProductStrategy.h
@@ -16,6 +16,8 @@
#pragma once
+#include "VolumeGroup.h"
+
#include <system/audio.h>
#include <AudioPolicyManagerInterface.h>
#include <utils/RefBase.h>
@@ -38,7 +40,7 @@
private:
struct AudioAttributes {
audio_stream_type_t mStream = AUDIO_STREAM_DEFAULT;
- uint32_t mGroupId = 0;
+ volume_group_t mVolumeGroup = VOLUME_GROUP_NONE;
audio_attributes_t mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
};
@@ -85,6 +87,12 @@
audio_attributes_t getAttributesForStreamType(audio_stream_type_t stream) const;
audio_stream_type_t getStreamTypeForAttributes(const audio_attributes_t &attr) const;
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
+ volume_group_t getDefaultVolumeGroup() const;
+
bool isDefault() const;
void dump(String8 *dst, int spaces = 0) const;
@@ -108,6 +116,10 @@
{
public:
/**
+ * @brief initialize: set default product strategy in cache.
+ */
+ void initialize();
+ /**
* @brief getProductStrategyForAttribute. The order of the vector is dimensionning.
* @param attr
* @return applicable product strategy for the given attribute, default if none applicable.
@@ -136,9 +148,18 @@
std::string getDeviceAddressForProductStrategy(product_strategy_t strategy) const;
+ volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const;
+
+ volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const;
+
+ volume_group_t getDefaultVolumeGroup() const;
+
product_strategy_t getDefault() const;
void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ product_strategy_t mDefaultStrategy = PRODUCT_STRATEGY_NONE;
};
} // namespace android
diff --git a/services/audiopolicy/engine/common/include/VolumeCurve.h b/services/audiopolicy/engine/common/include/VolumeCurve.h
new file mode 100644
index 0000000..54314e3
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeCurve.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (C) 2015 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "IVolumeCurves.h"
+#include <policy.h>
+#include <AudioPolicyManagerInterface.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <utils/String8.h>
+#include <utils/SortedVector.h>
+#include <utils/KeyedVector.h>
+#include <system/audio.h>
+#include <cutils/config_utils.h>
+#include <string>
+#include <map>
+#include <utility>
+
+namespace android {
+
+struct CurvePoint
+{
+ CurvePoint() {}
+ CurvePoint(int index, int attenuationInMb) :
+ mIndex(index), mAttenuationInMb(attenuationInMb) {}
+ uint32_t mIndex;
+ int mAttenuationInMb;
+};
+
+inline bool operator< (const CurvePoint &lhs, const CurvePoint &rhs)
+{
+ return lhs.mIndex < rhs.mIndex;
+}
+
+// A volume curve for a given use case and device category
+// It contains a list of points expressing the attenuation in millibels for
+// a given volume index from 0 to 100
+class VolumeCurve : public RefBase
+{
+public:
+ VolumeCurve(device_category device) : mDeviceCategory(device) {}
+
+ void add(const CurvePoint &point) { mCurvePoints.add(point); }
+
+ float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const;
+
+ device_category getDeviceCategory() const { return mDeviceCategory; }
+
+private:
+ const device_category mDeviceCategory;
+ SortedVector<CurvePoint> mCurvePoints;
+};
+
+// Volume Curves for a given use case indexed by device category
+class VolumeCurves : public KeyedVector<device_category, sp<VolumeCurve> >,
+ public IVolumeCurves
+{
+public:
+ VolumeCurves(int indexMin = 0, int indexMax = 100) :
+ mIndexMin(indexMin), mIndexMax(indexMax)
+ {
+ addCurrentVolumeIndex(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
+ }
+ status_t initVolume(int indexMin, int indexMax) override
+ {
+ mIndexMin = indexMin;
+ mIndexMax = indexMax;
+ return NO_ERROR;
+ }
+
+ sp<VolumeCurve> getCurvesFor(device_category device) const
+ {
+ if (indexOfKey(device) < 0) {
+ return 0;
+ }
+ return valueFor(device);
+ }
+
+ virtual int getVolumeIndex(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ // there is always a valid entry for AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME
+ if (mIndexCur.find(device) == end(mIndexCur)) {
+ device = AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME;
+ }
+ return mIndexCur.at(device);
+ }
+
+ virtual bool canBeMuted() const { return mCanBeMuted; }
+ virtual void clearCurrentVolumeIndex() { mIndexCur.clear(); }
+ void addCurrentVolumeIndex(audio_devices_t device, int index) override
+ {
+ mIndexCur[device] = index;
+ }
+
+ int getVolumeIndexMin() const { return mIndexMin; }
+
+ int getVolumeIndexMax() const { return mIndexMax; }
+
+ bool hasVolumeIndexForDevice(audio_devices_t device) const
+ {
+ device = Volume::getDeviceForVolume(device);
+ return mIndexCur.find(device) != end(mIndexCur);
+ }
+
+ status_t switchCurvesFrom(const VolumeCurves &referenceCurves)
+ {
+ if (size() != referenceCurves.size()) {
+ ALOGE("%s! device category not aligned, cannot switch", __FUNCTION__);
+ return BAD_TYPE;
+ }
+ for (size_t index = 0; index < size(); index++) {
+ device_category cat = keyAt(index);
+ setVolumeCurve(cat, referenceCurves.getOriginVolumeCurve(cat));
+ }
+ return NO_ERROR;
+ }
+ status_t restoreOriginVolumeCurve()
+ {
+ return switchCurvesFrom(*this);
+ }
+
+ const sp<VolumeCurve> getOriginVolumeCurve(device_category deviceCategory) const
+ {
+ ALOG_ASSERT(mOriginVolumeCurves.indexOfKey(deviceCategory) >= 0, "Invalid device category");
+ return mOriginVolumeCurves.valueFor(deviceCategory);
+ }
+ void setVolumeCurve(device_category deviceCategory, const sp<VolumeCurve> &volumeCurve)
+ {
+ ALOG_ASSERT(indexOfKey(deviceCategory) >= 0, "Invalid device category for Volume Curve");
+ replaceValueFor(deviceCategory, volumeCurve);
+ }
+
+ ssize_t add(const sp<VolumeCurve> &volumeCurve)
+ {
+ device_category deviceCategory = volumeCurve->getDeviceCategory();
+ ssize_t index = indexOfKey(deviceCategory);
+ if (index < 0) {
+ // Keep track of original Volume Curves per device category in order to switch curves.
+ mOriginVolumeCurves.add(deviceCategory, volumeCurve);
+ return KeyedVector::add(deviceCategory, volumeCurve);
+ }
+ return index;
+ }
+
+ virtual float volIndexToDb(device_category deviceCat, int indexInUi) const
+ {
+ sp<VolumeCurve> vc = getCurvesFor(deviceCat);
+ if (vc != 0) {
+ return vc->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
+ } else {
+ ALOGE("Invalid device category %d for Volume Curve", deviceCat);
+ return 0.0f;
+ }
+ }
+ void addAttributes(const audio_attributes_t &attr)
+ {
+ mAttributes.push_back(attr);
+ }
+ AttributesVector getAttributes() const override { return mAttributes; }
+ void addStreamType(audio_stream_type_t stream)
+ {
+ mStreams.push_back(stream);
+ }
+ StreamTypeVector getStreamTypes() const override { return mStreams; }
+
+ void dump(String8 *dst, int spaces = 0, bool curvePoints = false) const override;
+
+private:
+ KeyedVector<device_category, sp<VolumeCurve> > mOriginVolumeCurves;
+ std::map<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
+ int mIndexMin; /**< min volume index. */
+ int mIndexMax; /**< max volume index. */
+ const bool mCanBeMuted = true; /**< true if the stream can be muted. */
+
+ AttributesVector mAttributes;
+ StreamTypeVector mStreams; /**< Keep it for legacy. */
+};
+
+} // namespace android
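The VolumeCurve/VolumeCurves pair above maps a UI volume index onto an attenuation in dB by interpolating between the configured (index, millibel) points. A minimal usage sketch follows, assuming the new header and the device_category enum are on the include path; the helper name and the speaker points (taken from the example engine volume XML later in this change) are illustrative only.

    // Sketch: build one speaker curve and query it the way the policy engine does.
    #include "VolumeCurve.h"

    using android::sp;
    using android::VolumeCurve;
    using android::VolumeCurves;

    static float speakerDbForIndex(int indexInUi)
    {
        sp<VolumeCurve> curve = new VolumeCurve(DEVICE_CATEGORY_SPEAKER);
        curve->add({0, -4200});     // index 0   -> -42 dB
        curve->add({33, -2800});
        curve->add({66, -1400});
        curve->add({100, 0});       // index 100 ->   0 dB

        VolumeCurves curves(/*indexMin*/ 0, /*indexMax*/ 40);
        curves.add(curve);

        // volIndexToDb() rescales [indexMin..indexMax] onto the 0..100 curve axis and
        // interpolates linearly between the two surrounding curve points.
        return curves.volIndexToDb(DEVICE_CATEGORY_SPEAKER, indexInUi);
    }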
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
new file mode 100644
index 0000000..c34b406
--- /dev/null
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <AudioPolicyManagerInterface.h>
+#include <VolumeCurve.h>
+#include <system/audio.h>
+#include <utils/RefBase.h>
+#include <HandleGenerator.h>
+#include <string>
+#include <vector>
+#include <map>
+#include <utils/Errors.h>
+
+namespace android {
+
+class VolumeGroup : public virtual RefBase, private HandleGenerator<uint32_t>
+{
+public:
+ VolumeGroup(const std::string &name, int indexMin, int indexMax);
+ std::string getName() const { return mName; }
+ volume_group_t getId() const { return mId; }
+
+ void add(const sp<VolumeCurve> &curve);
+
+ VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
+
+ void addSupportedAttributes(const audio_attributes_t &attr);
+ AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+
+ void addSupportedStream(audio_stream_type_t stream);
+ StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
+
+ void dump(String8 *dst, int spaces = 0) const;
+
+private:
+ const std::string mName;
+ const volume_group_t mId;
+ VolumeCurves mGroupVolumeCurves;
+};
+
+class VolumeGroupMap : public std::map<volume_group_t, sp<VolumeGroup> >
+{
+public:
+ void dump(String8 *dst, int spaces = 0) const;
+};
+
+} // namespace android
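A VolumeGroup is essentially a named VolumeCurves instance with a generated volume_group_t id, plus the attributes and legacy streams it serves; VolumeGroupMap indexes groups by that id. A short sketch of how one group is assembled, mirroring what the loader in EngineBase does for each parsed volumeGroup element; the "ring" values are illustrative only.

    // Sketch: assemble a "ring" volume group and register it by its generated id.
    #include "VolumeGroup.h"

    using namespace android;

    static void addRingGroup(VolumeGroupMap &groups)
    {
        sp<VolumeGroup> group = new VolumeGroup("ring", /*indexMin*/ 0, /*indexMax*/ 40);

        sp<VolumeCurve> speaker = new VolumeCurve(DEVICE_CATEGORY_SPEAKER);
        speaker->add({0, -4200});
        speaker->add({100, 0});
        group->add(speaker);                          // one curve per device category

        group->addSupportedStream(AUDIO_STREAM_RING); // legacy stream alias
        audio_attributes_t ringAttr = AUDIO_ATTRIBUTES_INITIALIZER;
        ringAttr.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
        group->addSupportedAttributes(ringAttr);

        groups[group->getId()] = group;               // keyed by volume_group_t
    }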
diff --git a/services/audiopolicy/engine/common/src/EngineBase.cpp b/services/audiopolicy/engine/common/src/EngineBase.cpp
index 755f2a8..07a7e65 100644
--- a/services/audiopolicy/engine/common/src/EngineBase.cpp
+++ b/services/audiopolicy/engine/common/src/EngineBase.cpp
@@ -15,7 +15,7 @@
*/
#define LOG_TAG "APM::AudioPolicyEngine/Base"
-#define LOG_NDEBUG 0
+//#define LOG_NDEBUG 0
#include "EngineBase.h"
#include "EngineDefaultConfig.h"
@@ -55,8 +55,10 @@
if (!is_state_in_call(oldState) && is_state_in_call(state)) {
ALOGV(" Entering call in setPhoneState()");
+ switchVolumeCurve(AUDIO_STREAM_VOICE_CALL, AUDIO_STREAM_DTMF);
} else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
ALOGV(" Exiting call in setPhoneState()");
+ restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
}
return NO_ERROR;
}
@@ -94,28 +96,60 @@
engineConfig::ParsingResult EngineBase::loadAudioPolicyEngineConfig()
{
auto loadProductStrategies =
- [](auto& strategyConfigs, auto& productStrategies) {
- uint32_t groupid = 0;
+ [](auto& strategyConfigs, auto& productStrategies, auto& volumeGroups) {
for (auto& strategyConfig : strategyConfigs) {
sp<ProductStrategy> strategy = new ProductStrategy(strategyConfig.name);
for (const auto &group : strategyConfig.attributesGroups) {
- for (const auto &attr : group.attributesVect) {
- strategy->addAttributes({group.stream, groupid, attr});
+ const auto &iter = std::find_if(begin(volumeGroups), end(volumeGroups),
+ [&group](const auto &volumeGroup) {
+ return group.volumeGroup == volumeGroup.second->getName(); });
+ ALOG_ASSERT(iter != end(volumeGroups), "Invalid Volume Group Name %s",
+ group.volumeGroup.c_str());
+ if (group.stream != AUDIO_STREAM_DEFAULT) {
+ iter->second->addSupportedStream(group.stream);
}
- groupid += 1;
+ for (const auto &attr : group.attributesVect) {
+ strategy->addAttributes({group.stream, iter->second->getId(), attr});
+ iter->second->addSupportedAttributes(attr);
+ }
}
product_strategy_t strategyId = strategy->getId();
productStrategies[strategyId] = strategy;
}
};
+ auto loadVolumeGroups = [](auto &volumeConfigs, auto &volumeGroups) {
+ for (auto &volumeConfig : volumeConfigs) {
+ sp<VolumeGroup> volumeGroup = new VolumeGroup(volumeConfig.name, volumeConfig.indexMin,
+ volumeConfig.indexMax);
+ volumeGroups[volumeGroup->getId()] = volumeGroup;
+ for (auto &configCurve : volumeConfig.volumeCurves) {
+ device_category deviceCat = DEVICE_CATEGORY_SPEAKER;
+ if (!DeviceCategoryConverter::fromString(configCurve.deviceCategory, deviceCat)) {
+ ALOGE("%s: Invalid %s", __FUNCTION__, configCurve.deviceCategory.c_str());
+ continue;
+ }
+ sp<VolumeCurve> curve = new VolumeCurve(deviceCat);
+ for (auto &point : configCurve.curvePoints) {
+ curve->add({point.index, point.attenuationInMb});
+ }
+ volumeGroup->add(curve);
+ }
+ }
+ };
auto result = engineConfig::parse();
if (result.parsedConfig == nullptr) {
ALOGW("%s: No configuration found, using default matching phone experience.", __FUNCTION__);
- result = {std::make_unique<engineConfig::Config>(gDefaultEngineConfig), 0};
+ engineConfig::Config config = gDefaultEngineConfig;
+ android::status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
+ result = {std::make_unique<engineConfig::Config>(config),
+ static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
}
ALOGE_IF(result.nbSkippedElement != 0, "skipped %zu elements", result.nbSkippedElement);
- loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies);
+ loadVolumeGroups(result.parsedConfig->volumeGroups, mVolumeGroups);
+ loadProductStrategies(result.parsedConfig->productStrategies, mProductStrategies,
+ mVolumeGroups);
+ mProductStrategies.initialize();
return result;
}
@@ -173,9 +207,75 @@
return NO_ERROR;
}
+VolumeCurves *EngineBase::getVolumeCurvesForAttributes(const audio_attributes_t &attr) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForAttributes(attr);
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s", toString(attr).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+VolumeCurves *EngineBase::getVolumeCurvesForStreamType(audio_stream_type_t stream) const
+{
+ volume_group_t volGr = mProductStrategies.getVolumeGroupForStreamType(stream);
+ if (volGr == VOLUME_GROUP_NONE) {
+ volGr = mProductStrategies.getDefaultVolumeGroup();
+ }
+ const auto &iter = mVolumeGroups.find(volGr);
+ LOG_ALWAYS_FATAL_IF(iter == std::end(mVolumeGroups), "No volume groups for %s",
+ toString(stream).c_str());
+ return mVolumeGroups.at(volGr)->getVolumeCurves();
+}
+
+status_t EngineBase::switchVolumeCurve(audio_stream_type_t streamSrc, audio_stream_type_t streamDst)
+{
+ auto srcCurves = getVolumeCurvesForStreamType(streamSrc);
+ auto dstCurves = getVolumeCurvesForStreamType(streamDst);
+
+ if (srcCurves == nullptr || dstCurves == nullptr) {
+ return BAD_VALUE;
+ }
+ return dstCurves->switchCurvesFrom(*srcCurves);
+}
+
+status_t EngineBase::restoreOriginVolumeCurve(audio_stream_type_t stream)
+{
+ VolumeCurves *curves = getVolumeCurvesForStreamType(stream);
+ return curves != nullptr ? curves->switchCurvesFrom(*curves) : BAD_VALUE;
+}
+
+VolumeGroupVector EngineBase::getVolumeGroups() const
+{
+ VolumeGroupVector group;
+ for (const auto &iter : mVolumeGroups) {
+ group.push_back(iter.first);
+ }
+ return group;
+}
+
+volume_group_t EngineBase::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ return mProductStrategies.getVolumeGroupForAttributes(attr);
+}
+
+volume_group_t EngineBase::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ return mProductStrategies.getVolumeGroupForStreamType(stream);
+}
+
+status_t EngineBase::listAudioVolumeGroups(AudioVolumeGroupVector &groups) const
+{
+ for (const auto &iter : mVolumeGroups) {
+ groups.push_back({iter.second->getName(), iter.second->getId(),
+ iter.second->getSupportedAttributes(), iter.second->getStreamTypes()});
+ }
+ return NO_ERROR;
+}
+
void EngineBase::dump(String8 *dst) const
{
mProductStrategies.dump(dst, 2);
+ mVolumeGroups.dump(dst, 2);
}
} // namespace audio_policy
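The resolution chain added here is: audio attributes, then the matching product strategy entry, then its volume_group_t, then that group's VolumeCurves. A hedged sketch of how a concrete engine built on EngineBase could be queried; the helper name, LOG_TAG and log formatting are illustrative only.

    // Sketch: resolve the volume group and curves the engine associates with media attributes.
    #define LOG_TAG "VolumeGroupSketch"
    #include "EngineBase.h"
    #include <log/log.h>
    #include <system/audio.h>

    using namespace android;
    using namespace android::audio_policy;

    static void logMediaVolumeGroup(EngineBase &engine)
    {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;

        volume_group_t group = engine.getVolumeGroupForAttributes(attr);  // via product strategies
        VolumeCurves *curves = engine.getVolumeCurvesForAttributes(attr); // the same group's curves

        ALOGD("media -> volume group %d, index range [%d..%d]",
              static_cast<int>(group),
              curves->getVolumeIndexMin(), curves->getVolumeIndexMax());
    }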
diff --git a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
index 3940c0c..fede0d9 100644
--- a/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
+++ b/services/audiopolicy/engine/common/src/EngineDefaultConfig.h
@@ -25,11 +25,11 @@
const engineConfig::ProductStrategies gOrderedStrategies = {
{"STRATEGY_PHONE",
{
- {"phone", AUDIO_STREAM_VOICE_CALL,
+ {"phone", AUDIO_STREAM_VOICE_CALL, "AUDIO_STREAM_VOICE_CALL",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION, AUDIO_SOURCE_DEFAULT, 0,
""}},
},
- {"sco", AUDIO_STREAM_BLUETOOTH_SCO,
+ {"sco", AUDIO_STREAM_BLUETOOTH_SCO, "AUDIO_STREAM_BLUETOOTH_SCO",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_SCO,
""}},
}
@@ -37,18 +37,18 @@
},
{"STRATEGY_SONIFICATION",
{
- {"ring", AUDIO_STREAM_RING,
+ {"ring", AUDIO_STREAM_RING, "AUDIO_STREAM_RING",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE,
AUDIO_SOURCE_DEFAULT, 0, ""}}
},
- {"alarm", AUDIO_STREAM_ALARM,
+ {"alarm", AUDIO_STREAM_ALARM, "AUDIO_STREAM_ALARM",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM, AUDIO_SOURCE_DEFAULT, 0, ""}},
}
},
},
{"STRATEGY_ENFORCED_AUDIBLE",
{
- {"", AUDIO_STREAM_ENFORCED_AUDIBLE,
+ {"", AUDIO_STREAM_ENFORCED_AUDIBLE, "AUDIO_STREAM_ENFORCED_AUDIBLE",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
AUDIO_FLAG_AUDIBILITY_ENFORCED, ""}}
}
@@ -56,7 +56,7 @@
},
{"STRATEGY_ACCESSIBILITY",
{
- {"", AUDIO_STREAM_ACCESSIBILITY,
+ {"", AUDIO_STREAM_ACCESSIBILITY, "AUDIO_STREAM_ACCESSIBILITY",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY,
AUDIO_SOURCE_DEFAULT, 0, ""}}
}
@@ -64,7 +64,7 @@
},
{"STRATEGY_SONIFICATION_RESPECTFUL",
{
- {"", AUDIO_STREAM_NOTIFICATION,
+ {"", AUDIO_STREAM_NOTIFICATION, "AUDIO_STREAM_NOTIFICATION",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION, AUDIO_SOURCE_DEFAULT, 0, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST,
@@ -81,7 +81,7 @@
},
{"STRATEGY_MEDIA",
{
- {"music", AUDIO_STREAM_MUSIC,
+ {"music", AUDIO_STREAM_MUSIC, "AUDIO_STREAM_MUSIC",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT, 0, ""},
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_GAME, AUDIO_SOURCE_DEFAULT, 0, ""},
@@ -91,7 +91,7 @@
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}
},
},
- {"system", AUDIO_STREAM_SYSTEM,
+ {"system", AUDIO_STREAM_SYSTEM, "AUDIO_STREAM_SYSTEM",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_SONIFICATION,
AUDIO_SOURCE_DEFAULT, 0, ""}}
}
@@ -99,7 +99,7 @@
},
{"STRATEGY_DTMF",
{
- {"", AUDIO_STREAM_DTMF,
+ {"", AUDIO_STREAM_DTMF, "AUDIO_STREAM_DTMF",
{
{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING,
AUDIO_SOURCE_DEFAULT, 0, ""}
@@ -109,7 +109,7 @@
},
{"STRATEGY_TRANSMITTED_THROUGH_SPEAKER",
{
- {"", AUDIO_STREAM_TTS,
+ {"", AUDIO_STREAM_TTS, "AUDIO_STREAM_TTS",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT,
AUDIO_FLAG_BEACON, ""}}
}
@@ -117,14 +117,14 @@
},
{"STRATEGY_REROUTING",
{
- {"", AUDIO_STREAM_REROUTING,
+ {"", AUDIO_STREAM_REROUTING, "AUDIO_STREAM_REROUTING",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
}
},
},
{"STRATEGY_PATCH",
{
- {"", AUDIO_STREAM_PATCH,
+ {"", AUDIO_STREAM_PATCH, "AUDIO_STREAM_PATCH",
{{AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_UNKNOWN, AUDIO_SOURCE_DEFAULT, 0, ""}}
}
},
@@ -135,6 +135,7 @@
1.0,
gOrderedStrategies,
{},
+ {},
{}
};
} // namespace android
diff --git a/services/audiopolicy/engine/common/src/ProductStrategy.cpp b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
index 71607d1..f74f190 100644
--- a/services/audiopolicy/engine/common/src/ProductStrategy.cpp
+++ b/services/audiopolicy/engine/common/src/ProductStrategy.cpp
@@ -44,7 +44,7 @@
{
std::vector<android::AudioAttributes> androidAa;
for (const auto &attr : mAttributesVector) {
- androidAa.push_back({attr.mGroupId, attr.mStream, attr.mAttributes});
+ androidAa.push_back({attr.mVolumeGroup, attr.mStream, attr.mAttributes});
}
return androidAa;
}
@@ -69,7 +69,8 @@
}) != end(mAttributesVector);
}
-audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(const audio_attributes_t &attr) const
+audio_stream_type_t ProductStrategy::getStreamTypeForAttributes(
+ const audio_attributes_t &attr) const
{
const auto iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
[&attr](const auto &supportedAttr) {
@@ -110,6 +111,33 @@
return supportedAttr.mStream == streamType; }) != end(mAttributesVector);
}
+volume_group_t ProductStrategy::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (AudioProductStrategy::attributesMatches(supportedAttr.mAttributes, attr)) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &supportedAttr : mAttributesVector) {
+ if (supportedAttr.mStream == stream) {
+ return supportedAttr.mVolumeGroup;
+ }
+ }
+ return VOLUME_GROUP_NONE;
+}
+
+volume_group_t ProductStrategy::getDefaultVolumeGroup() const
+{
+ const auto &iter = std::find_if(begin(mAttributesVector), end(mAttributesVector),
+ [](const auto &attr) {return attr.mAttributes == defaultAttr;});
+ return iter != end(mAttributesVector) ? iter->mVolumeGroup : VOLUME_GROUP_NONE;
+}
+
void ProductStrategy::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
@@ -121,7 +149,7 @@
deviceLiteral.c_str(), mDeviceAddress.c_str());
for (const auto &attr : mAttributesVector) {
- dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mGroupId,
+ dst->appendFormat("%*sGroup: %d stream: %s\n", spaces + 3, "", attr.mVolumeGroup,
android::toString(attr.mStream).c_str());
dst->appendFormat("%*s Attributes: ", spaces + 3, "");
std::string attStr =
@@ -172,6 +200,9 @@
product_strategy_t ProductStrategyMap::getDefault() const
{
+ if (mDefaultStrategy != PRODUCT_STRATEGY_NONE) {
+ return mDefaultStrategy;
+ }
for (const auto &iter : *this) {
if (iter.second->isDefault()) {
ALOGV("%s: using default %s", __FUNCTION__, iter.second->getName().c_str());
@@ -231,6 +262,44 @@
return at(psId)->getDeviceAddress();
}
+volume_group_t ProductStrategyMap::getVolumeGroupForAttributes(const audio_attributes_t &attr) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForAttributes(attr);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ return getDefaultVolumeGroup();
+}
+
+volume_group_t ProductStrategyMap::getVolumeGroupForStreamType(audio_stream_type_t stream) const
+{
+ for (const auto &iter : *this) {
+ volume_group_t group = iter.second->getVolumeGroupForStreamType(stream);
+ if (group != VOLUME_GROUP_NONE) {
+ return group;
+ }
+ }
+ ALOGW("%s: no volume group for %s, using default", __func__, toString(stream).c_str());
+ return getDefaultVolumeGroup();
+}
+
+volume_group_t ProductStrategyMap::getDefaultVolumeGroup() const
+{
+ product_strategy_t defaultStrategy = getDefault();
+ if (defaultStrategy == PRODUCT_STRATEGY_NONE) {
+ return VOLUME_GROUP_NONE;
+ }
+ return at(defaultStrategy)->getDefaultVolumeGroup();
+}
+
+void ProductStrategyMap::initialize()
+{
+ mDefaultStrategy = getDefault();
+ ALOG_ASSERT(mDefaultStrategy != PRODUCT_STRATEGY_NONE, "No default product strategy found");
+}
+
void ProductStrategyMap::dump(String8 *dst, int spaces) const
{
dst->appendFormat("%*sProduct Strategies dump:", spaces, "");
diff --git a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
similarity index 61%
rename from services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
rename to services/audiopolicy/engine/common/src/VolumeCurve.cpp
index 2625733..c352578 100644
--- a/services/audiopolicy/common/managerdefinitions/src/VolumeCurve.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeCurve.cpp
@@ -19,13 +19,17 @@
#include "VolumeCurve.h"
#include "TypeConverter.h"
+#include <media/TypeConverter.h>
namespace android {
float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
{
ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
-
+ if (volIndexMin < 0 || volIndexMax < 0) {
+ // In order to let AudioService initialize the min and max, the convention is to use -1
+ return NAN;
+ }
if (indexInUi < volIndexMin) {
// an index of 0 means mute request when volIndexMin > 0
if (indexInUi == 0) {
@@ -64,8 +68,7 @@
((float)(mCurvePoints[indexInUiPosition].mIndex -
mCurvePoints[indexInUiPosition - 1].mIndex)) );
- ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
- mDeviceCategory, mStreamType,
+ ALOGV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
mCurvePoints[indexInUiPosition].mIndex,
((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
@@ -74,55 +77,52 @@
return decibels;
}
-void VolumeCurve::dump(String8 *dst) const
+void VolumeCurve::dump(String8 *dst, int spaces, bool curvePoints) const
{
+ if (!curvePoints) {
+ return;
+ }
dst->append(" {");
for (size_t i = 0; i < mCurvePoints.size(); i++) {
- dst->appendFormat("(%3d, %5d)",
- mCurvePoints[i].mIndex, mCurvePoints[i].mAttenuationInMb);
- dst->append(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
+ dst->appendFormat("%*s(%3d, %5d)", spaces, "", mCurvePoints[i].mIndex,
+ mCurvePoints[i].mAttenuationInMb);
+ dst->appendFormat(i == (mCurvePoints.size() - 1) ? " }\n" : ", ");
}
}
-void VolumeCurvesForStream::dump(String8 *dst, int spaces = 0, bool curvePoints) const
+void VolumeCurves::dump(String8 *dst, int spaces, bool curvePoints) const
{
if (!curvePoints) {
- dst->appendFormat("%s %02d %02d ",
- mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
- for (size_t i = 0; i < mIndexCur.size(); i++) {
- dst->appendFormat("%04x : %02d, ", mIndexCur.keyAt(i), mIndexCur.valueAt(i));
+// dst->appendFormat("%*s%02d %s %03d %03d ", spaces, "",
+// mStream, mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ dst->appendFormat("%*s Can be muted Index Min Index Max Index Cur [device : index]...\n",
+ spaces + 1, "");
+ dst->appendFormat("%*s %s %02d %02d ", spaces + 1, "",
+ mCanBeMuted ? "true " : "false", mIndexMin, mIndexMax);
+ for (const auto &pair : mIndexCur) {
+ dst->appendFormat("%04x : %02d, ", pair.first, pair.second);
}
- dst->append("\n");
+ dst->appendFormat("\n");
return;
}
-
+ std::string streamNames;
+ for (const auto &stream : mStreams) {
+ streamNames += android::toString(stream) + "("+std::to_string(stream)+") ";
+ }
+ dst->appendFormat("%*sVolume Curves Streams/Attributes, Curve points Streams for device"
+ " category (index, attenuation in millibel)\n", spaces, "");
+ dst->appendFormat("%*s Streams: %s \n", spaces, "", streamNames.c_str());
+ if (!mAttributes.empty()) dst->appendFormat("%*s Attributes:", spaces, "");
+ for (const auto &attributes : mAttributes) {
+ std::string attStr = attributes == defaultAttr ? "{ Any }" : android::toString(attributes);
+ dst->appendFormat("%*s %s\n", attributes == mAttributes.front() ? 0 : spaces + 13, "",
+ attStr.c_str());
+ }
for (size_t i = 0; i < size(); i++) {
std::string deviceCatLiteral;
DeviceCategoryConverter::toString(keyAt(i), deviceCatLiteral);
- dst->appendFormat("%*s %s :",
- spaces, "", deviceCatLiteral.c_str());
- valueAt(i)->dump(dst);
- }
- dst->append("\n");
-}
-
-void VolumeCurvesCollection::dump(String8 *dst) const
-{
- dst->append("\nStreams dump:\n");
- dst->append(
- " Stream Can be muted Index Min Index Max Index Cur [device : index]...\n");
- for (size_t i = 0; i < size(); i++) {
- dst->appendFormat(" %02zu ", i);
- valueAt(i).dump(dst);
- }
- dst->append("\nVolume Curves for Use Cases (aka Stream types) dump:\n");
- for (size_t i = 0; i < size(); i++) {
- std::string streamTypeLiteral;
- StreamTypeConverter::toString(keyAt(i), streamTypeLiteral);
- dst->appendFormat(
- " %s (%02zu): Curve points for device category (index, attenuation in millibel)\n",
- streamTypeLiteral.c_str(), i);
- valueAt(i).dump(dst, 2, true);
+ dst->appendFormat("%*s %s :", spaces, "", deviceCatLiteral.c_str());
+ valueAt(i)->dump(dst, 1, true);
}
}
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
new file mode 100644
index 0000000..e189807
--- /dev/null
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "APM::AudioPolicyEngine/VolumeGroup"
+//#define LOG_NDEBUG 0
+
+#include "VolumeGroup.h"
+#include <media/TypeConverter.h>
+#include <utils/String8.h>
+#include <cstdint>
+#include <string>
+
+#include <log/log.h>
+
+
+namespace android {
+
+//
+// VolumeGroup implementation
+//
+VolumeGroup::VolumeGroup(const std::string &name, int indexMin, int indexMax) :
+ mName(name), mId(static_cast<volume_group_t>(HandleGenerator<uint32_t>::getNextHandle())),
+ mGroupVolumeCurves(VolumeCurves(indexMin, indexMax))
+{
+}
+
+void VolumeGroup::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
+ mGroupVolumeCurves.dump(dst, spaces + 2, true);
+ mGroupVolumeCurves.dump(dst, spaces + 2, false);
+ dst->appendFormat("\n");
+}
+
+void VolumeGroup::add(const sp<VolumeCurve> &curve)
+{
+ mGroupVolumeCurves.add(curve);
+}
+
+void VolumeGroup::addSupportedAttributes(const audio_attributes_t &attr)
+{
+ mGroupVolumeCurves.addAttributes(attr);
+}
+
+void VolumeGroup::addSupportedStream(audio_stream_type_t stream)
+{
+ mGroupVolumeCurves.addStreamType(stream);
+}
+
+//
+// VolumeGroupMap implementation
+//
+void VolumeGroupMap::dump(String8 *dst, int spaces) const
+{
+ dst->appendFormat("\n%*sVolume Groups dump:", spaces, "");
+ for (const auto &iter : *this) {
+ iter.second->dump(dst, spaces + 2);
+ }
+}
+
+} // namespace android
+
diff --git a/services/audiopolicy/engine/config/Android.bp b/services/audiopolicy/engine/config/Android.bp
new file mode 100644
index 0000000..6e72f2a
--- /dev/null
+++ b/services/audiopolicy/engine/config/Android.bp
@@ -0,0 +1,31 @@
+cc_library_static {
+ name: "libaudiopolicyengine_config",
+ export_include_dirs: ["include"],
+ include_dirs: [
+ "external/libxml2/include",
+ "external/icu/icu4c/source/common",
+ ],
+ srcs: [
+ "src/EngineConfig.cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+ shared_libs: [
+ "libmedia_helper",
+ "libandroidicu",
+ "libxml2",
+ "libutils",
+ "liblog",
+ "libcutils",
+ ],
+ static_libs: [
+ "libaudiopolicycomponents",
+ ],
+ header_libs: [
+ "libaudio_system_headers",
+ "libaudiopolicycommon",
+ ],
+}
diff --git a/services/audiopolicy/engine/config/Android.mk b/services/audiopolicy/engine/config/Android.mk
deleted file mode 100644
index fe7d961..0000000
--- a/services/audiopolicy/engine/config/Android.mk
+++ /dev/null
@@ -1,41 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-##################################################################
-# Component build
-##################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := $(LOCAL_PATH)/include
-
-LOCAL_C_INCLUDES := \
- $(LOCAL_EXPORT_C_INCLUDE_DIRS) \
- external/libxml2/include \
- external/icu/icu4c/source/common
-
-LOCAL_SRC_FILES := \
- src/EngineConfig.cpp
-
-LOCAL_CFLAGS += -Wall -Werror -Wextra
-
-LOCAL_SHARED_LIBRARIES := \
- libmedia_helper \
- libandroidicu \
- libxml2 \
- libutils \
- liblog
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE := libaudiopolicyengineconfig
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_HEADER_LIBRARIES := \
- libaudio_system_headers \
- libaudiopolicycommon
-
-include $(BUILD_SHARED_LIBRARY)
-
diff --git a/services/audiopolicy/engine/config/include/EngineConfig.h b/services/audiopolicy/engine/config/include/EngineConfig.h
index e18f687..7f5ed5e 100644
--- a/services/audiopolicy/engine/config/include/EngineConfig.h
+++ b/services/audiopolicy/engine/config/include/EngineConfig.h
@@ -40,11 +40,32 @@
struct AttributesGroup {
std::string name;
audio_stream_type_t stream;
+ std::string volumeGroup;
AttributesVector attributesVect;
};
using AttributesGroups = std::vector<AttributesGroup>;
+struct CurvePoint {
+ int index;
+ int attenuationInMb;
+};
+using CurvePoints = std::vector<CurvePoint>;
+
+struct VolumeCurve {
+ std::string deviceCategory;
+ CurvePoints curvePoints;
+};
+using VolumeCurves = std::vector<VolumeCurve>;
+
+struct VolumeGroup {
+ std::string name;
+ int indexMin;
+ int indexMax;
+ VolumeCurves volumeCurves;
+};
+using VolumeGroups = std::vector<VolumeGroup>;
+
struct ProductStrategy {
std::string name;
AttributesGroups attributesGroups;
@@ -78,6 +99,7 @@
ProductStrategies productStrategies;
Criteria criteria;
CriterionTypes criterionTypes;
+ VolumeGroups volumeGroups;
};
/** Result of `parse(const char*)` */
@@ -91,6 +113,7 @@
* @return audio policy usage @see Config
*/
ParsingResult parse(const char* path = DEFAULT_PATH);
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups);
} // namespace engineConfig
} // namespace android
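The parsed structures above mirror the volumeGroup XML one-to-one: a name, the index bounds, and one VolumeCurve (device category plus (index, millibel) points) per volume element. A sketch of the equivalent hand-built value, matching the oem_traffic_anouncement group defined in the example volume file later in this change; purely illustrative.

    // Sketch: what VolumeGroupTraits::deserialize() produces for a one-curve <volumeGroup>.
    #include <EngineConfig.h>

    using namespace android::engineConfig;

    static VolumeGroup makeOemTrafficGroup()
    {
        VolumeCurve speaker;
        speaker.deviceCategory = "DEVICE_CATEGORY_SPEAKER";
        speaker.curvePoints = {{0, -4200}, {33, -2800}, {66, -1400}, {100, 0}};

        return VolumeGroup{"oem_traffic_anouncement", /*indexMin*/ 0, /*indexMax*/ 40, {speaker}};
    }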
diff --git a/services/audiopolicy/engine/config/src/EngineConfig.cpp b/services/audiopolicy/engine/config/src/EngineConfig.cpp
index 3aa38cf..1ad7739 100644
--- a/services/audiopolicy/engine/config/src/EngineConfig.cpp
+++ b/services/audiopolicy/engine/config/src/EngineConfig.cpp
@@ -19,6 +19,7 @@
#include "EngineConfig.h"
#include <policy.h>
+#include <cutils/properties.h>
#include <media/TypeConverter.h>
#include <media/convert.h>
#include <utils/Log.h>
@@ -26,6 +27,7 @@
#include <libxml/xinclude.h>
#include <string>
#include <vector>
+#include <map>
#include <sstream>
#include <istream>
@@ -57,6 +59,7 @@
struct Attributes {
static constexpr const char *name = "name";
static constexpr const char *streamType = "streamType";
+ static constexpr const char *volumeGroup = "volumeGroup";
};
static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &ps);
};
@@ -107,6 +110,34 @@
static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
Collection &collection);
};
+struct VolumeTraits : public BaseSerializerTraits<VolumeCurve, VolumeCurves> {
+ static constexpr const char *tag = "volume";
+ static constexpr const char *collectionTag = "volumes";
+ static constexpr const char *volumePointTag = "point";
+
+ struct Attributes {
+ static constexpr const char *deviceCategory = "deviceCategory";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *reference = "ref"; /**< For volume curves factorization. */
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
+struct VolumeGroupTraits : public BaseSerializerTraits<VolumeGroup, VolumeGroups> {
+ static constexpr const char *tag = "volumeGroup";
+ static constexpr const char *collectionTag = "volumeGroups";
+
+ struct Attributes {
+ static constexpr const char *name = "name";
+ static constexpr const char *stream = "stream"; // For legacy volume curves
+ static constexpr const char *indexMin = "indexMin";
+ static constexpr const char *indexMax = "indexMax";
+ };
+
+ static android::status_t deserialize(_xmlDoc *doc, const _xmlNode *root,
+ Collection &collection);
+};
using xmlCharUnique = std::unique_ptr<xmlChar, decltype(xmlFree)>;
@@ -273,6 +304,12 @@
}
ALOGV("%s: %s = %s", __FUNCTION__, Attributes::name, name.c_str());
+ std::string volumeGroup = getXmlAttribute(child, Attributes::volumeGroup);
+ if (volumeGroup.empty()) {
+ ALOGE("%s: No attribute %s found", __FUNCTION__, Attributes::volumeGroup);
+ }
+ ALOGV("%s: %s = %s", __FUNCTION__, Attributes::volumeGroup, volumeGroup.c_str());
+
audio_stream_type_t streamType = AUDIO_STREAM_DEFAULT;
std::string streamTypeXml = getXmlAttribute(child, Attributes::streamType);
if (streamTypeXml.empty()) {
@@ -287,7 +324,7 @@
AttributesVector attributesVect;
deserializeAttributesCollection(doc, child, attributesVect);
- attributesGroup.push_back({name, streamType, attributesVect});
+ attributesGroup.push_back({name, streamType, volumeGroup, attributesVect});
return NO_ERROR;
}
@@ -383,6 +420,189 @@
return NO_ERROR;
}
+status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string deviceCategory = getXmlAttribute(root, Attributes::deviceCategory);
+ if (deviceCategory.empty()) {
+ ALOGW("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
+ }
+ std::string referenceName = getXmlAttribute(root, Attributes::reference);
+ const _xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, collectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ }
+ // Retrieve curve point from reference element if found or directly from current curve
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ root->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, tag, reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ volumes.push_back({ deviceCategory, curvePoints });
+ return NO_ERROR;
+}
+
+status_t VolumeGroupTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, Collection &volumes)
+{
+ std::string name;
+ int indexMin = 0;
+ int indexMax = 0;
+ StreamVector streams = {};
+ AttributesVector attributesVect = {};
+
+ for (const xmlNode *child = root->xmlChildrenNode; child != NULL; child = child->next) {
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::name)) {
+ xmlCharUnique nameXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (nameXml == nullptr) {
+ return BAD_VALUE;
+ }
+ name = reinterpret_cast<const char*>(nameXml.get());
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMin)) {
+ xmlCharUnique indexMinXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMinXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMinLiteral(reinterpret_cast<const char*>(indexMinXml.get()));
+ if (!convertTo(indexMinLiteral, indexMin)) {
+ return BAD_VALUE;
+ }
+ }
+ if (not xmlStrcmp(child->name, (const xmlChar *)Attributes::indexMax)) {
+ xmlCharUnique indexMaxXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (indexMaxXml == nullptr) {
+ return BAD_VALUE;
+ }
+ std::string indexMaxLiteral(reinterpret_cast<const char*>(indexMaxXml.get()));
+ if (!convertTo(indexMaxLiteral, indexMax)) {
+ return BAD_VALUE;
+ }
+ }
+ }
+ deserializeAttributesCollection(doc, root, attributesVect);
+
+ std::string streamNames;
+ for (const auto &stream : streams) {
+ streamNames += android::toString(stream) + " ";
+ }
+ std::string attrmNames;
+ for (const auto &attr : attributesVect) {
+ attrmNames += android::toString(attr) + "\n";
+ }
+ ALOGV("%s: group=%s indexMin=%d, indexMax=%d streams=%s attributes=%s",
+ __func__, name.c_str(), indexMin, indexMax, streamNames.c_str(), attrmNames.c_str( ));
+
+ VolumeCurves groupVolumeCurves;
+ size_t skipped = 0;
+ deserializeCollection<VolumeTraits>(doc, root, groupVolumeCurves, skipped);
+ volumes.push_back({ name, indexMin, indexMax, groupVolumeCurves });
+ return NO_ERROR;
+}
+
+static constexpr const char *legacyVolumecollectionTag = "volumes";
+static constexpr const char *legacyVolumeTag = "volume";
+
+status_t deserializeLegacyVolume(_xmlDoc *doc, const _xmlNode *cur,
+ std::map<std::string, VolumeCurves> &legacyVolumes)
+{
+ std::string streamTypeLiteral = getXmlAttribute(cur, "stream");
+ if (streamTypeLiteral.empty()) {
+ ALOGE("%s: No attribute stream found", __func__);
+ return BAD_VALUE;
+ }
+ std::string deviceCategoryLiteral = getXmlAttribute(cur, "deviceCategory");
+ if (deviceCategoryLiteral.empty()) {
+ ALOGE("%s: No attribute deviceCategory found", __func__);
+ return BAD_VALUE;
+ }
+ std::string referenceName = getXmlAttribute(cur, "ref");
+ const xmlNode *ref = NULL;
+ if (!referenceName.empty()) {
+ getReference(xmlDocGetRootElement(doc), ref, referenceName, legacyVolumecollectionTag);
+ if (ref == NULL) {
+ ALOGE("%s: No reference Ptr found for %s", __func__, referenceName.c_str());
+ return BAD_VALUE;
+ }
+ ALOGV("%s: reference found for %s", __func__, referenceName.c_str());
+ }
+ CurvePoints curvePoints;
+ for (const xmlNode *child = referenceName.empty() ?
+ cur->xmlChildrenNode : ref->xmlChildrenNode; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)VolumeTraits::volumePointTag)) {
+ xmlCharUnique pointXml(xmlNodeListGetString(doc, child->xmlChildrenNode, 1), xmlFree);
+ if (pointXml == NULL) {
+ return BAD_VALUE;
+ }
+ ALOGV("%s: %s=%s", __func__, legacyVolumeTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ std::vector<int> point;
+ collectionFromString<DefaultTraits<int>>(
+ reinterpret_cast<const char*>(pointXml.get()), point, ",");
+ if (point.size() != 2) {
+ ALOGE("%s: Invalid %s: %s", __func__, VolumeTraits::volumePointTag,
+ reinterpret_cast<const char*>(pointXml.get()));
+ return BAD_VALUE;
+ }
+ curvePoints.push_back({point[0], point[1]});
+ }
+ }
+ legacyVolumes[streamTypeLiteral].push_back({ deviceCategoryLiteral, curvePoints });
+ return NO_ERROR;
+}
+
+static status_t deserializeLegacyVolumeCollection(_xmlDoc *doc, const _xmlNode *cur,
+ VolumeGroups &volumeGroups,
+ size_t &nbSkippedElement)
+{
+ std::map<std::string, VolumeCurves> legacyVolumeMap;
+ for (cur = cur->xmlChildrenNode; cur != NULL; cur = cur->next) {
+ if (xmlStrcmp(cur->name, (const xmlChar *)legacyVolumecollectionTag)) {
+ continue;
+ }
+ const xmlNode *child = cur->xmlChildrenNode;
+ for (; child != NULL; child = child->next) {
+ if (!xmlStrcmp(child->name, (const xmlChar *)legacyVolumeTag)) {
+
+ status_t status = deserializeLegacyVolume(doc, child, legacyVolumeMap);
+ if (status != NO_ERROR) {
+ nbSkippedElement += 1;
+ }
+ }
+ }
+ }
+ for (const auto &volumeMapIter : legacyVolumeMap) {
+ // In order to let AudioService set the min and max (compatibility), set min and max
+ // to -1 except for private streams
+ audio_stream_type_t streamType;
+ if (!StreamTypeConverter::fromString(volumeMapIter.first, streamType)) {
+ ALOGE("%s: Invalid stream %s", __func__, volumeMapIter.first.c_str());
+ return BAD_VALUE;
+ }
+ int indexMin = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 0 : -1;
+ int indexMax = streamType >= AUDIO_STREAM_PUBLIC_CNT ? 100 : -1;
+ volumeGroups.push_back({ volumeMapIter.first, indexMin, indexMax, volumeMapIter.second });
+ }
+ return NO_ERROR;
+}
+
ParsingResult parse(const char* path) {
xmlDocPtr doc;
doc = xmlParseFile(path);
@@ -414,8 +634,66 @@
doc, cur, config->criteria, nbSkippedElements);
deserializeCollection<CriterionTypeTraits>(
doc, cur, config->criterionTypes, nbSkippedElements);
+ deserializeCollection<VolumeGroupTraits>(
+ doc, cur, config->volumeGroups, nbSkippedElements);
+
return {std::move(config), nbSkippedElements};
}
+android::status_t parseLegacyVolumeFile(const char* path, VolumeGroups &volumeGroups) {
+ xmlDocPtr doc;
+ doc = xmlParseFile(path);
+ if (doc == NULL) {
+ ALOGE("%s: Could not parse document %s", __FUNCTION__, path);
+ return BAD_VALUE;
+ }
+ xmlNodePtr cur = xmlDocGetRootElement(doc);
+ if (cur == NULL) {
+ ALOGE("%s: Could not parse: empty document %s", __FUNCTION__, path);
+ xmlFreeDoc(doc);
+ return BAD_VALUE;
+ }
+ if (xmlXIncludeProcess(doc) < 0) {
+ ALOGE("%s: libxml failed to resolve XIncludes on document %s", __FUNCTION__, path);
+ return BAD_VALUE;
+ }
+ size_t nbSkippedElements = 0;
+ return deserializeLegacyVolumeCollection(doc, cur, volumeGroups, nbSkippedElements);
+}
+
+static const char *kConfigLocationList[] = {"/odm/etc", "/vendor/etc", "/system/etc"};
+static const int kConfigLocationListSize =
+ (sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
+static const int gApmXmlConfigFilePathMaxLength = 128;
+
+static constexpr const char *apmXmlConfigFileName = "audio_policy_configuration.xml";
+static constexpr const char *apmA2dpOffloadDisabledXmlConfigFileName =
+ "audio_policy_configuration_a2dp_offload_disabled.xml";
+
+android::status_t parseLegacyVolumes(VolumeGroups &volumeGroups) {
+ char audioPolicyXmlConfigFile[gApmXmlConfigFilePathMaxLength];
+ std::vector<const char *> fileNames;
+ status_t ret;
+
+ if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
+ fileNames.push_back(apmA2dpOffloadDisabledXmlConfigFileName);
+ }
+ fileNames.push_back(apmXmlConfigFileName);
+
+ for (const char* fileName : fileNames) {
+ for (int i = 0; i < kConfigLocationListSize; i++) {
+ snprintf(audioPolicyXmlConfigFile, sizeof(audioPolicyXmlConfigFile),
+ "%s/%s", kConfigLocationList[i], fileName);
+ ret = parseLegacyVolumeFile(audioPolicyXmlConfigFile, volumeGroups);
+ if (ret == NO_ERROR) {
+ return ret;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
} // namespace engineConfig
} // namespace android
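parseLegacyVolumes() scans the usual config locations (/odm/etc, /vendor/etc, /system/etc) for audio_policy_configuration.xml and converts its legacy volumes section into VolumeGroups. A condensed sketch of the engine-side fallback that uses it, mirroring loadAudioPolicyEngineConfig() earlier in this change; the helper name is illustrative.

    // Sketch: try the engine-specific config first, then fall back to legacy stream volumes.
    #include <EngineConfig.h>
    #include <utils/Errors.h>
    #include <memory>

    using namespace android;

    static engineConfig::ParsingResult loadConfigWithLegacyFallback()
    {
        engineConfig::ParsingResult result = engineConfig::parse();
        if (result.parsedConfig == nullptr) {
            engineConfig::Config config{};  // empty config, volume groups filled from legacy XML
            status_t ret = engineConfig::parseLegacyVolumes(config.volumeGroups);
            result = {std::make_unique<engineConfig::Config>(config),
                      static_cast<size_t>(ret == NO_ERROR ? 0 : 1)};
        }
        return result;
    }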
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
index 498cc3b..b7fd031 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerInterface.h
@@ -18,6 +18,8 @@
#include <AudioPolicyManagerObserver.h>
#include <media/AudioProductStrategy.h>
+#include <media/AudioVolumeGroup.h>
+#include <IVolumeCurves.h>
#include <policy.h>
#include <Volume.h>
#include <HwModule.h>
@@ -31,7 +33,7 @@
using DeviceStrategyMap = std::map<product_strategy_t, DeviceVector>;
using StrategyVector = std::vector<product_strategy_t>;
-
+using VolumeGroupVector = std::vector<volume_group_t>;
/**
* This interface is dedicated to the policy manager that a Policy Engine shall implement.
@@ -167,7 +169,7 @@
* @return selected input device for the audio attributes, may be null if error.
*/
virtual sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, AudioMix **mix = nullptr) const = 0;
+ const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const = 0;
/**
* Get the legacy stream type for a given audio attributes.
@@ -181,6 +183,7 @@
/**
* @brief getAttributesForStream get the audio attributes from legacy stream type
+ * Attributes returned might only be used to check upon routing decision, not volume decisions.
* @param stream to consider
* @return audio attributes matching the legacy stream type
*/
@@ -234,6 +237,61 @@
*/
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies) const = 0;
+ /**
+ * @brief getVolumeCurvesForAttributes retrieves the Volume Curves interface for the
+ * requested Audio Attributes.
+ * @param attr to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForStreamType retrieves the Volume Curves interface for the stream
+ * @param stream to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForStreamType(audio_stream_type_t stream) const = 0;
+
+ /**
+ * @brief getVolumeCurvesForVolumeGroup retrieves the Volume Curves interface for volume group
+ * @param group to be considered
+ * @return IVolumeCurves interface pointer if found, nullptr otherwise
+ */
+ virtual IVolumeCurves *getVolumeCurvesForVolumeGroup(volume_group_t group) const = 0;
+
+ /**
+ * @brief getVolumeGroups retrieves the collection of volume groups.
+ * @return vector of volume groups
+ */
+ virtual VolumeGroupVector getVolumeGroups() const = 0;
+
+ /**
+ * @brief getVolumeGroupForAttributes gets the appropriate volume group to be used for a given
+ * Audio Attributes.
+ * @param attr to be considered
+ * @return volume group associated to the given audio attributes, default group if none
+ * applicable, VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForAttributes(const audio_attributes_t &attr) const = 0;
+
+ /**
+ * @brief getVolumeGroupForStreamType gets the appropriate volume group to be used for a given
+ * legacy stream type
+ * @param stream type to be considered
+ * @return volume group associated to the given stream type, default group if none applicable,
+ * VOLUME_GROUP_NONE if no default group defined.
+ */
+ virtual volume_group_t getVolumeGroupForStreamType(audio_stream_type_t stream) const = 0;
+
+ /**
+ * @brief listAudioVolumeGroups introspection API to get the Audio Volume Groups, aka
+ * former stream aliases in Audio Service, defining volume curves attached to one or more
+ * Audio Attributes.
+ * @param groups
+ * @return NO_ERROR if the volume groups were retrieved successfully, error code otherwise
+ */
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups) const = 0;
+
virtual void dump(String8 *dst) const = 0;
protected:
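From the policy manager side these pure virtuals are the whole contract: resolve attributes to a volume_group_t, fetch the matching IVolumeCurves, and enumerate groups for clients. A hedged sketch of a caller; the helper name, LOG_TAG and log format are illustrative, and only accessors declared by this interface are used.

    // Sketch: query the engine through the interface extended above.
    #define LOG_TAG "VolumeGroupSketch"
    #include <AudioPolicyManagerInterface.h>
    #include <log/log.h>
    #include <system/audio.h>

    using namespace android;

    static void logAlarmVolumeGroup(const AudioPolicyManagerInterface &engine)
    {
        AudioVolumeGroupVector groups;
        if (engine.listAudioVolumeGroups(groups) == NO_ERROR) {
            ALOGD("engine exposes %zu volume groups", groups.size());
        }

        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_ALARM;
        volume_group_t group = engine.getVolumeGroupForAttributes(attr);
        IVolumeCurves *curves = engine.getVolumeCurvesForVolumeGroup(group);
        ALOGD("alarm -> volume group %d (%s curves)",
              static_cast<int>(group), curves != nullptr ? "with" : "without");
    }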
diff --git a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
index b7902cf..43ba625 100644
--- a/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
+++ b/services/audiopolicy/engine/interface/AudioPolicyManagerObserver.h
@@ -16,7 +16,6 @@
#pragma once
-#include <IVolumeCurvesCollection.h>
#include <AudioGain.h>
#include <AudioPort.h>
#include <AudioPatch.h>
@@ -51,8 +50,6 @@
virtual const DeviceVector &getAvailableInputDevices() const = 0;
- virtual IVolumeCurvesCollection &getVolumeCurves() = 0;
-
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const = 0;
protected:
diff --git a/services/audiopolicy/engineconfigurable/Android.mk b/services/audiopolicy/engineconfigurable/Android.mk
index bbd9688..84a4422 100644
--- a/services/audiopolicy/engineconfigurable/Android.mk
+++ b/services/audiopolicy/engineconfigurable/Android.mk
@@ -12,8 +12,6 @@
src/EngineInstance.cpp \
src/Stream.cpp \
src/InputSource.cpp \
- ../engine/common/src/ProductStrategy.cpp \
- ../engine/common/src/EngineBase.cpp
audio_policy_engine_includes_common := \
frameworks/av/services/audiopolicy/engineconfigurable/include \
@@ -35,7 +33,6 @@
LOCAL_HEADER_LIBRARIES := \
libaudiopolicycommon \
- libaudiopolicyengine_common_headers \
libaudiopolicyengine_interface_headers
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
@@ -45,13 +42,15 @@
LOCAL_STATIC_LIBRARIES := \
libaudiopolicypfwwrapper \
- libaudiopolicycomponents
+ libaudiopolicycomponents \
+ libaudiopolicyengine_common \
+ libaudiopolicyengine_config \
LOCAL_SHARED_LIBRARIES := \
- libaudiopolicyengineconfig \
liblog \
libutils \
liblog \
+ libcutils \
libaudioutils \
libparameter \
libmedia_helper \
diff --git a/services/audiopolicy/engineconfigurable/config/example/Android.mk b/services/audiopolicy/engineconfigurable/config/example/Android.mk
index 95a2ecc..45419f0 100644
--- a/services/audiopolicy/engineconfigurable/config/example/Android.mk
+++ b/services/audiopolicy/engineconfigurable/config/example/Android.mk
@@ -20,6 +20,8 @@
LOCAL_REQUIRED_MODULES := \
audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml \
audio_policy_engine_criteria.xml \
audio_policy_engine_criterion_types.xml
@@ -34,6 +36,22 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_configurable)
@@ -54,7 +72,8 @@
LOCAL_REQUIRED_MODULES := \
audio_policy_engine_product_strategies_automotive.xml \
audio_policy_engine_criteria.xml \
- audio_policy_engine_criterion_types.xml
+ audio_policy_engine_criterion_types.xml \
+ audio_policy_engine_volumes.xml
include $(BUILD_PREBUILT)
@@ -71,6 +90,14 @@
LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := automotive/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif #ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), automotive_configurable)
ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),$(filter $(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION),phone_configurable automotive_configurable))
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
index e2fb02b..28a140a 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_configuration.xml
@@ -19,6 +19,7 @@
<xi:include href="audio_policy_engine_product_strategies.xml"/>
<xi:include href="audio_policy_engine_criterion_types.xml"/>
<xi:include href="audio_policy_engine_criteria.xml"/>
+ <xi:include href="audio_policy_engine_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
index 543a2f0..c487da9 100644
--- a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_product_strategies.xml
@@ -31,7 +31,7 @@
-->
<ProductStrategy name="oem_traffic_anouncement">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_traffic_anouncement">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<!-- traffic_annoucement = 1 -->
@@ -39,14 +39,14 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="oem_strategy_1">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_adas_2">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<Bundle key="oem" value="2"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="oem_strategy_2">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="oem_adas_3">
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
<Bundle key="oem" value="3"/>
@@ -70,21 +70,21 @@
( type == CAR_AUDIO_TYPE_RADIO ) )
-->
<ProductStrategy name="radio">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="media_car_audio_type_3">
<ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
<Usage value="AUDIO_USAGE_MEDIA"/>
<Bundle key="car_audio_type" value="3"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="ext_audio_source">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="media_car_audio_type_7">
<ContentType value="AUDIO_CONTENT_TYPE_MUSIC"/>
<Usage value="AUDIO_USAGE_MEDIA"/>
<Bundle key="car_audio_type" value="7"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="voice_command">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="speech">
<Attributes>
<ContentType value="AUDIO_CONTENT_TYPE_SPEECH"/>
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
@@ -96,7 +96,7 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="safety_alert">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="system">
<ContentType value="AUDIO_CONTENT_TYPE_SONIFICATION"/>
<Usage value="AUDIO_USAGE_NOTIFICATION"/>
<!-- CAR_AUDIO_TYPE_SAFETY_ALERT = 2 -->
@@ -112,7 +112,7 @@
<!-- Generic Usages -->
<ProductStrategy name="music">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="media">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<!-- Default product strategy has empty attributes -->
@@ -121,29 +121,31 @@
</ProductStrategy>
<ProductStrategy name="nav_guidance">
- <AttributesGroup>
+ <AttributesGroup volumeGroup="speech">
<Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="voice_call">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="phone">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
+ </AttributesGroup>
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="phone">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="alarm">
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="ring">
<Usage value="AUDIO_USAGE_ALARM"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="ring">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="notification">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED"/> </Attributes>
@@ -152,10 +154,17 @@
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="system">
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/>
</AttributesGroup>
</ProductStrategy>
-
+ <ProductStrategy name="tts">
+ <!-- The TTS stream MUST BE MANAGED OUTSIDE the default product strategy if there is NO
+ DEDICATED OUTPUT for TTS; otherwise, when a beacon plays, the default strategy gets
+ muted, which is annoying if it is carrying media. -->
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
+ <Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
+ </AttributesGroup>
+ </ProductStrategy>
</ProductStrategies>
diff --git a/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
new file mode 100644
index 0000000..b326b50
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/automotive/audio_policy_engine_volumes.xml
@@ -0,0 +1,192 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+ -->
+
+<!-- Volume Groups Tables included by Audio Policy Configuration file -->
+<!-- Note:
+ It is VALID to define a group without attributes if a product strategy refers to
+ this group for all of its attributes.
+ Otherwise, attributes must be specified.
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>oem_traffic_anouncement</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- OEM ADAS is a volume group that has a single port gain (this is the reason why it is a
+ group) but it may host different streams.
+ A priority must be defined among them: either they are mutually exclusive, so the volume
+ will be the one of the currently active stream, or a priority must be given by
+ some other means. -->
+ <volumeGroup>
+ <name>oem_adas_2</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>oem_adas_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+<!-- MEDIA is a volume group that has a single port gain (this is the reason why it is a
+ group) but it may host different streams.
+ A priority must be defined among them: either they are mutually exclusive, so the volume
+ will be the one of the active stream with the highest priority (ORDER MATTERS), or the
+ curves followed will be the curves for the requested attributes. -->
+ <volumeGroup>
+ <name>media_car_audio_type_3</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media_car_audio_type_7</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+ <volumeGroup>
+ <name>media</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>speech</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>phone</name>
+ <indexMin>1</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>40</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-0</point>
+ <point>100,0</point>
+ </volume>
+ </volumeGroup>
+</volumeGroups>
+
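Each <volume> element in the tables above is a piecewise-linear curve: every <point> pairs a volume index expressed as a percentage (0 to 100) with an attenuation in millibels. The following is a rough, self-contained C++ sketch of how such a curve could be evaluated; the CurvePoint type, the linear interpolation in millibels, and the clamping at the curve ends are illustrative assumptions, not the actual audio policy implementation.

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for one <point>index,millibel</point> entry.
    struct CurvePoint {
        int indexPercent;   // 0..100, as written in the XML above
        int attenuationMb;  // millibels; negative values attenuate
    };

    // Assumed evaluation: clamp to the curve ends and interpolate linearly
    // in millibels between the two surrounding points.
    int curveToMillibel(const std::vector<CurvePoint>& curve, int indexPercent) {
        if (curve.empty()) return 0;
        if (indexPercent <= curve.front().indexPercent) return curve.front().attenuationMb;
        if (indexPercent >= curve.back().indexPercent) return curve.back().attenuationMb;
        for (std::size_t i = 1; i < curve.size(); ++i) {
            if (indexPercent <= curve[i].indexPercent) {
                const CurvePoint& lo = curve[i - 1];
                const CurvePoint& hi = curve[i];
                return lo.attenuationMb
                       + (hi.attenuationMb - lo.attenuationMb)
                         * (indexPercent - lo.indexPercent)
                         / (hi.indexPercent - lo.indexPercent);
            }
        }
        return curve.back().attenuationMb;
    }

    // Example: the DEVICE_CATEGORY_SPEAKER curve of the "media" group above,
    // {0,-2400} {33,-1600} {66,-800} {100,0}, yields about -1188 mB (~ -11.9 dB)
    // at index 50.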
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
index ab61d8a..4ca33b4 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_configuration.xml
@@ -17,6 +17,8 @@
<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device category -->
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
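The file above only declares named reference curves; the per-group tables in audio_policy_engine_stream_volumes.xml reuse them through <volume deviceCategory="..." ref="..."/> entries. A hypothetical way to model that indirection is a simple name-to-curve map; the container, helper name, and silent fallback below are assumptions for illustration, not the real parser.

    #include <map>
    #include <string>
    #include <vector>

    struct CurvePoint { int indexPercent; int attenuationMb; };
    using Curve = std::vector<CurvePoint>;

    // Hypothetical registry of the <reference name="..."> curves declared above.
    static const std::map<std::string, Curve> kReferenceCurves = {
        {"FULL_SCALE_VOLUME_CURVE", {{0, 0}, {100, 0}}},
        {"SILENT_VOLUME_CURVE",     {{0, -9600}, {100, -9600}}},
        {"DEFAULT_MEDIA_VOLUME_CURVE",
         {{1, -5800}, {20, -4000}, {60, -1700}, {100, 0}}},
        // remaining references omitted for brevity
    };

    // A <volume ... ref="NAME"/> entry would then resolve to a shared curve;
    // falling back to silence here is purely illustrative.
    const Curve& resolveReference(const std::string& name) {
        auto it = kReferenceCurves.find(name);
        return it != kReferenceCurves.end() ? it->second
                                            : kReferenceCurves.at("SILENT_VOLUME_CURVE");
    }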
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
index f72e379..9398743 100644
--- a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -25,37 +25,37 @@
enforced. -->
<ProductStrategy name="STRATEGY_PHONE">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
<Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
- <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
<Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ACCESSIBILITY">
- <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
@@ -65,20 +65,20 @@
</ProductStrategy>
<ProductStrategy name="STRATEGY_MEDIA">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
<Attributes></Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_DTMF">
- <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
@@ -86,21 +86,21 @@
<!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
(TTS) of the device -->
<ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
- <AttributesGroup streamType="AUDIO_STREAM_TTS">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
<Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Routing Strategy rerouting may be removed as following media??? -->
<ProductStrategy name="STRATEGY_REROUTING">
- <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Default product strategy has empty attributes -->
<ProductStrategy name="STRATEGY_PATCH">
- <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/engineconfigurable/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- A Volume section defines a volume curve for a given use case and device category.
+It contains a list of points on this curve expressing the attenuation in millibels for a given
+volume index from 0 to 100, for example:
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
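Note that the curve points run over 0..100 while each group above declares its own UI index range through <indexMin>/<indexMax> (0..7 for ring, 0..25 for music, 0..15 for dtmf, and so on). Below is a small sketch of one plausible bridge between the two, under the assumption that the index is rescaled linearly onto the curve's percentage axis before evaluation; the helper name and the rounding behaviour are illustrative, not the actual implementation.

    #include <algorithm>

    // Hypothetical mapping of a volume index in [indexMin, indexMax]
    // onto the 0..100 axis used by the curve points above.
    int indexToCurvePercent(int index, int indexMin, int indexMax) {
        if (indexMax <= indexMin) return 100;           // degenerate range, e.g. 0..0
        index = std::clamp(index, indexMin, indexMax);  // stay inside the declared range
        return (index - indexMin) * 100 / (indexMax - indexMin);
    }

    // Example: the "music" group declares indexMin=0, indexMax=25, so a UI
    // index of 10 maps to 40% before the speaker curve is evaluated.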
diff --git a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
index 65dc9af..4706d7d 100644
--- a/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
+++ b/services/audiopolicy/engineconfigurable/parameter-framework/plugin/Android.mk
@@ -36,7 +36,9 @@
LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-LOCAL_STATIC_LIBRARIES := libpfw_utility
+LOCAL_STATIC_LIBRARIES := \
+ libpfw_utility \
+ libaudiopolicycomponents
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := libpolicy-subsystem
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.cpp b/services/audiopolicy/engineconfigurable/src/Engine.cpp
index 719ef01..89aaa84 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.cpp
+++ b/services/audiopolicy/engineconfigurable/src/Engine.cpp
@@ -113,7 +113,7 @@
const audio_stream_type_t &profile)
{
if (setPropertyForKey<audio_stream_type_t, audio_stream_type_t>(stream, profile)) {
- getApmObserver()->getVolumeCurves().switchVolumeCurve(profile, stream);
+ switchVolumeCurve(profile, stream);
return true;
}
return false;
@@ -295,7 +295,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
- AudioMix **mix) const
+ sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
diff --git a/services/audiopolicy/engineconfigurable/src/Engine.h b/services/audiopolicy/engineconfigurable/src/Engine.h
index 5553994..4662e7e 100644
--- a/services/audiopolicy/engineconfigurable/src/Engine.h
+++ b/services/audiopolicy/engineconfigurable/src/Engine.h
@@ -62,7 +62,7 @@
bool fromCache = false) const override;
sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+ const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
void updateDeviceSelectionCache() override;
diff --git a/services/audiopolicy/enginedefault/Android.bp b/services/audiopolicy/enginedefault/Android.bp
new file mode 100644
index 0000000..7b42c6a
--- /dev/null
+++ b/services/audiopolicy/enginedefault/Android.bp
@@ -0,0 +1,32 @@
+cc_library_shared {
+ name: "libaudiopolicyenginedefault",
+ export_include_dirs: ["include"],
+ srcs: [
+ "src/Engine.cpp",
+ "src/EngineInstance.cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+ local_include_dirs: ["include"],
+ header_libs: [
+ "libbase_headers",
+ "libaudiopolicycommon",
+ "libaudiopolicyengine_interface_headers",
+ ],
+ static_libs: [
+ "libaudiopolicycomponents",
+ "libaudiopolicyengine_common",
+ "libaudiopolicyengine_config",
+ ],
+ shared_libs: [
+ "liblog",
+ "libcutils",
+ "libutils",
+ "libmedia_helper",
+ "libaudiopolicy",
+ "libxml2",
+ ],
+}
diff --git a/services/audiopolicy/enginedefault/Android.mk b/services/audiopolicy/enginedefault/Android.mk
deleted file mode 100644
index 95eac1c..0000000
--- a/services/audiopolicy/enginedefault/Android.mk
+++ /dev/null
@@ -1,55 +0,0 @@
-LOCAL_PATH := $(call my-dir)
-
-# Component build
-#######################################################################
-
-include $(CLEAR_VARS)
-
-LOCAL_SRC_FILES := \
- src/Engine.cpp \
- src/EngineInstance.cpp \
- ../engine/common/src/ProductStrategy.cpp \
- ../engine/common/src/EngineBase.cpp
-
-audio_policy_engine_includes_common := \
- $(LOCAL_PATH)/include
-
-LOCAL_CFLAGS += \
- -Wall \
- -Werror \
- -Wextra \
-
-LOCAL_EXPORT_C_INCLUDE_DIRS := \
- $(audio_policy_engine_includes_common)
-
-LOCAL_C_INCLUDES := \
- $(audio_policy_engine_includes_common) \
- $(TARGET_OUT_HEADERS)/hw \
- $(call include-path-for, frameworks-av) \
- $(call include-path-for, audio-utils) \
- $(call include-path-for, bionic)
-
-LOCAL_MULTILIB := $(AUDIOSERVER_MULTILIB)
-
-LOCAL_MODULE := libaudiopolicyenginedefault
-LOCAL_MODULE_TAGS := optional
-
-LOCAL_HEADER_LIBRARIES := libbase_headers
-
-LOCAL_STATIC_LIBRARIES := \
- libaudiopolicycomponents
-
-LOCAL_SHARED_LIBRARIES := \
- liblog \
- libcutils \
- libutils \
- libmedia_helper \
- libaudiopolicyengineconfig \
- libaudiopolicy
-
-LOCAL_HEADER_LIBRARIES := \
- libaudiopolicycommon \
- libaudiopolicyengine_common_headers \
- libaudiopolicyengine_interface_headers
-
-include $(BUILD_SHARED_LIBRARY)
diff --git a/services/audiopolicy/enginedefault/config/example/Android.mk b/services/audiopolicy/enginedefault/config/example/Android.mk
index 866466f..f06ee4c 100644
--- a/services/audiopolicy/enginedefault/config/example/Android.mk
+++ b/services/audiopolicy/enginedefault/config/example/Android.mk
@@ -16,7 +16,9 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
LOCAL_REQUIRED_MODULES := \
- audio_policy_engine_product_strategies_phone.xml
+ audio_policy_engine_product_strategies_phone.xml \
+ audio_policy_engine_stream_volumes.xml \
+ audio_policy_engine_default_stream_volumes.xml
include $(BUILD_PREBUILT)
@@ -29,4 +31,20 @@
LOCAL_SRC_FILES := phone/$(LOCAL_MODULE_STEM)
include $(BUILD_PREBUILT)
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE := audio_policy_engine_default_stream_volumes.xml
+LOCAL_MODULE_TAGS := optional
+LOCAL_MODULE_CLASS := ETC
+LOCAL_VENDOR_MODULE := true
+LOCAL_SRC_FILES := phone/$(LOCAL_MODULE)
+include $(BUILD_PREBUILT)
+
endif # ifeq ($(BUILD_AUDIO_POLICY_EXAMPLE_CONFIGURATION), phone_default)
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
index ab61d8a..4ca33b4 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_configuration.xml
@@ -17,6 +17,8 @@
<configuration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<xi:include href="audio_policy_engine_product_strategies.xml"/>
+ <xi:include href="audio_policy_engine_stream_volumes.xml"/>
+ <xi:include href="audio_policy_engine_default_stream_volumes.xml"/>
</configuration>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
new file mode 100644
index 0000000..21e6dd5
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_default_stream_volumes.xml
@@ -0,0 +1,136 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- Default Volume Tables included by Audio Policy Configuration file -->
+<!-- Full Default Volume table for all device category -->
+<volumes>
+ <reference name="FULL_SCALE_VOLUME_CURVE">
+ <!-- Full Scale reference Volume Curve -->
+ <point>0,0</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="SILENT_VOLUME_CURVE">
+ <point>0,-9600</point>
+ <point>100,-9600</point>
+ </reference>
+ <reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
+ <!-- Default System reference Volume Curve -->
+ <point>1,-2400</point>
+ <point>33,-1800</point>
+ <point>66,-1200</point>
+ <point>100,-600</point>
+ </reference>
+ <reference name="DEFAULT_MEDIA_VOLUME_CURVE">
+ <!-- Default Media reference Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
+ <!-- Default is Speaker Media Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE">
+ <!-- Default is Speaker System Volume Curve -->
+ <point>1,-4680</point>
+ <point>42,-2070</point>
+ <point>85,-540</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
+ <!--Default Volume Curve -->
+ <point>1,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
+ <!-- Default is Ext Media System Volume Curve -->
+ <point>1,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_HEARING_AID_VOLUME_CURVE">
+ <!-- Default Hearing Aid Volume Curve -->
+ <point>1,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+ <!-- **************************************************************** -->
+ <!-- Non-mutable default volume curves: -->
+ <!-- * first point is always for index 0 -->
+ <!-- * attenuation is small enough that stream can still be heard -->
+ <reference name="DEFAULT_NON_MUTABLE_VOLUME_CURVE">
+ <!-- Default non-mutable reference Volume Curve -->
+ <!-- based on DEFAULT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve for headset -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE">
+ <!-- Default non-mutable Speaker Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE">
+ <!--Default non-mutable Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE -->
+ <point>0,-4950</point>
+ <point>33,-3350</point>
+ <point>66,-1700</point>
+ <point>100,0</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE">
+ <!-- Default non-mutable Ext Media System Volume Curve -->
+ <!-- based on DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE -->
+ <point>0,-5800</point>
+ <point>20,-4000</point>
+ <point>60,-2100</point>
+ <point>100,-1000</point>
+ </reference>
+ <reference name="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE">
+ <!-- Default non-mutable Hearing Aid Volume Curve -->
+ <!-- based on DEFAULT_HEARING_AID_VOLUME_CURVE -->
+ <point>0,-12700</point>
+ <point>20,-8000</point>
+ <point>60,-4000</point>
+ <point>100,0</point>
+ </reference>
+</volumes>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
index f72e379..9398743 100644
--- a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_product_strategies.xml
@@ -25,37 +25,37 @@
enforced. -->
<ProductStrategy name="STRATEGY_PHONE">
- <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL">
+ <AttributesGroup streamType="AUDIO_STREAM_VOICE_CALL" volumeGroup="voice_call">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO">
+ <AttributesGroup streamType="AUDIO_STREAM_BLUETOOTH_SCO" volumeGroup="bluetooth_sco">
<Attributes> <Flags value="AUDIO_FLAG_SCO"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION">
- <AttributesGroup streamType="AUDIO_STREAM_RING">
+ <AttributesGroup streamType="AUDIO_STREAM_RING" volumeGroup="ring">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE"/> </Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_ALARM">
+ <AttributesGroup streamType="AUDIO_STREAM_ALARM" volumeGroup="alarm">
<Attributes> <Usage value="AUDIO_USAGE_ALARM"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ENFORCED_AUDIBLE">
- <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE">
+ <AttributesGroup streamType="AUDIO_STREAM_ENFORCED_AUDIBLE" volumeGroup="enforced_audible">
<Attributes> <Flags value="AUDIO_FLAG_AUDIBILITY_ENFORCED"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_ACCESSIBILITY">
- <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY">
+ <AttributesGroup streamType="AUDIO_STREAM_ACCESSIBILITY" volumeGroup="accessibility">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_SONIFICATION_RESPECTFUL">
- <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION">
+ <AttributesGroup streamType="AUDIO_STREAM_NOTIFICATION" volumeGroup="notification">
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT"/> </Attributes>
@@ -65,20 +65,20 @@
</ProductStrategy>
<ProductStrategy name="STRATEGY_MEDIA">
- <AttributesGroup streamType="AUDIO_STREAM_MUSIC">
+ <AttributesGroup streamType="AUDIO_STREAM_MUSIC" volumeGroup="music">
<Attributes> <Usage value="AUDIO_USAGE_MEDIA"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_GAME"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANT"/> </Attributes>
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE"/> </Attributes>
<Attributes></Attributes>
</AttributesGroup>
- <AttributesGroup streamType="AUDIO_STREAM_SYSTEM">
+ <AttributesGroup streamType="AUDIO_STREAM_SYSTEM" volumeGroup="system">
<Attributes> <Usage value="AUDIO_USAGE_ASSISTANCE_SONIFICATION"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<ProductStrategy name="STRATEGY_DTMF">
- <AttributesGroup streamType="AUDIO_STREAM_DTMF">
+ <AttributesGroup streamType="AUDIO_STREAM_DTMF" volumeGroup="dtmf">
<Attributes> <Usage value="AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
@@ -86,21 +86,21 @@
<!-- Used to identify the volume of audio streams exclusively transmitted through the speaker
(TTS) of the device -->
<ProductStrategy name="STRATEGY_TRANSMITTED_THROUGH_SPEAKER">
- <AttributesGroup streamType="AUDIO_STREAM_TTS">
+ <AttributesGroup streamType="AUDIO_STREAM_TTS" volumeGroup="tts">
<Attributes> <Flags value="AUDIO_FLAG_BEACON"/> </Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Routing Strategy rerouting may be removed as following media??? -->
<ProductStrategy name="STRATEGY_REROUTING">
- <AttributesGroup streamType="AUDIO_STREAM_REROUTING">
+ <AttributesGroup streamType="AUDIO_STREAM_REROUTING" volumeGroup="rerouting">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
<!-- Default product strategy has empty attributes -->
<ProductStrategy name="STRATEGY_PATCH">
- <AttributesGroup streamType="AUDIO_STREAM_PATCH">
+ <AttributesGroup streamType="AUDIO_STREAM_PATCH" volumeGroup="patch">
<Attributes></Attributes>
</AttributesGroup>
</ProductStrategy>
diff --git a/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
new file mode 100644
index 0000000..707a184
--- /dev/null
+++ b/services/audiopolicy/enginedefault/config/example/phone/audio_policy_engine_stream_volumes.xml
@@ -0,0 +1,231 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- Copyright (C) 2015 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- A Volume section defines a volume curve for a given use case and device category.
+It contains a list of points on this curve expressing the attenuation in millibels for a given
+volume index from 0 to 100, for example:
+<volume deviceCategory="">
+<point>0,-9600</point>
+<point>100,0</point>
+</volume>
+-->
+
+<volumeGroups>
+ <volumeGroup>
+ <name>voice_call</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-2700</point>
+ <point>33,-1800</point>
+ <point>66,-900</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>system</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-5100</point>
+ <point>57,-2800</point>
+ <point>71,-2500</point>
+ <point>85,-2300</point>
+ <point>100,-2100</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>ring</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>music</name>
+ <indexMin>0</indexMin>
+ <indexMax>25</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>alarm</name>
+ <indexMin>1</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_EXT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>notification</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>bluetooth_sco</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>0,-2400</point>
+ <point>33,-1600</point>
+ <point>66,-800</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE">
+ <point>0,-4200</point>
+ <point>33,-2800</point>
+ <point>66,-1400</point>
+ <point>100,0</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>enforced_audible</name>
+ <indexMin>0</indexMin>
+ <indexMax>7</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-3400</point>
+ <point>71,-2400</point>
+ <point>100,-2000</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>dtmf</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET">
+ <point>1,-3000</point>
+ <point>33,-2600</point>
+ <point>66,-2200</point>
+ <point>100,-1800</point>
+ </volume>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER">
+ <point>1,-4000</point>
+ <point>71,-2400</point>
+ <point>100,-1400</point>
+ </volume>
+ <!--volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/-->
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>tts</name>
+ <indexMin>0</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="SILENT_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="SILENT_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>accessibility</name>
+ <indexMin>1</indexMin>
+ <indexMax>15</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="DEFAULT_NON_MUTABLE_SPEAKER_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="DEFAULT_NON_MUTABLE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="DEFAULT_NON_MUTABLE_HEARING_AID_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>rerouting</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+
+ <volumeGroup>
+ <name>patch</name>
+ <indexMin>0</indexMin>
+ <indexMax>1</indexMax>
+ <volume deviceCategory="DEVICE_CATEGORY_HEADSET" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_SPEAKER" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EARPIECE" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_EXT_MEDIA" ref="FULL_SCALE_VOLUME_CURVE"/>
+ <volume deviceCategory="DEVICE_CATEGORY_HEARING_AID" ref="FULL_SCALE_VOLUME_CURVE"/>
+ </volumeGroup>
+</volumeGroups>
+
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index 43023a8..66a6965 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -66,35 +66,6 @@
}
}
-status_t Engine::setPhoneState(audio_mode_t state)
-{
- ALOGV("setPhoneState() state %d", state);
-
- if (state < 0 || state >= AUDIO_MODE_CNT) {
- ALOGW("setPhoneState() invalid state %d", state);
- return BAD_VALUE;
- }
-
- if (state == getPhoneState()) {
- ALOGW("setPhoneState() setting same state %d", state);
- return BAD_VALUE;
- }
-
- // store previous phone state for management of sonification strategy below
- int oldState = getPhoneState();
- EngineBase::setPhoneState(state);
-
- if (!is_state_in_call(oldState) && is_state_in_call(state)) {
- ALOGV(" Entering call in setPhoneState()");
- getApmObserver()->getVolumeCurves().switchVolumeCurve(AUDIO_STREAM_VOICE_CALL,
- AUDIO_STREAM_DTMF);
- } else if (is_state_in_call(oldState) && !is_state_in_call(state)) {
- ALOGV(" Exiting call in setPhoneState()");
- getApmObserver()->getVolumeCurves().restoreOriginVolumeCurve(AUDIO_STREAM_DTMF);
- }
- return NO_ERROR;
-}
-
status_t Engine::setForceUse(audio_policy_force_use_t usage, audio_policy_forced_cfg_t config)
{
switch(usage) {
@@ -431,7 +402,9 @@
outputDeviceTypesToIgnore);
break;
}
- if (device2 == AUDIO_DEVICE_NONE) {
+ // FIXME: Find a better solution to prevent routing to BT hearing aid (b/122931261).
+ if ((device2 == AUDIO_DEVICE_NONE) &&
+ (getForceUse(AUDIO_POLICY_FORCE_FOR_MEDIA) != AUDIO_POLICY_FORCE_NO_BT_A2DP)) {
device2 = availableOutputDevicesType & AUDIO_DEVICE_OUT_HEARING_AID;
}
if ((device2 == AUDIO_DEVICE_NONE) &&
@@ -754,7 +727,7 @@
}
sp<DeviceDescriptor> Engine::getInputDeviceForAttributes(const audio_attributes_t &attr,
- AudioMix **mix) const
+ sp<AudioPolicyMix> *mix) const
{
const auto &policyMixes = getApmObserver()->getAudioPolicyMixCollection();
const auto &availableInputDevices = getApmObserver()->getAvailableInputDevices();
diff --git a/services/audiopolicy/enginedefault/src/Engine.h b/services/audiopolicy/enginedefault/src/Engine.h
index 15fc358..d5dfacc 100644
--- a/services/audiopolicy/enginedefault/src/Engine.h
+++ b/services/audiopolicy/enginedefault/src/Engine.h
@@ -55,8 +55,6 @@
///
/// from EngineBase, so from AudioPolicyManagerInterface
///
- status_t setPhoneState(audio_mode_t mode) override;
-
status_t setForceUse(audio_policy_force_use_t usage,
audio_policy_forced_cfg_t config) override;
@@ -68,7 +66,7 @@
bool fromCache = false) const override;
sp<DeviceDescriptor> getInputDeviceForAttributes(
- const audio_attributes_t &attr, AudioMix **mix = nullptr) const override;
+ const audio_attributes_t &attr, sp<AudioPolicyMix> *mix = nullptr) const override;
void updateDeviceSelectionCache() override;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 721a658..4a0e764 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -33,8 +33,8 @@
#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
#define AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME \
"audio_policy_configuration_a2dp_offload_disabled.xml"
-#define AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME \
- "audio_policy_configuration_bluetooth_hal_enabled.xml"
+#define AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME \
+ "audio_policy_configuration_bluetooth_legacy_hal.xml"
#include <inttypes.h>
#include <math.h>
@@ -209,7 +209,27 @@
return BAD_VALUE;
}
- checkForDeviceAndOutputChanges([&]() {
+ // No need to evaluate playback routing when connecting a remote submix
+ // output device used by a dynamic policy of type recorder as no
+ // playback use case is affected.
+ bool doCheckForDeviceAndOutputChanges = true;
+ if (device->type() == AUDIO_DEVICE_OUT_REMOTE_SUBMIX
+ && strncmp(device_address, "0", AUDIO_DEVICE_MAX_ADDRESS_LEN) != 0) {
+ for (audio_io_handle_t output : outputs) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
+ sp<AudioPolicyMix> policyMix = desc->mPolicyMix.promote();
+ if (policyMix != nullptr
+ && policyMix->mMixType == MIX_TYPE_RECORDERS
+ && strncmp(device_address,
+ policyMix->mDeviceAddress.string(),
+ AUDIO_DEVICE_MAX_ADDRESS_LEN) == 0) {
+ doCheckForDeviceAndOutputChanges = false;
+ break;
+ }
+ }
+ }
+
+ auto checkCloseOutputs = [&]() {
// outputs must be closed after checkOutputForAllStrategies() is executed
if (!outputs.isEmpty()) {
for (audio_io_handle_t output : outputs) {
@@ -218,7 +238,7 @@
// been opened by checkOutputsForDevice() to query dynamic parameters
if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
(((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
- (desc->mDirectOpenCount == 0))) {
+ (desc->mDirectOpenCount == 0))) {
closeOutput(output);
}
}
@@ -226,7 +246,13 @@
return true;
}
return false;
- });
+ };
+
+ if (doCheckForDeviceAndOutputChanges) {
+ checkForDeviceAndOutputChanges(checkCloseOutputs);
+ } else {
+ checkCloseOutputs();
+ }
if (mEngine->getPhoneState() == AUDIO_MODE_IN_CALL && hasPrimaryOutput()) {
DeviceVector newDevices = getNewOutputDevices(mPrimaryOutput, false /*fromCache*/);
@@ -352,7 +378,7 @@
(strlen(device_address) != 0)/*matchAddress*/);
if (devDesc == 0) {
- ALOGW("getDeviceConnectionState() undeclared device, type %08x, address: %s",
+ ALOGV("getDeviceConnectionState() undeclared device, type %08x, address: %s",
device, device_address);
return AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE;
}
@@ -454,36 +480,16 @@
std::vector<audio_format_t> *formats)
{
ALOGV("getHwOffloadEncodingFormatsSupportedForA2DP()");
- char *tok = NULL, *saveptr;
status_t status = NO_ERROR;
- char encoding_formats_list[PROPERTY_VALUE_MAX];
- audio_format_t format = AUDIO_FORMAT_DEFAULT;
- // FIXME This list should not come from a property but the supported encoded
- // formats of declared A2DP devices in primary module
- property_get("persist.bluetooth.a2dp_offload.cap", encoding_formats_list, "");
- tok = strtok_r(encoding_formats_list, "-", &saveptr);
- for (;tok != NULL; tok = strtok_r(NULL, "-", &saveptr)) {
- if (strcmp(tok, "sbc") == 0) {
- ALOGV("%s: SBC offload supported\n",__func__);
- format = AUDIO_FORMAT_SBC;
- } else if (strcmp(tok, "aptx") == 0) {
- ALOGV("%s: APTX offload supported\n",__func__);
- format = AUDIO_FORMAT_APTX;
- } else if (strcmp(tok, "aptxhd") == 0) {
- ALOGV("%s: APTX HD offload supported\n",__func__);
- format = AUDIO_FORMAT_APTX_HD;
- } else if (strcmp(tok, "ldac") == 0) {
- ALOGV("%s: LDAC offload supported\n",__func__);
- format = AUDIO_FORMAT_LDAC;
- } else if (strcmp(tok, "aac") == 0) {
- ALOGV("%s: AAC offload supported\n",__func__);
- format = AUDIO_FORMAT_AAC;
- } else {
- ALOGE("%s: undefined token - %s\n",__func__, tok);
- continue;
- }
- formats->push_back(format);
+ std::unordered_set<audio_format_t> formatSet;
+ sp<HwModule> primaryModule =
+ mHwModules.getModuleFromName(AUDIO_HARDWARE_MODULE_ID_PRIMARY);
+ DeviceVector declaredDevices = primaryModule->getDeclaredDevices().getDevicesFromTypeMask(
+ AUDIO_DEVICE_OUT_ALL_A2DP);
+ for (const auto& device : declaredDevices) {
+ formatSet.insert(device->encodedFormats().begin(), device->encodedFormats().end());
}
+ formats->assign(formatSet.begin(), formatSet.end());
return status;
}
@@ -963,7 +969,7 @@
}
if (usePrimaryOutputFromPolicyMixes) {
*output = policyDesc->mIoHandle;
- AudioMix *mix = policyDesc->mPolicyMix;
+ sp<AudioPolicyMix> mix = policyDesc->mPolicyMix.promote();
sp<DeviceDescriptor> deviceDesc =
mAvailableOutputDevices.getDevice(mix->mDeviceType,
mix->mDeviceAddress,
@@ -1017,7 +1023,8 @@
}
}
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = getOutputForDevices(outputDevices, session, *stream, config, flags);
+ *output = getOutputForDevices(outputDevices, session, *stream, config,
+ flags, attr->flags & AUDIO_FLAG_MUTE_HAPTIC);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
return INVALID_OPERATION;
@@ -1078,7 +1085,7 @@
new TrackClientDescriptor(*portId, uid, session, resultAttr, clientConfig,
sanitizedRequestedPortId, *stream,
mEngine->getProductStrategyForAttributes(resultAttr),
- streamToVolumeSource(*stream),
+ toVolumeSource(resultAttr),
*flags, isRequestedDeviceForExclusiveUse,
std::move(weakSecondaryOutputDescs));
sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(*output);
@@ -1095,11 +1102,16 @@
audio_session_t session,
audio_stream_type_t stream,
const audio_config_t *config,
- audio_output_flags_t *flags)
+ audio_output_flags_t *flags,
+ bool forceMutingHaptic)
{
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
status_t status;
+ // Discard haptic channel mask when forcing muting haptic channels.
+ audio_channel_mask_t channelMask = forceMutingHaptic
+ ? (config->channel_mask & ~AUDIO_CHANNEL_HAPTIC_ALL) : config->channel_mask;
+
// open a direct output if required by specified parameters
//force direct flag if offload flag is set: offloading implies a direct output stream
// and all common behaviors are driven by checking only the direct flag
@@ -1136,7 +1148,7 @@
// and not explicitly requested
if (((*flags & AUDIO_OUTPUT_FLAG_DIRECT) == 0) &&
audio_is_linear_pcm(config->format) && config->sample_rate <= SAMPLE_RATE_HZ_MAX &&
- audio_channel_count_from_out_mask(config->channel_mask) <= 2) {
+ audio_channel_count_from_out_mask(channelMask) <= 2) {
goto non_direct_output;
}
@@ -1152,7 +1164,7 @@
profile = getProfileForOutput(devices,
config->sample_rate,
config->format,
- config->channel_mask,
+ channelMask,
(audio_output_flags_t)*flags,
true /* directOnly */);
}
@@ -1166,7 +1178,7 @@
// and configured with same parameters
if ((config->sample_rate == desc->mSamplingRate) &&
(config->format == desc->mFormat) &&
- (config->channel_mask == desc->mChannelMask) &&
+ (channelMask == desc->mChannelMask) &&
(session == desc->mDirectClientSession)) {
desc->mDirectOpenCount++;
ALOGI("%s reusing direct output %d for session %d", __func__,
@@ -1208,11 +1220,11 @@
if (status != NO_ERROR ||
(config->sample_rate != 0 && config->sample_rate != outputDesc->mSamplingRate) ||
(config->format != AUDIO_FORMAT_DEFAULT && config->format != outputDesc->mFormat) ||
- (config->channel_mask != 0 && config->channel_mask != outputDesc->mChannelMask)) {
+ (channelMask != 0 && channelMask != outputDesc->mChannelMask)) {
ALOGV("%s failed opening direct output: output %d sample rate %d %d,"
"format %d %d, channel mask %04x %04x", __func__, output, config->sample_rate,
outputDesc->mSamplingRate, config->format, outputDesc->mFormat,
- config->channel_mask, outputDesc->mChannelMask);
+ channelMask, outputDesc->mChannelMask);
if (output != AUDIO_IO_HANDLE_NONE) {
outputDesc->close();
}
@@ -1253,12 +1265,11 @@
// at this stage we should ignore the DIRECT flag as no direct output could be found earlier
*flags = (audio_output_flags_t)(*flags & ~AUDIO_OUTPUT_FLAG_DIRECT);
- output = selectOutput(outputs, *flags, config->format,
- config->channel_mask, config->sample_rate);
+ output = selectOutput(outputs, *flags, config->format, channelMask, config->sample_rate);
}
ALOGW_IF((output == 0), "getOutputForDevices() could not find output for stream %d, "
"sampling rate %d, format %#x, channels %#x, flags %#x",
- stream, config->sample_rate, config->format, config->channel_mask, *flags);
+ stream, config->sample_rate, config->format, channelMask, *flags);
return output;
}
@@ -1592,10 +1603,9 @@
(outputDesc->getPatchHandle() == AUDIO_PATCH_HANDLE_NONE);
DeviceVector devices;
- AudioMix *policyMix = NULL;
+ sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote();
const char *address = NULL;
- if (outputDesc->mPolicyMix != NULL) {
- policyMix = outputDesc->mPolicyMix;
+ if (policyMix != NULL) {
audio_devices_t newDeviceType;
address = policyMix->mDeviceAddress.string();
if ((policyMix->mRouteFlags & MIX_ROUTE_FLAG_LOOP_BACK) == MIX_ROUTE_FLAG_LOOP_BACK) {
@@ -1684,8 +1694,9 @@
setOutputDevices(outputDesc, devices, force, 0, NULL, requiresMuteCheck);
// apply volume rules for current stream and device if necessary
- checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, outputDesc->devices().types()),
+ auto &curves = getVolumeCurves(client->attributes());
+ checkAndSetVolume(curves, client->volumeSource(),
+ curves.getVolumeIndex(outputDesc->devices().types()),
outputDesc,
outputDesc->devices().types());
@@ -1765,12 +1776,13 @@
if (outputDesc->getActivityCount(clientVolSrc) == 1) {
// Automatically disable the remote submix input when output is stopped on a
// re routing mix of type MIX_TYPE_RECORDERS
+ sp<AudioPolicyMix> policyMix = outputDesc->mPolicyMix.promote();
if (audio_is_remote_submix_device(outputDesc->devices().types()) &&
- outputDesc->mPolicyMix != NULL &&
- outputDesc->mPolicyMix->mMixType == MIX_TYPE_RECORDERS) {
+ policyMix != NULL &&
+ policyMix->mMixType == MIX_TYPE_RECORDERS) {
setDeviceConnectionStateInt(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
- outputDesc->mPolicyMix->mDeviceAddress,
+ policyMix->mDeviceAddress,
"remote-submix", AUDIO_FORMAT_DEFAULT);
}
}
@@ -1885,7 +1897,7 @@
status_t status = NO_ERROR;
audio_source_t halInputSource;
audio_attributes_t attributes = *attr;
- AudioMix *policyMix = NULL;
+ sp<AudioPolicyMix> policyMix;
sp<DeviceDescriptor> device;
sp<AudioInputDescriptor> inputDesc;
sp<RecordClientDescriptor> clientDesc;
@@ -1961,7 +1973,11 @@
if (status != NO_ERROR) {
goto error;
}
- *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+ if (is_mix_loopback_render(policyMix->mRouteFlags)) {
+ *inputType = API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK;
+ } else {
+ *inputType = API_INPUT_MIX_EXT_POLICY_REROUTE;
+ }
device = mAvailableInputDevices.getDevice(AUDIO_DEVICE_IN_REMOTE_SUBMIX,
String8(attr->tags + strlen("addr=")),
AUDIO_FORMAT_DEFAULT);
@@ -1978,7 +1994,7 @@
status = BAD_VALUE;
goto error;
}
- if (policyMix != nullptr) {
+ if (policyMix) {
ALOG_ASSERT(policyMix->mMixType == MIX_TYPE_RECORDERS, "Invalid Mix Type");
// there is an external policy, but this input is attached to a mix of recorders,
// meaning it receives audio injected into the framework, so the recorder doesn't
@@ -2030,7 +2046,7 @@
const audio_attributes_t &attributes,
const audio_config_base_t *config,
audio_input_flags_t flags,
- AudioMix *policyMix)
+ const sp<AudioPolicyMix> &policyMix)
{
audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
audio_source_t halInputSource = attributes.source;
@@ -2190,10 +2206,11 @@
setInputDevice(input, device, true /* force */);
if (inputDesc->activeCount() == 1) {
+ sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote();
// if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ if ((policyMix != NULL)
+ && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress,
MIX_STATE_MIXING);
}
@@ -2208,10 +2225,10 @@
// For remote submix (a virtual device), we open only one input per capture request.
if (audio_is_remote_submix_device(inputDesc->getDeviceType())) {
String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
+ if (policyMix == NULL) {
address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ } else if (policyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = policyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -2248,10 +2265,11 @@
if (inputDesc->isActive()) {
setInputDevice(input, getNewInputDevice(inputDesc), false /* force */);
} else {
+ sp<AudioPolicyMix> policyMix = inputDesc->mPolicyMix.promote();
// if input maps to a dynamic policy with an activity listener, notify of state change
- if ((inputDesc->mPolicyMix != NULL)
- && ((inputDesc->mPolicyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
- mpClientInterface->onDynamicPolicyMixStateUpdate(inputDesc->mPolicyMix->mDeviceAddress,
+ if ((policyMix != NULL)
+ && ((policyMix->mCbFlags & AudioMix::kCbFlagNotifyActivity) != 0)) {
+ mpClientInterface->onDynamicPolicyMixStateUpdate(policyMix->mDeviceAddress,
MIX_STATE_IDLE);
}
@@ -2259,10 +2277,10 @@
// used by a policy mix of type MIX_TYPE_RECORDERS
if (audio_is_remote_submix_device(inputDesc->getDeviceType())) {
String8 address = String8("");
- if (inputDesc->mPolicyMix == NULL) {
+ if (policyMix == NULL) {
address = String8("0");
- } else if (inputDesc->mPolicyMix->mMixType == MIX_TYPE_PLAYERS) {
- address = inputDesc->mPolicyMix->mDeviceAddress;
+ } else if (policyMix->mMixType == MIX_TYPE_PLAYERS) {
+ address = policyMix->mDeviceAddress;
}
if (address != "") {
setDeviceConnectionStateInt(AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
@@ -2348,23 +2366,21 @@
}
}
-void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax)
+void AudioPolicyManager::initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax)
{
ALOGV("initStreamVolume() stream %d, min %d, max %d", stream , indexMin, indexMax);
if (indexMin < 0 || indexMax < 0) {
ALOGE("%s for stream %d: invalid min %d or max %d", __func__, stream , indexMin, indexMax);
return;
}
- mVolumeCurves->initStreamVolume(stream, indexMin, indexMax);
+ getVolumeCurves(stream).initVolume(indexMin, indexMax);
// initialize other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
- mVolumeCurves->initStreamVolume((audio_stream_type_t)curStream, indexMin, indexMax);
+ getVolumeCurves((audio_stream_type_t)curStream).initVolume(indexMin, indexMax);
}
}
@@ -2372,33 +2388,58 @@
int index,
audio_devices_t device)
{
-
- // VOICE_CALL and BLUETOOTH_SCO stream have minVolumeIndex > 0 but
- // can be muted directly by an app that has MODIFY_PHONE_STATE permission.
- if (((index < mVolumeCurves->getVolumeIndexMin(stream)) &&
- !((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
- index == 0)) ||
- (index > mVolumeCurves->getVolumeIndexMax(stream))) {
+ auto attributes = mEngine->getAttributesForStreamType(stream);
+ auto volumeGroup = mEngine->getVolumeGroupForStreamType(stream);
+ if (volumeGroup == VOLUME_GROUP_NONE) {
+ ALOGE("%s: no group matching with stream %s", __FUNCTION__, toString(stream).c_str());
return BAD_VALUE;
}
- if (!audio_is_output_device(device)) {
+ ALOGV("%s: stream %s attributes=%s", __func__,
+ toString(stream).c_str(), toString(attributes).c_str());
+ return setVolumeGroupIndex(getVolumeCurves(stream), volumeGroup, index, device, attributes);
+}
+
+status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
+ int *index,
+ audio_devices_t device)
+{
+ // if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
+ // stream by the engine.
+ if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
+ device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types();
+ }
+ return getVolumeIndex(getVolumeCurves(stream), *index, device);
+}
+
+status_t AudioPolicyManager::setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device)
+{
+ // Get Volume group matching the Audio Attributes
+ auto volumeGroup = mEngine->getVolumeGroupForAttributes(attr);
+ if (volumeGroup == VOLUME_GROUP_NONE) {
+ ALOGD("%s: could not find group matching with %s", __FUNCTION__, toString(attr).c_str());
return BAD_VALUE;
}
+ ALOGV("%s: group %d matching with %s", __FUNCTION__, volumeGroup, toString(attr).c_str());
+ return setVolumeGroupIndex(getVolumeCurves(attr), volumeGroup, index, device, attr);
+}
- // Force max volume if stream cannot be muted
- if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
+status_t AudioPolicyManager::setVolumeGroupIndex(IVolumeCurves &curves, volume_group_t group,
+ int index,
+ audio_devices_t device,
+ const audio_attributes_t attributes)
+{
+ ALOGVV("%s: group=%d", __func__, group);
+ status_t status = NO_ERROR;
+ VolumeSource vs = toVolumeSource(group);
+ product_strategy_t strategy = mEngine->getProductStrategyForAttributes(attributes);
- ALOGV("setStreamVolumeIndex() stream %d, device %08x, index %d",
- stream, device, index);
-
- // update other private stream volumes which follow this one
- for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
- continue;
- }
- mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
+ status = setVolumeCurveIndex(index, device, curves);
+ if (status != NO_ERROR) {
+ ALOGE("%s failed to set curve index for group %d device 0x%X", __func__, group, device);
+ return status;
}
-
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
// - The device (or devices) selected by the engine for this stream includes
@@ -2407,70 +2448,169 @@
// requested device or one of the devices selected by the engine for this stream
// - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
// no specific device volume value exists for currently selected device.
- status_t status = NO_ERROR;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
- audio_devices_t curDevice = desc->devices().types();
- for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
- if (!(streamsMatchForvolume(stream, (audio_stream_type_t)curStream))) {
+ audio_devices_t curDevice = Volume::getDeviceForVolume(desc->devices().types());
+
+ // Inter / intra volume group priority management: Loop on strategies arranged by priority
+ // If a higher priority strategy is active, and the output is routed to a device with a
+ // HW Gain management, do not change the volume
+ bool applyVolume = false;
+ if (desc->useHwGain()) {
+ if (!(desc->isActive(group) || isInCall())) {
continue;
}
- if (!(desc->isActive(streamToVolumeSource((audio_stream_type_t)curStream)) || isInCall())) {
+ for (const auto &productStrategy : mEngine->getOrderedProductStrategies()) {
+ auto activeClients = desc->clientsList(true /*activeOnly*/, productStrategy,
+ false /*preferredDevice*/);
+ if (activeClients.empty()) {
+ continue;
+ }
+ bool isPreempted = false;
+ bool isHigherPriority = productStrategy < strategy;
+ for (const auto &client : activeClients) {
+ if (isHigherPriority && (client->volumeSource() != vs)) {
+ ALOGV("%s: Strategy=%d (\nrequester:\n"
+ " group %d, volumeGroup=%d attributes=%s)\n"
+ " higher priority source active:\n"
+ " volumeGroup=%d attributes=%s) \n"
+ " on output %zu, bailing out", __func__, productStrategy,
+ group, group, toString(attributes).c_str(),
+ client->volumeSource(), toString(client->attributes()).c_str(), i);
+ applyVolume = false;
+ isPreempted = true;
+ break;
+ }
+ // However, continue for loop to ensure no higher prio clients running on output
+ if (client->volumeSource() == vs) {
+ applyVolume = true;
+ }
+ }
+ if (isPreempted || applyVolume) {
+ break;
+ }
+ }
+ if (!applyVolume) {
+ continue; // next output
+ }
+ status_t volStatus = checkAndSetVolume(curves, vs, index, desc, curDevice,
+ (vs == toVolumeSource(AUDIO_STREAM_SYSTEM)?
+ TOUCH_SOUND_FIXED_DELAY_MS : 0));
+ if (volStatus != NO_ERROR) {
+ status = volStatus;
+ }
+ continue;
+ }
+ for (auto curVolGroup : getVolumeGroups()) {
+ VolumeSource curVolSrc = toVolumeSource(curVolGroup);
+ if (!(curVolSrc == vs || isInCall())) {
continue;
}
- audio_devices_t curStreamDevice = Volume::getDeviceForVolume(
- mEngine->getOutputDevicesForStream((audio_stream_type_t)curStream,
- false /*fromCache*/).types());
- if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
- ((curStreamDevice & device) == 0)) {
+ if (!(desc->isActive(vs) || isInCall())) {
continue;
}
- bool applyVolume;
- if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- curStreamDevice |= device;
- applyVolume = (Volume::getDeviceForVolume(curDevice) & curStreamDevice) != 0;
+ audio_devices_t curSrcDevice;
+ auto &curCurves = getVolumeCurves(curVolSrc);
+ auto curCurvAttrs = curCurves.getAttributes();
+ if (!curCurvAttrs.empty() && curCurvAttrs.front() != defaultAttr) {
+ auto attr = curCurvAttrs.front();
+ curSrcDevice = mEngine->getOutputDevicesForAttributes(attr, nullptr, false).types();
+ } else if (!curCurves.getStreamTypes().empty()) {
+ auto stream = curCurves.getStreamTypes().front();
+ curSrcDevice = mEngine->getOutputDevicesForStream(stream, false).types();
} else {
- applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
- stream, curStreamDevice);
+ ALOGE("%s: Invalid src %d: no valid attributes nor stream",__func__, curVolSrc);
+ continue;
}
- // rescale index before applying to curStream as ranges may be different for
- // stream and curStream
- int idx = rescaleVolumeIndex(index, stream, (audio_stream_type_t)curStream);
+ curSrcDevice = Volume::getDeviceForVolume(curSrcDevice);
+ if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) && ((curDevice & device) == 0)) {
+ continue;
+ }
+ if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
+ curSrcDevice |= device;
+ applyVolume = (curDevice & curSrcDevice) != 0;
+ } else {
+ applyVolume = !curves.hasVolumeIndexForDevice(curSrcDevice);
+ }
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
- status_t volStatus =
- checkAndSetVolume((audio_stream_type_t)curStream, idx, desc, curDevice,
- (stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
+ status_t volStatus = checkAndSetVolume(
+ curCurves, curVolSrc, index, desc, curDevice,
+ ((vs == toVolumeSource(AUDIO_STREAM_SYSTEM))?
+ TOUCH_SOUND_FIXED_DELAY_MS : 0));
if (volStatus != NO_ERROR) {
status = volStatus;
}
}
}
}
+ mpClientInterface->onAudioVolumeGroupChanged(group, 0 /*flags*/);
return status;
}
-status_t AudioPolicyManager::getStreamVolumeIndex(audio_stream_type_t stream,
- int *index,
- audio_devices_t device)
+status_t AudioPolicyManager::setVolumeCurveIndex(int index,
+ audio_devices_t device,
+ IVolumeCurves &volumeCurves)
{
- if (index == NULL) {
+ // VOICE_CALL stream has minVolumeIndex > 0 but can be muted directly by an
+ // app that has MODIFY_PHONE_STATE permission.
+ bool hasVoice = hasVoiceStream(volumeCurves.getStreamTypes());
+ if (((index < volumeCurves.getVolumeIndexMin()) && !(hasVoice && index == 0)) ||
+ (index > volumeCurves.getVolumeIndexMax())) {
+ ALOGD("%s: wrong index %d min=%d max=%d", __FUNCTION__, index,
+ volumeCurves.getVolumeIndexMin(), volumeCurves.getVolumeIndexMax());
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
+
+ // Force max volume if stream cannot be muted
+ if (!volumeCurves.canBeMuted()) index = volumeCurves.getVolumeIndexMax();
+
+ ALOGV("%s device %08x, index %d", __FUNCTION__ , device, index);
+ volumeCurves.addCurrentVolumeIndex(device, index);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device)
+{
// if device is AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, return volume for device selected for this
// stream by the engine.
if (device == AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
- device = mEngine->getOutputDevicesForStream(stream, true /*fromCache*/).types();
+ device = mEngine->getOutputDevicesForAttributes(attr, nullptr, true /*fromCache*/).types();
+ }
+ return getVolumeIndex(getVolumeCurves(attr), index, device);
+}
+
+status_t AudioPolicyManager::getVolumeIndex(const IVolumeCurves &curves,
+ int &index,
+ audio_devices_t device) const
+{
+ if (!audio_is_output_device(device)) {
+ return BAD_VALUE;
}
device = Volume::getDeviceForVolume(device);
+ index = curves.getVolumeIndex(device);
+ ALOGV("%s: device %08x index %d", __FUNCTION__, device, index);
+ return NO_ERROR;
+}
- *index = mVolumeCurves->getVolumeIndex(stream, device);
- ALOGV("getStreamVolumeIndex() stream %d device %08x index %d", stream, device, *index);
+status_t AudioPolicyManager::getMinVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index)
+{
+ index = getVolumeCurves(attr).getVolumeIndexMin();
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::getMaxVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index)
+{
+ index = getVolumeCurves(attr).getVolumeIndexMax();
return NO_ERROR;
}
@@ -2592,19 +2732,12 @@
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
- bool active = false;
- for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT && !active; curStream++) {
- if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
- continue;
- }
- active = mOutputs.isActive(streamToVolumeSource((audio_stream_type_t)curStream), inPastMs);
- }
- return active;
+ return mOutputs.isActive(toVolumeSource(stream), inPastMs);
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isActiveRemotely(streamToVolumeSource((audio_stream_type_t)stream), inPastMs);
+ return mOutputs.isActiveRemotely(toVolumeSource(stream), inPastMs);
}
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -2902,7 +3035,6 @@
mHwModulesAll.dump(dst);
mOutputs.dump(dst);
mInputs.dump(dst);
- mVolumeCurves->dump(dst);
mEffects.dump(dst);
mAudioPatches.dump(dst);
mPolicyMixes.dump(dst);
@@ -3634,11 +3766,11 @@
struct audio_patch dummyPatch = {};
sp<AudioPatch> patchDesc = new AudioPatch(&dummyPatch, uid);
- sp<SourceClientDescriptor> sourceDesc = new SourceClientDescriptor(
- *portId, uid, *attributes, patchDesc, srcDevice,
- mEngine->getStreamTypeForAttributes(*attributes),
- mEngine->getProductStrategyForAttributes(*attributes),
- streamToVolumeSource(mEngine->getStreamTypeForAttributes(*attributes)));
+ sp<SourceClientDescriptor> sourceDesc =
+ new SourceClientDescriptor(*portId, uid, *attributes, patchDesc, srcDevice,
+ mEngine->getStreamTypeForAttributes(*attributes),
+ mEngine->getProductStrategyForAttributes(*attributes),
+ toVolumeSource(*attributes));
status_t status = connectAudioSource(sourceDesc);
if (status == NO_ERROR) {
@@ -3803,7 +3935,7 @@
float AudioPolicyManager::getStreamVolumeDB(
audio_stream_type_t stream, int index, audio_devices_t device)
{
- return computeVolume(stream, index, device);
+ return computeVolume(getVolumeCurves(stream), toVolumeSource(stream), index, device);
}
status_t AudioPolicyManager::getSurroundFormats(unsigned int *numSurroundFormats,
@@ -4054,17 +4186,17 @@
status_t ret;
if (property_get_bool("ro.bluetooth.a2dp_offload.supported", false)) {
- if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false) &&
+ property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // Both BluetoothAudio@2.0 and BluetoothA2dp@1.0 (Offload) are disabled, so use
+ // the legacy hardware module for A2DP and hearing aid.
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
+ } else if (property_get_bool("persist.bluetooth.a2dp_offload.disabled", false)) {
+ // A2DP offload supported but disabled: try to use special XML file
fileNames.push_back(AUDIO_POLICY_A2DP_OFFLOAD_DISABLED_XML_CONFIG_FILE_NAME);
- } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
- // This property persist.bluetooth.bluetooth_audio_hal.enabled is temporary only.
- // xml files AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME, although having
- // the same name, must be different in offload and non offload cases in device
- // specific configuration file.
- fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
}
- } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.enabled", false)) {
- fileNames.push_back(AUDIO_POLICY_BLUETOOTH_HAL_ENABLED_XML_CONFIG_FILE_NAME);
+ } else if (property_get_bool("persist.bluetooth.bluetooth_audio_hal.disabled", false)) {
+ fileNames.push_back(AUDIO_POLICY_BLUETOOTH_LEGACY_HAL_XML_CONFIG_FILE_NAME);
}
fileNames.push_back(AUDIO_POLICY_XML_CONFIG_FILE_NAME);
@@ -4089,9 +4221,7 @@
mpClientInterface(clientInterface),
mLimitRingtoneVolume(false), mLastVoiceVolume(-1.0f),
mA2dpSuspended(false),
- mVolumeCurves(new VolumeCurvesCollection()),
- mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices,
- mDefaultOutputDevice, static_cast<VolumeCurvesCollection*>(mVolumeCurves.get())),
+ mConfig(mHwModulesAll, mAvailableOutputDevices, mAvailableInputDevices, mDefaultOutputDevice),
mAudioPortGeneration(1),
mBeaconMuteRefCount(0),
mBeaconPlayingRefCount(0),
@@ -4125,8 +4255,6 @@
}
status_t AudioPolicyManager::initialize() {
- mVolumeCurves->initializeVolumeCurves(getConfig().isSpeakerDrcEnabled());
-
// Once policy config has been parsed, retrieve an instance of the engine and initialize it.
audio_policy::EngineInstance *engineInstance = audio_policy::EngineInstance::getInstance();
if (!engineInstance) {
@@ -4473,7 +4601,7 @@
sp<AudioPolicyMix> policyMix;
if (mPolicyMixes.getAudioPolicyMix(address, policyMix) == NO_ERROR) {
policyMix->setOutput(desc);
- desc->mPolicyMix = policyMix->getMix();
+ desc->mPolicyMix = policyMix;
} else {
ALOGW("checkOutputsForDevice() cannot find policy for address %s",
address.string());
@@ -5035,7 +5163,7 @@
if ((hasVoiceStream(streams) &&
(isInCall() || mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc))) ||
- (hasStream(streams, AUDIO_STREAM_ALARM) &&
+ ((hasStream(streams, AUDIO_STREAM_ALARM) || hasStream(streams, AUDIO_STREAM_ENFORCED_AUDIBLE)) &&
mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) ||
outputDesc->isStrategyActive(productStrategy)) {
// Retrieval of devices for voice DL is done on primary output profile, cannot
@@ -5183,11 +5311,10 @@
// mute/unmute AUDIO_STREAM_TTS on all outputs
ALOGV("\t muting %d", mute);
uint32_t maxLatency = 0;
+ auto ttsVolumeSource = toVolumeSource(AUDIO_STREAM_TTS);
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
- setStreamMute(AUDIO_STREAM_TTS, mute/*on*/,
- desc,
- 0 /*delay*/, AUDIO_DEVICE_NONE);
+ setVolumeSourceMute(ttsVolumeSource, mute/*on*/, desc, 0 /*delay*/, AUDIO_DEVICE_NONE);
const uint32_t latency = desc->latency() * 2;
if (latency > maxLatency) {
maxLatency = latency;
@@ -5271,15 +5398,12 @@
if (muteWaitMs < tempMuteWaitMs) {
muteWaitMs = tempMuteWaitMs;
}
-
- for (const auto &productStrategy : productStrategies) {
- if (outputDesc->isStrategyActive(productStrategy)) {
- // make sure that we do not start the temporary mute period too early in case of
- // delayed device change
- setStrategyMute(productStrategy, true, outputDesc, delayMs);
- setStrategyMute(productStrategy, false, outputDesc, delayMs + tempMuteDurationMs,
+ for (const auto &activeVs : outputDesc->getActiveVolumeSources()) {
+ // make sure that we do not start the temporary mute period too early in case of
+ // delayed device change
+ setVolumeSourceMute(activeVs, true, outputDesc, delayMs);
+ setVolumeSourceMute(activeVs, false, outputDesc, delayMs + tempMuteDurationMs,
devices.types());
- }
}
}
@@ -5503,51 +5627,51 @@
return NULL;
}
-float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
+float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
+ VolumeSource volumeSource,
int index,
audio_devices_t device)
{
- float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
+ float volumeDb = curves.volIndexToDb(Volume::getDeviceCategory(device), index);
// handle the case of accessibility active while a ringtone is playing: if the ringtone is much
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
// exploration of the dialer UI. In this situation, bring the accessibility volume closer to
// the ringtone volume
- if ((stream == AUDIO_STREAM_ACCESSIBILITY)
- && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState())
- && isStreamActive(AUDIO_STREAM_RING, 0)) {
- const float ringVolumeDB = computeVolume(AUDIO_STREAM_RING, index, device);
- return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
+ const auto callVolumeSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL);
+ const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING);
+ const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC);
+ const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM);
+
+ if (volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY)
+ && (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
+ mOutputs.isActive(ringVolumeSrc, 0)) {
+ auto &ringCurves = getVolumeCurves(AUDIO_STREAM_RING);
+ const float ringVolumeDb = computeVolume(ringCurves, ringVolumeSrc, index, device);
+ return ringVolumeDb - 4 > volumeDb ? ringVolumeDb - 4 : volumeDb;
}
// in-call: always cap volume by voice volume + some low headroom
- if ((stream != AUDIO_STREAM_VOICE_CALL) &&
- (isInCall() || mOutputs.isActiveLocally(streamToVolumeSource(AUDIO_STREAM_VOICE_CALL)))) {
- switch (stream) {
- case AUDIO_STREAM_SYSTEM:
- case AUDIO_STREAM_RING:
- case AUDIO_STREAM_MUSIC:
- case AUDIO_STREAM_ALARM:
- case AUDIO_STREAM_NOTIFICATION:
- case AUDIO_STREAM_ENFORCED_AUDIBLE:
- case AUDIO_STREAM_DTMF:
- case AUDIO_STREAM_ACCESSIBILITY: {
- int voiceVolumeIndex =
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_VOICE_CALL, device);
- const float maxVoiceVolDb =
- computeVolume(AUDIO_STREAM_VOICE_CALL, voiceVolumeIndex, device)
+ if ((volumeSource != callVolumeSrc && (isInCall() ||
+ mOutputs.isActiveLocally(callVolumeSrc))) &&
+ (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM) ||
+ volumeSource == ringVolumeSrc || volumeSource == musicVolumeSrc ||
+ volumeSource == alarmVolumeSrc ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_ACCESSIBILITY))) {
+ auto &voiceCurves = getVolumeCurves(callVolumeSrc);
+ int voiceVolumeIndex = voiceCurves.getVolumeIndex(device);
+ const float maxVoiceVolDb =
+ computeVolume(voiceCurves, callVolumeSrc, voiceVolumeIndex, device)
+ IN_CALL_EARPIECE_HEADROOM_DB;
- if (volumeDB > maxVoiceVolDb) {
- ALOGV("computeVolume() stream %d at vol=%f overriden by stream %d at vol=%f",
- stream, volumeDB, AUDIO_STREAM_VOICE_CALL, maxVoiceVolDb);
- volumeDB = maxVoiceVolDb;
- }
- } break;
- default:
- break;
+ if (volumeDb > maxVoiceVolDb) {
+ ALOGV("%s volume source %d at vol=%f overriden by volume group %d at vol=%f", __func__,
+ volumeSource, volumeDb, callVolumeSrc, maxVoiceVolDb);
+ volumeDb = maxVoiceVolDb;
}
}
-
// if a headset is connected, apply the following rules to ring tones and notifications
// to avoid sound level bursts in user's ears:
// - always attenuate notifications volume by 6dB
@@ -5555,57 +5679,54 @@
// speaker is part of the select devices
// - if music is playing, always limit the volume to current music volume,
// with a minimum threshold at -36dB so that notification is always perceived.
- if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
- AUDIO_DEVICE_OUT_WIRED_HEADSET |
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
- AUDIO_DEVICE_OUT_USB_HEADSET |
- AUDIO_DEVICE_OUT_HEARING_AID)) &&
- ((stream == AUDIO_STREAM_ALARM || stream == AUDIO_STREAM_RING)
- || (stream == AUDIO_STREAM_NOTIFICATION)
- || (stream == AUDIO_STREAM_SYSTEM)
- || ((stream == AUDIO_STREAM_ENFORCED_AUDIBLE) &&
- (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
- AUDIO_POLICY_FORCE_NONE))) &&
- mVolumeCurves->canBeMuted(stream)) {
+ if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP | AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
+ AUDIO_DEVICE_OUT_WIRED_HEADSET | AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
+ AUDIO_DEVICE_OUT_USB_HEADSET | AUDIO_DEVICE_OUT_HEARING_AID)) &&
+ ((volumeSource == alarmVolumeSrc ||
+ volumeSource == ringVolumeSrc) ||
+ (volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION)) ||
+ (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM)) ||
+ ((volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE)) &&
+ (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
+ curves.canBeMuted()) {
+
// when the phone is ringing we must consider that music could have been paused just before
// by the music application and behave as if music was active if the last music track was
// just stopped
if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
mLimitRingtoneVolume) {
- volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+ volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
audio_devices_t musicDevice =
mEngine->getOutputDevicesForAttributes(attributes_initializer(AUDIO_USAGE_MEDIA),
nullptr, true /*fromCache*/).types();
- float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
- mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
- musicDevice),
- musicDevice);
- float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
- musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
- if (volumeDB > minVolDB) {
- volumeDB = minVolDB;
- ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB);
+ auto &musicCurves = getVolumeCurves(AUDIO_STREAM_MUSIC);
+ float musicVolDb = computeVolume(musicCurves, musicVolumeSrc,
+ musicCurves.getVolumeIndex(musicDevice), musicDevice);
+ float minVolDb = (musicVolDb > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
+ musicVolDb : SONIFICATION_HEADSET_VOLUME_MIN_DB;
+ if (volumeDb > minVolDb) {
+ volumeDb = minVolDb;
+ ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDb, musicVolDb);
}
if (device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
// on A2DP, also ensure notification volume is not too low compared to media when
// intended to be played
- if ((volumeDB > -96.0f) &&
- (musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDB)) {
- ALOGV("computeVolume increasing volume for stream=%d device=0x%X from %f to %f",
- stream, device,
- volumeDB, musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
- volumeDB = musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
+ if ((volumeDb > -96.0f) &&
+ (musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDb)) {
+ ALOGV("%s increasing volume for volume source=%d device=0x%X from %f to %f",
+ __func__, volumeSource, device, volumeDb,
+ musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
+ volumeDb = musicVolDb - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
}
}
} else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
- (stream != AUDIO_STREAM_ALARM && stream != AUDIO_STREAM_RING)) {
- volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
+ (!(volumeSource == alarmVolumeSrc || volumeSource == ringVolumeSrc))) {
+ volumeDb += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
}
}
- return volumeDB;
+ return volumeDb;
}
int AudioPolicyManager::rescaleVolumeIndex(int srcIndex,
@@ -5615,10 +5736,12 @@
if (srcStream == dstStream) {
return srcIndex;
}
- float minSrc = (float)mVolumeCurves->getVolumeIndexMin(srcStream);
- float maxSrc = (float)mVolumeCurves->getVolumeIndexMax(srcStream);
- float minDst = (float)mVolumeCurves->getVolumeIndexMin(dstStream);
- float maxDst = (float)mVolumeCurves->getVolumeIndexMax(dstStream);
+ auto &srcCurves = getVolumeCurves(srcStream);
+ auto &dstCurves = getVolumeCurves(dstStream);
+ float minSrc = (float)srcCurves.getVolumeIndexMin();
+ float maxSrc = (float)srcCurves.getVolumeIndexMax();
+ float minDst = (float)dstCurves.getVolumeIndexMin();
+ float maxDst = (float)dstCurves.getVolumeIndexMax();
// preserve mute request or correct range
if (srcIndex < minSrc) {
@@ -5632,58 +5755,61 @@
return (int)(minDst + ((srcIndex - minSrc) * (maxDst - minDst)) / (maxSrc - minSrc));
}
-status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
- int index,
- const sp<AudioOutputDescriptor>& outputDesc,
- audio_devices_t device,
- int delayMs,
- bool force)
+status_t AudioPolicyManager::checkAndSetVolume(IVolumeCurves &curves,
+ VolumeSource volumeSource,
+ int index,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ audio_devices_t device,
+ int delayMs,
+ bool force)
{
- // do not change actual stream volume if the stream is muted
- if (outputDesc->isMuted(streamToVolumeSource(stream))) {
- ALOGVV("%s() stream %d muted count %d", __func__, stream, outputDesc->getMuteCount(stream));
+ // do not change actual attributes volume if the attributes is muted
+ if (outputDesc->isMuted(volumeSource)) {
+ ALOGVV("%s: volume source %d muted count %d active=%d", __func__, volumeSource,
+ outputDesc->getMuteCount(volumeSource), outputDesc->isActive(volumeSource));
return NO_ERROR;
}
+ VolumeSource callVolSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL);
+ VolumeSource btScoVolSrc = toVolumeSource(AUDIO_STREAM_BLUETOOTH_SCO);
+ bool isVoiceVolSrc = callVolSrc == volumeSource;
+ bool isBtScoVolSrc = btScoVolSrc == volumeSource;
+
audio_policy_forced_cfg_t forceUseForComm =
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION);
// do not change in call volume if bluetooth is connected and vice versa
- if ((stream == AUDIO_STREAM_VOICE_CALL && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
- (stream == AUDIO_STREAM_BLUETOOTH_SCO && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO)) {
- ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
- stream, forceUseForComm);
+ // if sco and call follow same curves, bypass forceUseForComm
+ if ((callVolSrc != btScoVolSrc) &&
+ ((isVoiceVolSrc && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
+ (isBtScoVolSrc && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO))) {
+ ALOGV("%s cannot set volume group %d volume with force use = %d for comm", __func__,
+ volumeSource, forceUseForComm);
return INVALID_OPERATION;
}
-
if (device == AUDIO_DEVICE_NONE) {
device = outputDesc->devices().types();
}
- float volumeDb = computeVolume(stream, index, device);
+ float volumeDb = computeVolume(curves, volumeSource, index, device);
if (outputDesc->isFixedVolume(device) ||
// Force VoIP volume to max for bluetooth SCO
- ((stream == AUDIO_STREAM_VOICE_CALL || stream == AUDIO_STREAM_BLUETOOTH_SCO) &&
- (device & AUDIO_DEVICE_OUT_ALL_SCO) != 0)) {
+ ((isVoiceVolSrc || isBtScoVolSrc) && (device & AUDIO_DEVICE_OUT_ALL_SCO) != 0)) {
volumeDb = 0.0f;
}
+ outputDesc->setVolume(volumeDb, volumeSource, curves.getStreamTypes(), device, delayMs, force);
- outputDesc->setVolume(volumeDb, stream, device, delayMs, force);
-
- if (stream == AUDIO_STREAM_VOICE_CALL ||
- stream == AUDIO_STREAM_BLUETOOTH_SCO) {
+ if (isVoiceVolSrc || isBtScoVolSrc) {
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
- if (stream == AUDIO_STREAM_VOICE_CALL) {
- voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
+ if (isVoiceVolSrc) {
+ voiceVolume = (float)index/(float)curves.getVolumeIndexMax();
} else {
voiceVolume = 1.0;
}
-
if (voiceVolume != mLastVoiceVolume) {
mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
mLastVoiceVolume = voiceVolume;
}
}
-
return NO_ERROR;
}
@@ -5693,14 +5819,10 @@
bool force)
{
ALOGVV("applyStreamVolumes() for device %08x", device);
-
- for (int stream = 0; stream < AUDIO_STREAM_FOR_POLICY_CNT; stream++) {
- checkAndSetVolume((audio_stream_type_t)stream,
- mVolumeCurves->getVolumeIndex((audio_stream_type_t)stream, device),
- outputDesc,
- device,
- delayMs,
- force);
+ for (const auto &volumeGroup : mEngine->getVolumeGroups()) {
+ auto &curves = getVolumeCurves(toVolumeSource(volumeGroup));
+ checkAndSetVolume(curves, toVolumeSource(volumeGroup),
+ curves.getVolumeIndex(device), outputDesc, device, delayMs, force);
}
}
@@ -5710,44 +5832,55 @@
int delayMs,
audio_devices_t device)
{
- for (auto stream: mEngine->getStreamTypesForProductStrategy(strategy)) {
- ALOGVV("%s() stream %d, mute %d, output ID %d", __FUNCTION__, stream, on,
- outputDesc->getId());
- setStreamMute(stream, on, outputDesc, delayMs, device);
+ std::vector<VolumeSource> sourcesToMute;
+ for (auto attributes: mEngine->getAllAttributesForProductStrategy(strategy)) {
+ ALOGVV("%s() attributes %s, mute %d, output ID %d", __func__,
+ toString(attributes).c_str(), on, outputDesc->getId());
+ VolumeSource source = toVolumeSource(attributes);
+ if (std::find(begin(sourcesToMute), end(sourcesToMute), source) == end(sourcesToMute)) {
+ sourcesToMute.push_back(source);
+ }
}
+ for (auto source : sourcesToMute) {
+ setVolumeSourceMute(source, on, outputDesc, delayMs, device);
+ }
+
}
-void AudioPolicyManager::setStreamMute(audio_stream_type_t stream,
- bool on,
- const sp<AudioOutputDescriptor>& outputDesc,
- int delayMs,
- audio_devices_t device)
+void AudioPolicyManager::setVolumeSourceMute(VolumeSource volumeSource,
+ bool on,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ int delayMs,
+ audio_devices_t device,
+ bool activeOnly)
{
+ if (activeOnly && !outputDesc->isActive(volumeSource)) {
+ return;
+ }
if (device == AUDIO_DEVICE_NONE) {
device = outputDesc->devices().types();
}
-
- ALOGVV("setStreamMute() stream %d, mute %d, mMuteCount %d device %04x",
- stream, on, outputDesc->getMuteCount(stream), device);
-
+ auto &curves = getVolumeCurves(volumeSource);
if (on) {
- if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
- if (mVolumeCurves->canBeMuted(stream) &&
- ((stream != AUDIO_STREAM_ENFORCED_AUDIBLE) ||
- (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) {
- checkAndSetVolume(stream, 0, outputDesc, device, delayMs);
+ if (!outputDesc->isMuted(volumeSource)) {
+ if (curves.canBeMuted() &&
+ (volumeSource != toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
+ (mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
+ AUDIO_POLICY_FORCE_NONE))) {
+ checkAndSetVolume(curves, volumeSource, 0, outputDesc, device, delayMs);
}
}
- // increment mMuteCount after calling checkAndSetVolume() so that volume change is not ignored
- outputDesc->incMuteCount(streamToVolumeSource(stream));
+ // increment mMuteCount after calling checkAndSetVolume() so that volume change is not
+ // ignored
+ outputDesc->incMuteCount(volumeSource);
} else {
- if (!outputDesc->isMuted(streamToVolumeSource(stream))) {
- ALOGV("setStreamMute() unmuting non muted stream!");
+ if (!outputDesc->isMuted(volumeSource)) {
+ ALOGV("%s unmuting non muted attributes!", __func__);
return;
}
- if (outputDesc->decMuteCount(streamToVolumeSource(stream)) == 0) {
- checkAndSetVolume(stream,
- mVolumeCurves->getVolumeIndex(stream, device),
+ if (outputDesc->decMuteCount(volumeSource) == 0) {
+ checkAndSetVolume(curves, volumeSource,
+ curves.getVolumeIndex(device),
outputDesc,
device,
delayMs);
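Throughout the AudioPolicyManager.cpp hunks above, output and input descriptors now hold their policy mix as a weak reference and every call site promotes it before dereferencing. A minimal sketch of that idiom, assuming the usual android::RefBase / wp<> semantics (MixLike and OutputLike are stand-ins, not classes from this patch):

    #include <utils/RefBase.h>
    using android::sp;
    using android::wp;

    struct MixLike : public android::RefBase {
        int mMixType = 0;
    };

    struct OutputLike {
        wp<MixLike> mPolicyMix;  // weak reference: does not keep the mix alive

        void useMixIfStillRegistered() {
            sp<MixLike> mix = mPolicyMix.promote();  // nullptr once the mix is destroyed
            if (mix != nullptr) {
                // dereference only through this promoted strong reference
            }
        }
    };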
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index 70ad6ac..1fc61e5 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -49,7 +49,7 @@
#include <AudioPolicyMix.h>
#include <EffectDescriptor.h>
#include <SoundTriggerSession.h>
-#include <VolumeCurve.h>
+#include "TypeConverter.h"
namespace android {
@@ -143,9 +143,17 @@
virtual status_t stopInput(audio_port_handle_t portId);
virtual void releaseInput(audio_port_handle_t portId);
virtual void closeAllInputs();
- virtual void initStreamVolume(audio_stream_type_t stream,
- int indexMin,
- int indexMax);
+ /**
+ * @brief initStreamVolume: even if the engine volume files provide min and max, keep this
+ * API for compatibility reasons.
+ * AudioServer will get the min and max and may overwrite them if:
+ * - a property is used (highest priority)
+ * - they are not defined (-1 by convention), the case when still using APM volume table XML files
+ * @param stream to be considered
+ * @param indexMin to set
+ * @param indexMax to set
+ */
+ virtual void initStreamVolume(audio_stream_type_t stream, int indexMin, int indexMax);
virtual status_t setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device);
@@ -153,6 +161,26 @@
int *index,
audio_devices_t device);
+ virtual status_t setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device);
+ virtual status_t getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device);
+ virtual status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
+
+ virtual status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr, int &index);
+
+ status_t setVolumeGroupIndex(IVolumeCurves &volumeCurves, volume_group_t group, int index,
+ audio_devices_t device, const audio_attributes_t attributes);
+
+ status_t setVolumeCurveIndex(int index,
+ audio_devices_t device,
+ IVolumeCurves &volumeCurves);
+
+ status_t getVolumeIndex(const IVolumeCurves &curves, int &index,
+ audio_devices_t device) const;
+
// return the strategy corresponding to a given stream type
virtual uint32_t getStrategyForStream(audio_stream_type_t stream)
{
@@ -260,9 +288,23 @@
return mEngine->listAudioProductStrategies(strategies);
}
- virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa)
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy)
{
- return mEngine->getProductStrategyForAttributes(aa.getAttributes());
+ productStrategy = mEngine->getProductStrategyForAttributes(aa.getAttributes());
+ return productStrategy != PRODUCT_STRATEGY_NONE ? NO_ERROR : BAD_VALUE;
+ }
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+ {
+ return mEngine->listAudioVolumeGroups(groups);
+ }
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+ {
+ volumeGroup = mEngine->getVolumeGroupForAttributes(aa.getAttributes());
+ return volumeGroup != VOLUME_GROUP_NONE ? NO_ERROR : BAD_VALUE;
}
protected:
@@ -310,12 +352,48 @@
{
return mAvailableInputDevices;
}
- virtual IVolumeCurvesCollection &getVolumeCurves() { return *mVolumeCurves; }
virtual const sp<DeviceDescriptor> &getDefaultOutputDevice() const
{
return mDefaultOutputDevice;
}
+ std::vector<volume_group_t> getVolumeGroups() const
+ {
+ return mEngine->getVolumeGroups();
+ }
+
+ VolumeSource toVolumeSource(volume_group_t volumeGroup) const
+ {
+ return static_cast<VolumeSource>(volumeGroup);
+ }
+ VolumeSource toVolumeSource(const audio_attributes_t &attributes) const
+ {
+ return toVolumeSource(mEngine->getVolumeGroupForAttributes(attributes));
+ }
+ VolumeSource toVolumeSource(audio_stream_type_t stream) const
+ {
+ return toVolumeSource(mEngine->getVolumeGroupForStreamType(stream));
+ }
+ IVolumeCurves &getVolumeCurves(VolumeSource volumeSource)
+ {
+ auto *curves = mEngine->getVolumeCurvesForVolumeGroup(
+ static_cast<volume_group_t>(volumeSource));
+ ALOG_ASSERT(curves != nullptr, "No curves for volume source %d", volumeSource);
+ return *curves;
+ }
+ IVolumeCurves &getVolumeCurves(const audio_attributes_t &attr)
+ {
+ auto *curves = mEngine->getVolumeCurvesForAttributes(attr);
+ ALOG_ASSERT(curves != nullptr, "No curves for attributes %s", toString(attr).c_str());
+ return *curves;
+ }
+ IVolumeCurves &getVolumeCurves(audio_stream_type_t stream)
+ {
+ auto *curves = mEngine->getVolumeCurvesForStreamType(stream);
+ ALOG_ASSERT(curves != nullptr, "No curves for stream %s", toString(stream).c_str());
+ return *curves;
+ }
+
void addOutput(audio_io_handle_t output, const sp<SwAudioOutputDescriptor>& outputDesc);
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
@@ -340,7 +418,8 @@
// compute the actual volume for a given stream according to the requested index and a particular
// device
- virtual float computeVolume(audio_stream_type_t stream,
+ virtual float computeVolume(IVolumeCurves &curves,
+ VolumeSource volumeSource,
int index,
audio_devices_t device);
@@ -349,7 +428,8 @@
audio_stream_type_t srcStream,
audio_stream_type_t dstStream);
// check that volume change is permitted, compute and send new volume to audio hardware
- virtual status_t checkAndSetVolume(audio_stream_type_t stream, int index,
+ virtual status_t checkAndSetVolume(IVolumeCurves &curves,
+ VolumeSource volumeSource, int index,
const sp<AudioOutputDescriptor>& outputDesc,
audio_devices_t device,
int delayMs = 0, bool force = false);
@@ -373,12 +453,22 @@
int delayMs = 0,
audio_devices_t device = AUDIO_DEVICE_NONE);
- // Mute or unmute the stream on the specified output
- void setStreamMute(audio_stream_type_t stream,
- bool on,
- const sp<AudioOutputDescriptor>& outputDesc,
- int delayMs = 0,
- audio_devices_t device = (audio_devices_t)0);
+ /**
+ * @brief setVolumeSourceMute Mute or unmute the volume source on the specified output
+ * @param volumeSource to be muted/unmuted (may host legacy streams or, by extension, a set
+ * of audio attributes)
+ * @param on true to mute, false to unmute
+ * @param outputDesc on which the client following the volume group shall be muted/unmuted
+ * @param delayMs
+ * @param device
+ * @param activeOnly if true, mute only if the volume group is active on the output.
+ */
+ void setVolumeSourceMute(VolumeSource volumeSource,
+ bool on,
+ const sp<AudioOutputDescriptor>& outputDesc,
+ int delayMs = 0,
+ audio_devices_t device = AUDIO_DEVICE_NONE,
+ bool activeOnly = false);
audio_mode_t getPhoneState();
@@ -624,12 +714,12 @@
float mLastVoiceVolume; // last voice volume value sent to audio HAL
bool mA2dpSuspended; // true if A2DP output is suspended
- std::unique_ptr<IVolumeCurvesCollection> mVolumeCurves; // Volume Curves per use case and device category
EffectDescriptorCollection mEffects; // list of registered audio effects
sp<DeviceDescriptor> mDefaultOutputDevice; // output device selected by default at boot time
HwModuleCollection mHwModules; // contains only modules that have been loaded successfully
HwModuleCollection mHwModulesAll; // normally not needed, used during construction and for
// dumps
+
AudioPolicyConfig mConfig;
std::atomic<uint32_t> mAudioPortGeneration;
@@ -724,7 +814,8 @@
audio_session_t session,
audio_stream_type_t stream,
const audio_config_t *config,
- audio_output_flags_t *flags);
+ audio_output_flags_t *flags,
+ bool forceMutingHaptic = false);
/**
* @brief getInputForDevice selects an input handle for a given input device and
@@ -743,7 +834,7 @@
const audio_attributes_t &attributes,
const audio_config_base_t *config,
audio_input_flags_t flags,
- AudioMix *policyMix);
+ const sp<AudioPolicyMix> &policyMix);
// event is one of STARTING_OUTPUT, STARTING_BEACON, STOPPING_OUTPUT, STOPPING_BEACON
// returns 0 if no mute/unmute event happened, the largest latency of the device where
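The helpers added to AudioPolicyManager.h above reduce a VolumeSource to the engine's volume group id (a plain static_cast) and fetch the matching IVolumeCurves from the engine. A rough usage sketch inside the manager, reusing the names from this header and from the patched computeVolume() signature (the media usage and speaker device are arbitrary examples, not mandated by the patch):

    // Resolve attributes to a volume source and its curves, then compute a gain in dB.
    audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
    VolumeSource vs = toVolumeSource(attr);            // == volume group id, via static_cast
    IVolumeCurves &curves = getVolumeCurves(attr);     // curves are owned by the engine
    int index = curves.getVolumeIndex(AUDIO_DEVICE_OUT_SPEAKER);
    float volumeDb = computeVolume(curves, vs, index, AUDIO_DEVICE_OUT_SPEAKER);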
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index d826192..5748334 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -222,6 +222,12 @@
clientConfig, clientEffects, deviceConfig, effects, patchHandle, source);
}
+void AudioPolicyService::AudioPolicyClient::onAudioVolumeGroupChanged(volume_group_t group,
+ int flags)
+{
+ mAudioPolicyService->onAudioVolumeGroupChanged(group, flags);
+}
+
audio_unique_id_t AudioPolicyService::AudioPolicyClient::newAudioUniqueId(audio_unique_id_use_t use)
{
return AudioSystem::newAudioUniqueId(use);
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 8ddf824..a672521 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -20,7 +20,7 @@
#include "AudioPolicyService.h"
#include "TypeConverter.h"
#include <media/MediaAnalyticsItem.h>
-#include <mediautils/ServiceUtilities.h>
+#include <media/AudioPolicy.h>
#include <utils/Log.h>
namespace android {
@@ -166,7 +166,7 @@
return mAudioPolicyManager->getOutput(stream);
}
-status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *attr,
+status_t AudioPolicyService::getOutputForAttr(const audio_attributes_t *originalAttr,
audio_io_handle_t *output,
audio_session_t session,
audio_stream_type_t *stream,
@@ -190,9 +190,13 @@
"%s uid %d tried to pass itself off as %d", __FUNCTION__, callingUid, uid);
uid = callingUid;
}
+ audio_attributes_t attr = *originalAttr;
+ if (!mPackageManager.allowPlaybackCapture(uid)) {
+ attr.flags |= AUDIO_FLAG_NO_CAPTURE;
+ }
audio_output_flags_t originalFlags = flags;
AutoCallerClear acc;
- status_t result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid,
+ status_t result = mAudioPolicyManager->getOutputForAttr(&attr, output, session, stream, uid,
config,
&flags, selectedDeviceId, portId,
secondaryOutputs);
@@ -208,14 +212,14 @@
*selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
*portId = AUDIO_PORT_HANDLE_NONE;
secondaryOutputs->clear();
- result = mAudioPolicyManager->getOutputForAttr(attr, output, session, stream, uid, config,
+ result = mAudioPolicyManager->getOutputForAttr(&attr, output, session, stream, uid, config,
&flags, selectedDeviceId, portId,
secondaryOutputs);
}
if (result == NO_ERROR) {
sp <AudioPlaybackClient> client =
- new AudioPlaybackClient(*attr, *output, uid, pid, session, *selectedDeviceId, *stream);
+ new AudioPlaybackClient(attr, *output, uid, pid, session, *selectedDeviceId, *stream);
mAudioPlaybackClients.add(*portId, client);
}
return result;
@@ -403,6 +407,9 @@
if (status == NO_ERROR) {
// enforce permission (if any) required for each type of input
switch (inputType) {
+ case AudioPolicyInterface::API_INPUT_MIX_PUBLIC_CAPTURE_PLAYBACK:
+ // this use case has been validated in audio service with a MediaProjection token,
+ // and doesn't rely on regular permissions
case AudioPolicyInterface::API_INPUT_LEGACY:
break;
case AudioPolicyInterface::API_INPUT_TELEPHONY_RX:
@@ -449,28 +456,14 @@
return NO_ERROR;
}
-// this is replicated from frameworks/av/media/libaudioclient/AudioRecord.cpp
-// XXX -- figure out how to put it into a common, shared location
-
-static std::string audioSourceString(audio_source_t value) {
- std::string source;
- if (SourceTypeConverter::toString(value, source)) {
- return source;
- }
- char rawbuffer[16]; // room for "%d"
- snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
- return rawbuffer;
-}
-
std::string AudioPolicyService::getDeviceTypeStrForPortId(audio_port_handle_t portId) {
- std::string typeStr;
struct audio_port port = {};
port.id = portId;
status_t status = mAudioPolicyManager->getAudioPort(&port);
if (status == NO_ERROR && port.type == AUDIO_PORT_TYPE_DEVICE) {
- deviceToString(port.ext.device.type, typeStr);
+ return toString(port.ext.device.type);
}
- return typeStr;
+ return {};
}
status_t AudioPolicyService::startInput(audio_port_handle_t portId)
@@ -533,7 +526,7 @@
item->setInt32(kAudioPolicyStatus, status);
item->setCString(kAudioPolicyRqstSrc,
- audioSourceString(client->attributes.source).c_str());
+ toString(client->attributes.source).c_str());
item->setInt32(kAudioPolicyRqstSession, client->session);
if (client->opPackageName.size() != 0) {
item->setCString(kAudioPolicyRqstPkg,
@@ -553,7 +546,7 @@
if (other->active) {
// keeps the last of the clients marked active
item->setCString(kAudioPolicyActiveSrc,
- audioSourceString(other->attributes.source).c_str());
+ toString(other->attributes.source).c_str());
item->setInt32(kAudioPolicyActiveSession, other->session);
if (other->opPackageName.size() != 0) {
item->setCString(kAudioPolicyActivePkg,
@@ -702,6 +695,53 @@
device);
}
+status_t AudioPolicyService::setVolumeIndexForAttributes(const audio_attributes_t &attributes,
+ int index, audio_devices_t device)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ if (!settingsAllowed()) {
+ return PERMISSION_DENIED;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->setVolumeIndexForAttributes(attributes, index, device);
+}
+
+status_t AudioPolicyService::getVolumeIndexForAttributes(const audio_attributes_t &attributes,
+ int &index, audio_devices_t device)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getVolumeIndexForAttributes(attributes, index, device);
+}
+
+status_t AudioPolicyService::getMinVolumeIndexForAttributes(const audio_attributes_t &attributes,
+ int &index)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getMinVolumeIndexForAttributes(attributes, index);
+}
+
+status_t AudioPolicyService::getMaxVolumeIndexForAttributes(const audio_attributes_t &attributes,
+ int &index)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ AutoCallerClear acc;
+ return mAudioPolicyManager->getMaxVolumeIndexForAttributes(attributes, index);
+}
+
uint32_t AudioPolicyService::getStrategyForStream(audio_stream_type_t stream)
{
if (uint32_t(stream) >= AUDIO_STREAM_PUBLIC_CNT) {
@@ -1032,9 +1072,14 @@
status_t AudioPolicyService::registerPolicyMixes(const Vector<AudioMix>& mixes, bool registration)
{
Mutex::Autolock _l(mLock);
- if(!modifyAudioRoutingAllowed()) {
+
+ // loopback|render mixes only need a MediaProjection (checked in the caller, AudioService.java)
+ bool needModifyAudioRouting = std::any_of(mixes.begin(), mixes.end(), [](auto& mix) {
+ return !is_mix_loopback_render(mix.mRouteFlags); });
+ if (needModifyAudioRouting && !modifyAudioRoutingAllowed()) {
return PERMISSION_DENIED;
}
+
if (mAudioPolicyManager == NULL) {
return NO_INIT;
}
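For reference, a standalone sketch of the std::any_of permission gate used above; the flag bits are illustrative stand-ins, not the real MIX_ROUTE_FLAG_* constants from media/AudioPolicy.h.
#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative route flags; the real values live in media/AudioPolicy.h.
constexpr uint32_t kRouteRender   = 1u << 0;
constexpr uint32_t kRouteLoopBack = 1u << 1;

struct Mix { uint32_t routeFlags; };

// True when at least one mix does something beyond loopback|render and
// therefore still requires the MODIFY_AUDIO_ROUTING permission.
static bool needsModifyAudioRouting(const std::vector<Mix>& mixes) {
    return std::any_of(mixes.begin(), mixes.end(), [](const Mix& mix) {
        return (mix.routeFlags & (kRouteRender | kRouteLoopBack))
                != (kRouteRender | kRouteLoopBack);
    });
}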
@@ -1198,13 +1243,32 @@
return mAudioPolicyManager->listAudioProductStrategies(strategies);
}
-product_strategy_t AudioPolicyService::getProductStrategyFromAudioAttributes(
- const AudioAttributes &aa)
+status_t AudioPolicyService::getProductStrategyFromAudioAttributes(
+ const AudioAttributes &aa, product_strategy_t &productStrategy)
{
if (mAudioPolicyManager == NULL) {
- return PRODUCT_STRATEGY_NONE;
+ return NO_INIT;
}
Mutex::Autolock _l(mLock);
- return mAudioPolicyManager->getProductStrategyFromAudioAttributes(aa);
+ return mAudioPolicyManager->getProductStrategyFromAudioAttributes(aa, productStrategy);
+}
+
+status_t AudioPolicyService::listAudioVolumeGroups(AudioVolumeGroupVector &groups)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->listAudioVolumeGroups(groups);
+}
+
+status_t AudioPolicyService::getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup)
+{
+ if (mAudioPolicyManager == NULL) {
+ return NO_INIT;
+ }
+ Mutex::Autolock _l(mLock);
+ return mAudioPolicyManager->getVolumeGroupFromAudioAttributes(aa, volumeGroup);
}
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 76ac191..8cbf3af 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -150,6 +150,20 @@
mNotificationClients.valueFor(token)->setAudioPortCallbacksEnabled(enabled);
}
+void AudioPolicyService::setAudioVolumeGroupCallbacksEnabled(bool enabled)
+{
+ Mutex::Autolock _l(mNotificationClientsLock);
+
+ uid_t uid = IPCThreadState::self()->getCallingUid();
+ pid_t pid = IPCThreadState::self()->getCallingPid();
+ int64_t token = ((int64_t)uid<<32) | pid;
+
+ if (mNotificationClients.indexOfKey(token) < 0) {
+ return;
+ }
+ mNotificationClients.valueFor(token)->setAudioVolumeGroupCallbacksEnabled(enabled);
+}
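The notification-client map is keyed by a 64-bit token packing uid and pid, as above. A standalone sketch of that packing (plain C++, no framework types):
#include <cstdint>
#include <cstdio>

// Upper 32 bits carry the uid, lower 32 bits the pid.
static int64_t makeClientToken(uint32_t uid, uint32_t pid) {
    return (static_cast<int64_t>(uid) << 32) | pid;
}

int main() {
    int64_t token = makeClientToken(10123, 4567);
    std::printf("uid=%u pid=%u token=%lld\n",
                static_cast<uint32_t>(token >> 32),
                static_cast<uint32_t>(token & 0xffffffffu),
                static_cast<long long>(token));
    return 0;
}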
+
// removeNotificationClient() is called when the client process dies.
void AudioPolicyService::removeNotificationClient(uid_t uid, pid_t pid)
{
@@ -200,6 +214,19 @@
}
}
+void AudioPolicyService::onAudioVolumeGroupChanged(volume_group_t group, int flags)
+{
+ mOutputCommandThread->changeAudioVolumeGroupCommand(group, flags);
+}
+
+void AudioPolicyService::doOnAudioVolumeGroupChanged(volume_group_t group, int flags)
+{
+ Mutex::Autolock _l(mNotificationClientsLock);
+ for (size_t i = 0; i < mNotificationClients.size(); i++) {
+ mNotificationClients.valueAt(i)->onAudioVolumeGroupChanged(group, flags);
+ }
+}
+
void AudioPolicyService::onDynamicPolicyMixStateUpdate(const String8& regId, int32_t state)
{
ALOGV("AudioPolicyService::onDynamicPolicyMixStateUpdate(%s, %d)",
@@ -270,7 +297,7 @@
uid_t uid,
pid_t pid)
: mService(service), mUid(uid), mPid(pid), mAudioPolicyServiceClient(client),
- mAudioPortCallbacksEnabled(false)
+ mAudioPortCallbacksEnabled(false), mAudioVolumeGroupCallbacksEnabled(false)
{
}
@@ -301,6 +328,15 @@
}
}
+void AudioPolicyService::NotificationClient::onAudioVolumeGroupChanged(volume_group_t group,
+ int flags)
+{
+ if (mAudioPolicyServiceClient != 0 && mAudioVolumeGroupCallbacksEnabled) {
+ mAudioPolicyServiceClient->onAudioVolumeGroupChanged(group, flags);
+ }
+}
+
+
void AudioPolicyService::NotificationClient::onDynamicPolicyMixStateUpdate(
const String8& regId, int32_t state)
{
@@ -330,6 +366,10 @@
mAudioPortCallbacksEnabled = enabled;
}
+void AudioPolicyService::NotificationClient::setAudioVolumeGroupCallbacksEnabled(bool enabled)
+{
+ mAudioVolumeGroupCallbacksEnabled = enabled;
+}
void AudioPolicyService::binderDied(const wp<IBinder>& who) {
ALOGW("binderDied() %p, calling pid %d", who.unsafe_get(),
@@ -410,7 +450,7 @@
for (size_t i =0; i < mAudioRecordClients.size(); i++) {
sp<AudioRecordClient> current = mAudioRecordClients[i];
if (!current->active) continue;
- if (isPrivacySensitive(current->attributes.source)) {
+ if (isPrivacySensitiveSource(current->attributes.source)) {
if (current->startTimeNs > latestSensitiveStartNs) {
latestSensitiveActive = current;
latestSensitiveStartNs = current->startTimeNs;
@@ -449,7 +489,10 @@
bool isLatest = current == latestActive;
bool isLatestSensitive = current == latestSensitiveActive;
bool forceIdle = true;
- if (mUidPolicy->isAssistantUid(current->uid)) {
+
+ if (isVirtualSource(source)) {
+ forceIdle = false;
+ } else if (mUidPolicy->isAssistantUid(current->uid)) {
if (isA11yOnTop) {
if (source == AUDIO_SOURCE_HOTWORD || source == AUDIO_SOURCE_VOICE_RECOGNITION) {
forceIdle = false;
@@ -465,10 +508,6 @@
(source == AUDIO_SOURCE_VOICE_RECOGNITION || source == AUDIO_SOURCE_HOTWORD)) {
forceIdle = false;
}
- } else if (source == AUDIO_SOURCE_VOICE_DOWNLINK ||
- source == AUDIO_SOURCE_VOICE_CALL ||
- (source == AUDIO_SOURCE_VOICE_UPLINK)) {
- forceIdle = false;
} else {
if (!isAssistantOnTop && (isOnTop || isLatest) &&
(!isSensitiveActive || isLatestSensitive)) {
@@ -502,14 +541,27 @@
}
/* static */
-bool AudioPolicyService::isPrivacySensitive(audio_source_t source)
+bool AudioPolicyService::isPrivacySensitiveSource(audio_source_t source)
+{
+ switch (source) {
+ case AUDIO_SOURCE_CAMCORDER:
+ case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ return true;
+ default:
+ break;
+ }
+ return false;
+}
+
+/* static */
+bool AudioPolicyService::isVirtualSource(audio_source_t source)
{
switch (source) {
case AUDIO_SOURCE_VOICE_UPLINK:
case AUDIO_SOURCE_VOICE_DOWNLINK:
case AUDIO_SOURCE_VOICE_CALL:
- case AUDIO_SOURCE_CAMCORDER:
- case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ case AUDIO_SOURCE_REMOTE_SUBMIX:
+ case AUDIO_SOURCE_FM_TUNER:
return true;
default:
break;
@@ -551,6 +603,8 @@
mAudioPolicyManager->dump(fd);
}
+ mPackageManager.dump(fd);
+
if (locked) mLock.unlock();
}
return NO_ERROR;
@@ -1058,6 +1112,18 @@
svc->doOnAudioPatchListUpdate();
mLock.lock();
}break;
+ case CHANGED_AUDIOVOLUMEGROUP: {
+ AudioVolumeGroupData *data =
+ static_cast<AudioVolumeGroupData *>(command->mParam.get());
+ ALOGV("AudioCommandThread() processing update audio volume group");
+ svc = mService.promote();
+ if (svc == 0) {
+ break;
+ }
+ mLock.unlock();
+ svc->doOnAudioVolumeGroupChanged(data->mGroup, data->mFlags);
+ mLock.lock();
+ }break;
case SET_AUDIOPORT_CONFIG: {
SetAudioPortConfigData *data = (SetAudioPortConfigData *)command->mParam.get();
ALOGV("AudioCommandThread() processing set port config");
@@ -1302,6 +1368,19 @@
sendCommand(command);
}
+void AudioPolicyService::AudioCommandThread::changeAudioVolumeGroupCommand(volume_group_t group,
+ int flags)
+{
+ sp<AudioCommand> command = new AudioCommand();
+ command->mCommand = CHANGED_AUDIOVOLUMEGROUP;
+ AudioVolumeGroupData *data = new AudioVolumeGroupData();
+ data->mGroup = group;
+ data->mFlags = flags;
+ command->mParam = data;
+ ALOGV("AudioCommandThread() adding audio volume group changed");
+ sendCommand(command);
+}
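changeAudioVolumeGroupCommand follows the same pattern as the other commands: pack a small data object, queue it, and let the command thread unpack it outside the service lock. A standalone model of that queue-and-drain shape (names are illustrative only):
#include <cstdint>
#include <deque>
#include <memory>

struct VolumeGroupEvent {
    uint32_t group;
    int flags;
};

// Minimal command queue: the binder-side thread posts events, a worker later
// drains them and invokes the notification path.
class EventQueue {
public:
    void post(uint32_t group, int flags) {
        mQueue.push_back(std::make_unique<VolumeGroupEvent>(VolumeGroupEvent{group, flags}));
    }
    template <typename Notify>
    void drain(Notify notify) {
        while (!mQueue.empty()) {
            auto ev = std::move(mQueue.front());
            mQueue.pop_front();
            notify(ev->group, ev->flags);
        }
    }
private:
    std::deque<std::unique_ptr<VolumeGroupEvent>> mQueue;
};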
+
status_t AudioPolicyService::AudioCommandThread::setAudioPortConfigCommand(
const struct audio_port_config *config, int delayMs)
{
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8cd6e81..a2e75cd 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -31,6 +31,7 @@
#include <media/ToneGenerator.h>
#include <media/AudioEffect.h>
#include <media/AudioPolicy.h>
+#include <mediautils/ServiceUtilities.h>
#include "AudioPolicyEffects.h"
#include "managerdefault/AudioPolicyManager.h"
#include <android/hardware/BnSensorPrivacyListener.h>
@@ -111,6 +112,17 @@
int *index,
audio_devices_t device);
+ virtual status_t setVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int index,
+ audio_devices_t device);
+ virtual status_t getVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index,
+ audio_devices_t device);
+ virtual status_t getMinVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index);
+ virtual status_t getMaxVolumeIndexForAttributes(const audio_attributes_t &attr,
+ int &index);
+
virtual uint32_t getStrategyForStream(audio_stream_type_t stream);
virtual audio_devices_t getDevicesForStream(audio_stream_type_t stream);
@@ -192,6 +204,8 @@
virtual void setAudioPortCallbacksEnabled(bool enabled);
+ virtual void setAudioVolumeGroupCallbacksEnabled(bool enabled);
+
virtual status_t acquireSoundTriggerSession(audio_session_t *session,
audio_io_handle_t *ioHandle,
audio_devices_t *device);
@@ -231,7 +245,13 @@
virtual bool isHapticPlaybackSupported();
virtual status_t listAudioProductStrategies(AudioProductStrategyVector &strategies);
- virtual product_strategy_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa);
+ virtual status_t getProductStrategyFromAudioAttributes(const AudioAttributes &aa,
+ product_strategy_t &productStrategy);
+
+ virtual status_t listAudioVolumeGroups(AudioVolumeGroupVector &groups);
+
+ virtual status_t getVolumeGroupFromAudioAttributes(const AudioAttributes &aa,
+ volume_group_t &volumeGroup);
status_t doStopOutput(audio_port_handle_t portId);
void doReleaseOutput(audio_port_handle_t portId);
@@ -269,6 +289,9 @@
audio_patch_handle_t patchHandle,
audio_source_t source);
+ void onAudioVolumeGroupChanged(volume_group_t group, int flags);
+ void doOnAudioVolumeGroupChanged(volume_group_t group, int flags);
+
private:
AudioPolicyService() ANDROID_API;
virtual ~AudioPolicyService();
@@ -304,7 +327,8 @@
void silenceAllRecordings_l();
- static bool isPrivacySensitive(audio_source_t source);
+ static bool isPrivacySensitiveSource(audio_source_t source);
+ static bool isVirtualSource(audio_source_t source);
// If recording we need to make sure the UID is allowed to do that. If the UID is idle
// then it cannot record and gets buffers with zeros - silence. As soon as the UID
@@ -398,6 +422,7 @@
RELEASE_AUDIO_PATCH,
UPDATE_AUDIOPORT_LIST,
UPDATE_AUDIOPATCH_LIST,
+ CHANGED_AUDIOVOLUMEGROUP,
SET_AUDIOPORT_CONFIG,
DYN_POLICY_MIX_STATE_UPDATE,
RECORDING_CONFIGURATION_UPDATE
@@ -429,6 +454,7 @@
int delayMs);
void updateAudioPortListCommand();
void updateAudioPatchListCommand();
+ void changeAudioVolumeGroupCommand(volume_group_t group, int flags);
status_t setAudioPortConfigCommand(const struct audio_port_config *config,
int delayMs);
void dynamicPolicyMixStateUpdateCommand(const String8& regId,
@@ -510,6 +536,12 @@
audio_patch_handle_t mHandle;
};
+ class AudioVolumeGroupData : public AudioCommandData {
+ public:
+ volume_group_t mGroup;
+ int mFlags;
+ };
+
class SetAudioPortConfigData : public AudioCommandData {
public:
struct audio_port_config mConfig;
@@ -642,6 +674,8 @@
audio_patch_handle_t patchHandle,
audio_source_t source);
+ virtual void onAudioVolumeGroupChanged(volume_group_t group, int flags);
+
virtual audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t use);
private:
@@ -660,6 +694,7 @@
void onAudioPatchListUpdate();
void onDynamicPolicyMixStateUpdate(const String8& regId,
int32_t state);
+ void onAudioVolumeGroupChanged(volume_group_t group, int flags);
void onRecordingConfigurationUpdate(
int event,
const record_client_info_t *clientInfo,
@@ -670,6 +705,7 @@
audio_patch_handle_t patchHandle,
audio_source_t source);
void setAudioPortCallbacksEnabled(bool enabled);
+ void setAudioVolumeGroupCallbacksEnabled(bool enabled);
uid_t uid() {
return mUid;
@@ -687,6 +723,7 @@
const pid_t mPid;
const sp<IAudioPolicyServiceClient> mAudioPolicyServiceClient;
bool mAudioPortCallbacksEnabled;
+ bool mAudioVolumeGroupCallbacksEnabled;
};
class AudioClient : public virtual RefBase {
@@ -784,6 +821,8 @@
DefaultKeyedVector< audio_port_handle_t, sp<AudioRecordClient> > mAudioRecordClients;
DefaultKeyedVector< audio_port_handle_t, sp<AudioPlaybackClient> > mAudioPlaybackClients;
+
+ MediaPackageManager mPackageManager; // To check allowPlaybackCapture
};
} // namespace android
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 6ae354b..8854eb2 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -73,6 +73,7 @@
int /*delayMs*/) override { return NO_INIT; }
void onAudioPortListUpdate() override { }
void onAudioPatchListUpdate() override { }
+ void onAudioVolumeGroupChanged(volume_group_t /*group*/, int /*flags*/) override { }
audio_unique_id_t newAudioUniqueId(audio_unique_id_use_t /*use*/) override { return 0; }
void onDynamicPolicyMixStateUpdate(String8 /*regId*/, int32_t /*state*/) override { }
void onRecordingConfigurationUpdate(int event __unused,
diff --git a/services/camera/OWNERS b/services/camera/OWNERS
index 18acfee..f48a95c 100644
--- a/services/camera/OWNERS
+++ b/services/camera/OWNERS
@@ -1,6 +1 @@
-cychen@google.com
-epeev@google.com
-etalvala@google.com
-shuzhenwang@google.com
-yinchiayeh@google.com
-zhijunhe@google.com
+include platform/frameworks/av:/camera/OWNERS
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index e06897f..62ec955 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -227,7 +227,7 @@
Mutex::Autolock lock(mStatusListenerLock);
for (auto& i : mListenerList) {
- i.second->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+ i.second->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
}
}
@@ -1654,6 +1654,18 @@
return Status::ok();
}
+void CameraService::notifyMonitoredUids() {
+ Mutex::Autolock lock(mStatusListenerLock);
+
+ for (const auto& it : mListenerList) {
+ auto ret = it.second->getListener()->onCameraAccessPrioritiesChanged();
+ if (!ret.isOk()) {
+ ALOGE("%s: Failed to trigger permission callback: %d", __FUNCTION__,
+ ret.exceptionCode());
+ }
+ }
+}
+
Status CameraService::notifyDeviceStateChange(int64_t newState) {
const int pid = CameraThreadState::getCallingPid();
const int selfPid = getpid();
@@ -1721,15 +1733,25 @@
{
Mutex::Autolock lock(mStatusListenerLock);
- for (auto& it : mListenerList) {
- if (IInterface::asBinder(it.second) == IInterface::asBinder(listener)) {
+ for (const auto &it : mListenerList) {
+ if (IInterface::asBinder(it.second->getListener()) == IInterface::asBinder(listener)) {
ALOGW("%s: Tried to add listener %p which was already subscribed",
__FUNCTION__, listener.get());
return STATUS_ERROR(ERROR_ALREADY_EXISTS, "Listener already registered");
}
}
- mListenerList.emplace_back(isVendorListener, listener);
+ auto clientUid = CameraThreadState::getCallingUid();
+ sp<ServiceListener> serviceListener = new ServiceListener(this, listener, clientUid);
+ auto ret = serviceListener->initialize();
+ if (ret != NO_ERROR) {
+ String8 msg = String8::format("Failed to initialize service listener: %s (%d)",
+ strerror(-ret), ret);
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ mListenerList.emplace_back(isVendorListener, serviceListener);
+ mUidPolicy->registerMonitorUid(clientUid);
}
/* Collect current devices and status */
@@ -1776,7 +1798,9 @@
{
Mutex::Autolock lock(mStatusListenerLock);
for (auto it = mListenerList.begin(); it != mListenerList.end(); it++) {
- if (IInterface::asBinder(it->second) == IInterface::asBinder(listener)) {
+ if (IInterface::asBinder(it->second->getListener()) == IInterface::asBinder(listener)) {
+ mUidPolicy->unregisterMonitorUid(it->second->getListenerUid());
+ IInterface::asBinder(listener)->unlinkToDeath(it->second);
mListenerList.erase(it);
return Status::ok();
}
@@ -2396,6 +2420,8 @@
sCameraService->updateProxyDeviceState(ICameraServiceProxy::CAMERA_STATE_OPEN,
mCameraIdStr, mCameraFacing, mClientPackageName, apiLevel);
+ sCameraService->mUidPolicy->registerMonitorUid(mClientUid);
+
return OK;
}
@@ -2433,6 +2459,8 @@
}
mOpsCallback.clear();
+ sCameraService->mUidPolicy->unregisterMonitorUid(mClientUid);
+
return OK;
}
@@ -2523,7 +2551,7 @@
if (mRegistered) return;
am.registerUidObserver(this, ActivityManager::UID_OBSERVER_GONE
| ActivityManager::UID_OBSERVER_IDLE
- | ActivityManager::UID_OBSERVER_ACTIVE,
+ | ActivityManager::UID_OBSERVER_ACTIVE | ActivityManager::UID_OBSERVER_PROCSTATE,
ActivityManager::PROCESS_STATE_UNKNOWN,
String16("cameraserver"));
status_t res = am.linkToDeath(this);
@@ -2569,6 +2597,51 @@
}
}
+void CameraService::UidPolicy::onUidStateChanged(uid_t uid, int32_t procState,
+ int64_t /*procStateSeq*/) {
+ bool procStateChange = false;
+ {
+ Mutex::Autolock _l(mUidLock);
+ if ((mMonitoredUids.find(uid) != mMonitoredUids.end()) &&
+ (mMonitoredUids[uid].first != procState)) {
+ mMonitoredUids[uid].first = procState;
+ procStateChange = true;
+ }
+ }
+
+ if (procStateChange) {
+ sp<CameraService> service = mService.promote();
+ if (service != nullptr) {
+ service->notifyMonitoredUids();
+ }
+ }
+}
+
+void CameraService::UidPolicy::registerMonitorUid(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+ auto it = mMonitoredUids.find(uid);
+ if (it != mMonitoredUids.end()) {
+ it->second.second++;
+ } else {
+ mMonitoredUids.emplace(
+ std::pair<uid_t, std::pair<int32_t, size_t>> (uid,
+ std::pair<int32_t, size_t> (ActivityManager::PROCESS_STATE_NONEXISTENT, 1)));
+ }
+}
+
+void CameraService::UidPolicy::unregisterMonitorUid(uid_t uid) {
+ Mutex::Autolock _l(mUidLock);
+ auto it = mMonitoredUids.find(uid);
+ if (it != mMonitoredUids.end()) {
+ it->second.second--;
+ if (it->second.second == 0) {
+ mMonitoredUids.erase(it);
+ }
+ } else {
+ ALOGE("%s: Trying to unregister uid: %d which is not monitored!", __FUNCTION__, uid);
+ }
+}
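The registerMonitorUid/unregisterMonitorUid pair above implements a refcounted map of monitored uids with a cached procState. A standalone model of that bookkeeping:
#include <cstdint>
#include <unordered_map>
#include <utility>

// Standalone model of the refcounted monitored-uid map used by UidPolicy above:
// each uid maps to a (cached procState, refCount) pair.
class MonitoredUids {
public:
    static constexpr int32_t kProcStateUnknown = -1;  // stands in for PROCESS_STATE_NONEXISTENT

    void registerUid(uint32_t uid) {
        auto it = mUids.find(uid);
        if (it != mUids.end()) {
            it->second.second++;  // already monitored: just bump the refcount
        } else {
            mUids.emplace(uid, std::make_pair(kProcStateUnknown, size_t{1}));
        }
    }

    bool unregisterUid(uint32_t uid) {
        auto it = mUids.find(uid);
        if (it == mUids.end()) return false;  // was never registered
        if (--it->second.second == 0) {
            mUids.erase(it);
        }
        return true;
    }

    // Returns true when the cached procState actually changed, mirroring the
    // "notify only on change" logic in onUidStateChanged.
    bool updateProcState(uint32_t uid, int32_t procState) {
        auto it = mUids.find(uid);
        if (it == mUids.end() || it->second.first == procState) return false;
        it->second.first = procState;
        return true;
    }

private:
    std::unordered_map<uint32_t, std::pair<int32_t, size_t>> mUids;
};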
+
bool CameraService::UidPolicy::isUidActive(uid_t uid, String16 callingPackage) {
Mutex::Autolock _l(mUidLock);
return isUidActiveLocked(uid, callingPackage);
@@ -3118,7 +3191,8 @@
cameraId.c_str());
continue;
}
- listener.second->onStatusChanged(mapToInterface(status), String16(cameraId));
+ listener.second->getListener()->onStatusChanged(mapToInterface(status),
+ String16(cameraId));
}
});
}
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index cf0cef8..65727ec 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -179,6 +179,9 @@
/*out*/
std::vector<hardware::CameraStatus>* cameraStatuses, bool isVendor = false);
+ // Monitored UIDs availability notification
+ void notifyMonitoredUids();
+
/////////////////////////////////////////////////////////////////////
// Client functionality
@@ -543,11 +546,14 @@
void onUidGone(uid_t uid, bool disabled);
void onUidActive(uid_t uid);
void onUidIdle(uid_t uid, bool disabled);
- void onUidStateChanged(uid_t uid __unused, int32_t procState __unused, int64_t procStateSeq __unused) {}
+ void onUidStateChanged(uid_t uid, int32_t procState, int64_t procStateSeq);
void addOverrideUid(uid_t uid, String16 callingPackage, bool active);
void removeOverrideUid(uid_t uid, String16 callingPackage);
+ void registerMonitorUid(uid_t uid);
+ void unregisterMonitorUid(uid_t uid);
+
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
private:
@@ -558,6 +564,8 @@
bool mRegistered;
wp<CameraService> mService;
std::unordered_set<uid_t> mActiveUids;
+ // Monitored uid map to cached procState and refCount pair
+ std::unordered_map<uid_t, std::pair<int32_t, size_t>> mMonitoredUids;
std::unordered_map<uid_t, bool> mOverrideUids;
}; // class UidPolicy
@@ -790,8 +798,33 @@
sp<CameraProviderManager> mCameraProviderManager;
+ class ServiceListener : public virtual IBinder::DeathRecipient {
+ public:
+ ServiceListener(sp<CameraService> parent, sp<hardware::ICameraServiceListener> listener,
+ int uid) : mParent(parent), mListener(listener), mListenerUid(uid) {}
+
+ status_t initialize() {
+ return IInterface::asBinder(mListener)->linkToDeath(this);
+ }
+
+ virtual void binderDied(const wp<IBinder> &/*who*/) {
+ auto parent = mParent.promote();
+ if (parent.get() != nullptr) {
+ parent->removeListener(mListener);
+ }
+ }
+
+ int getListenerUid() { return mListenerUid; }
+ sp<hardware::ICameraServiceListener> getListener() { return mListener; }
+
+ private:
+ wp<CameraService> mParent;
+ sp<hardware::ICameraServiceListener> mListener;
+ int mListenerUid;
+ };
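ServiceListener keeps only a weak reference to the service so that a dying listener binder cannot resurrect or leak it; a standalone sketch of that promote-then-act pattern using std::weak_ptr in place of wp<>:
#include <memory>

struct Service {
    void removeListener(int listenerId) { (void)listenerId; /* unregister it */ }
};

// Model of ServiceListener::binderDied: only act if the owning service is alive.
struct Listener {
    std::weak_ptr<Service> parent;
    int id = 0;

    void onPeerDied() {
        if (auto svc = parent.lock()) {  // analogue of wp<>::promote()
            svc->removeListener(id);
        }
    }
};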
+
// Guarded by mStatusListenerMutex
- std::vector<std::pair<bool, sp<hardware::ICameraServiceListener>>> mListenerList;
+ std::vector<std::pair<bool, sp<ServiceListener>>> mListenerList;
Mutex mStatusListenerLock;
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
index 8c8b97a..d6bf83e 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.cpp
@@ -237,7 +237,7 @@
}
status_t Camera3BufferManager::getBufferForStream(int streamId, int streamSetId,
- sp<GraphicBuffer>* gb, int* fenceFd) {
+ sp<GraphicBuffer>* gb, int* fenceFd, bool noFreeBufferAtConsumer) {
ATRACE_CALL();
Mutex::Autolock l(mLock);
@@ -253,14 +253,19 @@
StreamSet &streamSet = mStreamSetMap.editValueFor(streamSetId);
BufferCountMap& handOutBufferCounts = streamSet.handoutBufferCountMap;
size_t& bufferCount = handOutBufferCounts.editValueFor(streamId);
+ BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
+ size_t& attachedBufferCount = attachedBufferCounts.editValueFor(streamId);
+
+ if (noFreeBufferAtConsumer) {
+ attachedBufferCount = bufferCount;
+ }
+
if (bufferCount >= streamSet.maxAllowedBufferCount) {
ALOGE("%s: bufferCount (%zu) exceeds the max allowed buffer count (%zu) of this stream set",
__FUNCTION__, bufferCount, streamSet.maxAllowedBufferCount);
return INVALID_OPERATION;
}
- BufferCountMap& attachedBufferCounts = streamSet.attachedBufferCountMap;
- size_t& attachedBufferCount = attachedBufferCounts.editValueFor(streamId);
if (attachedBufferCount > bufferCount) {
// We've already attached more buffers to this stream than we currently have
// outstanding, so have the stream just use an already-attached buffer
diff --git a/services/camera/libcameraservice/device3/Camera3BufferManager.h b/services/camera/libcameraservice/device3/Camera3BufferManager.h
index 025062e..f0de1c1 100644
--- a/services/camera/libcameraservice/device3/Camera3BufferManager.h
+++ b/services/camera/libcameraservice/device3/Camera3BufferManager.h
@@ -112,6 +112,10 @@
*
* After this call, the client takes over the ownership of this buffer if it is not freed.
*
+ * Sometimes free buffers are discarded on the consumer side and the dequeueBuffer call returns
+ * TIMED_OUT. In this case, calling getBufferForStream again with noFreeBufferAtConsumer set to
+ * true notifies the buffer manager to update its state and also try to allocate a new buffer.
+ *
* Return values:
*
* OK: Getting buffer for this stream was successful.
@@ -122,7 +126,9 @@
* to this buffer manager before.
* NO_MEMORY: Unable to allocate a buffer for this stream at this time.
*/
- status_t getBufferForStream(int streamId, int streamSetId, sp<GraphicBuffer>* gb, int* fenceFd);
+ status_t getBufferForStream(
+ int streamId, int streamSetId, sp<GraphicBuffer>* gb, int* fenceFd,
+ bool noFreeBufferAtConsumer = false);
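The retry the comment describes looks roughly like the sketch below (a self-contained model; the real call sites are the Camera3OutputStream changes further down): try the normal dequeue first, and on TIMED_OUT ask the buffer manager again with the no-free-buffer hint set.
#include <functional>

enum Status { STATUS_OK, STATUS_TIMED_OUT, STATUS_FAILED };

// dequeueFn models the consumer dequeue path; getFromManagerFn models
// getBufferForStream with its noFreeBufferAtConsumer hint.
static Status acquireBuffer(const std::function<Status()>& dequeueFn,
                            const std::function<Status(bool)>& getFromManagerFn) {
    Status res = dequeueFn();
    if (res == STATUS_TIMED_OUT) {
        // The consumer may have silently discarded its free buffers; tell the
        // manager so it refreshes its accounting and allocates a fresh one.
        res = getFromManagerFn(/*noFreeBufferAtConsumer*/ true);
    }
    return res;
}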
/**
* This method notifies the manager that a buffer has been released by the consumer.
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 923d17a..22e09e4 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -58,6 +58,8 @@
#include "CameraService.h"
#include "utils/CameraThreadState.h"
+#include <tuple>
+
using namespace android::camera3;
using namespace android::hardware::camera;
using namespace android::hardware::camera::device::V3_2;
@@ -1094,7 +1096,7 @@
hBuf.acquireFence.setTo(acquireFence, /*shouldOwn*/true);
hBuf.releaseFence = nullptr;
- res = mInterface->pushInflightRequestBuffer(bufferId, buffer);
+ res = mInterface->pushInflightRequestBuffer(bufferId, buffer, streamId);
if (res != OK) {
ALOGE("%s: Can't get register request buffers for stream %d: %s (%d)",
__FUNCTION__, streamId, strerror(-res), res);
@@ -2847,12 +2849,19 @@
}
streams.add(outputStream);
- if (outputStream->format == HAL_PIXEL_FORMAT_BLOB &&
- outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
+ if (outputStream->format == HAL_PIXEL_FORMAT_BLOB) {
size_t k = i + ((mInputStream != nullptr) ? 1 : 0); // Input stream if present should
// always occupy the initial entry.
- bufferSizes[k] = static_cast<uint32_t>(
- getJpegBufferSize(outputStream->width, outputStream->height));
+ if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
+ bufferSizes[k] = static_cast<uint32_t>(
+ getJpegBufferSize(outputStream->width, outputStream->height));
+ } else if (outputStream->data_space ==
+ static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
+ bufferSizes[k] = outputStream->width * outputStream->height;
+ } else {
+ ALOGW("%s: Blob dataSpace %d not supported",
+ __FUNCTION__, outputStream->data_space);
+ }
}
}
@@ -3270,7 +3279,15 @@
std::vector<std::pair<int32_t, int32_t>> inflightKeys;
mInterface->getInflightBufferKeys(&inflightKeys);
- int32_t inputStreamId = (mInputStream != nullptr) ? mInputStream->getId() : -1;
+ // Inflight buffers for HAL buffer manager
+ std::vector<uint64_t> inflightRequestBufferKeys;
+ mInterface->getInflightRequestBufferKeys(&inflightRequestBufferKeys);
+
+ // (streamId, frameNumber, buffer_handle_t*) tuple for all inflight buffers.
+ // frameNumber will be -1 for buffers from HAL buffer manager
+ std::vector<std::tuple<int32_t, int32_t, buffer_handle_t*>> inflightBuffers;
+ inflightBuffers.reserve(inflightKeys.size() + inflightRequestBufferKeys.size());
+
for (auto& pair : inflightKeys) {
int32_t frameNumber = pair.first;
int32_t streamId = pair.second;
@@ -3281,6 +3298,26 @@
__FUNCTION__, frameNumber, streamId);
continue;
}
+ inflightBuffers.push_back(std::make_tuple(streamId, frameNumber, buffer));
+ }
+
+ for (auto& bufferId : inflightRequestBufferKeys) {
+ int32_t streamId = -1;
+ buffer_handle_t* buffer = nullptr;
+ status_t res = mInterface->popInflightRequestBuffer(bufferId, &buffer, &streamId);
+ if (res != OK) {
+ ALOGE("%s: cannot find in-flight buffer %" PRIu64, __FUNCTION__, bufferId);
+ continue;
+ }
+ inflightBuffers.push_back(std::make_tuple(streamId, /*frameNumber*/-1, buffer));
+ }
+
+ int32_t inputStreamId = (mInputStream != nullptr) ? mInputStream->getId() : -1;
+ for (auto& tuple : inflightBuffers) {
+ status_t res = OK;
+ int32_t streamId = std::get<0>(tuple);
+ int32_t frameNumber = std::get<1>(tuple);
+ buffer_handle_t* buffer = std::get<2>(tuple);
camera3_stream_buffer_t streamBuffer;
streamBuffer.buffer = buffer;
@@ -4354,8 +4391,11 @@
dst.status = BufferStatus::OK;
dst.releaseFence = nullptr;
- pushInflightBufferLocked(captureRequest->frameNumber, streamId,
- src->buffer, src->acquire_fence);
+ // Output buffers are empty when using HAL buffer manager
+ if (!mUseHalBufManager) {
+ pushInflightBufferLocked(captureRequest->frameNumber, streamId,
+ src->buffer, src->acquire_fence);
+ }
}
}
return OK;
@@ -4583,6 +4623,17 @@
return;
}
+void Camera3Device::HalInterface::getInflightRequestBufferKeys(
+ std::vector<uint64_t>* out) {
+ std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
+ out->clear();
+ out->reserve(mRequestedBuffers.size());
+ for (auto& pair : mRequestedBuffers) {
+ out->push_back(pair.first);
+ }
+ return;
+}
+
status_t Camera3Device::HalInterface::pushInflightBufferLocked(
int32_t frameNumber, int32_t streamId, buffer_handle_t *buffer, int acquireFence) {
uint64_t key = static_cast<uint64_t>(frameNumber) << 32 | static_cast<uint64_t>(streamId);
@@ -4610,9 +4661,9 @@
}
status_t Camera3Device::HalInterface::pushInflightRequestBuffer(
- uint64_t bufferId, buffer_handle_t* buf) {
+ uint64_t bufferId, buffer_handle_t* buf, int32_t streamId) {
std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
- auto pair = mRequestedBuffers.insert({bufferId, buf});
+ auto pair = mRequestedBuffers.insert({bufferId, {streamId, buf}});
if (!pair.second) {
ALOGE("%s: bufId %" PRIu64 " is already inflight!",
__FUNCTION__, bufferId);
@@ -4623,7 +4674,13 @@
// Find and pop a buffer_handle_t based on bufferId
status_t Camera3Device::HalInterface::popInflightRequestBuffer(
- uint64_t bufferId, /*out*/ buffer_handle_t **buffer) {
+ uint64_t bufferId,
+ /*out*/ buffer_handle_t** buffer,
+ /*optional out*/ int32_t* streamId) {
+ if (buffer == nullptr) {
+ ALOGE("%s: buffer (%p) must not be null", __FUNCTION__, buffer);
+ return BAD_VALUE;
+ }
std::lock_guard<std::mutex> lock(mRequestedBuffersLock);
auto it = mRequestedBuffers.find(bufferId);
if (it == mRequestedBuffers.end()) {
@@ -4631,7 +4688,10 @@
__FUNCTION__, bufferId);
return BAD_VALUE;
}
- *buffer = it->second;
+ *buffer = it->second.second;
+ if (streamId != nullptr) {
+ *streamId = it->second.first;
+ }
mRequestedBuffers.erase(it);
return OK;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index b25d89d..d3bb212 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -320,16 +320,22 @@
status_t popInflightBuffer(int32_t frameNumber, int32_t streamId,
/*out*/ buffer_handle_t **buffer);
- // Register a bufId/buffer_handle_t to inflight request buffer
- status_t pushInflightRequestBuffer(uint64_t bufferId, buffer_handle_t* buf);
+ // Register a bufId (streamId, buffer_handle_t) to inflight request buffer
+ status_t pushInflightRequestBuffer(
+ uint64_t bufferId, buffer_handle_t* buf, int32_t streamId);
// Find a buffer_handle_t based on bufferId
- status_t popInflightRequestBuffer(uint64_t bufferId, /*out*/ buffer_handle_t **buffer);
+ status_t popInflightRequestBuffer(uint64_t bufferId,
+ /*out*/ buffer_handle_t** buffer,
+ /*optional out*/ int32_t* streamId = nullptr);
// Get a vector of (frameNumber, streamId) pair of currently inflight
// buffers
void getInflightBufferKeys(std::vector<std::pair<int32_t, int32_t>>* out);
+ // Get a vector of bufferId of currently inflight buffers
+ void getInflightRequestBufferKeys(std::vector<uint64_t>* out);
+
static const uint64_t BUFFER_ID_NO_BUFFER = 0;
private:
// Always valid
@@ -398,7 +404,7 @@
// Buffers given to HAL through requestStreamBuffer API
std::mutex mRequestedBuffersLock;
- std::unordered_map<uint64_t, buffer_handle_t*> mRequestedBuffers;
+ std::unordered_map<uint64_t, std::pair<int32_t, buffer_handle_t*>> mRequestedBuffers;
uint32_t mNextStreamConfigCounter = 1;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 8cd575d..baba856 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -359,7 +359,18 @@
// Set dequeueBuffer/attachBuffer timeout if the consumer is not hw composer or hw texture.
// We need to skip these cases as a timeout will disable the non-blocking (async) mode.
if (!(isConsumedByHWComposer() || isConsumedByHWTexture())) {
- mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ if (mUseBufferManager) {
+ // When the buffer manager is handling the buffers, available buffers should already be in
+ // the buffer queue before we call into dequeueBuffer, because the buffer manager tracks
+ // free buffers.
+ // There are, however, consumer-side features (e.g. ImageReader::discardFreeBuffers) that
+ // can discard free buffers without notifying the buffer manager. We want the timeout to
+ // happen immediately here so the buffer manager can update its internal state and try to
+ // allocate a buffer instead of waiting.
+ mConsumer->setDequeueTimeout(0);
+ } else {
+ mConsumer->setDequeueTimeout(kDequeueBufferTimeout);
+ }
}
return OK;
@@ -526,6 +537,8 @@
if (res != OK) {
ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
+
+ checkRetAndSetAbandonedLocked(res);
return res;
}
gotBufferFromManager = true;
@@ -562,35 +575,70 @@
mDequeueBufferLatency.add(dequeueStart, dequeueEnd);
mLock.lock();
- if (res != OK) {
+
+ if (mUseBufferManager && res == TIMED_OUT) {
+ checkRemovedBuffersLocked();
+
+ sp<GraphicBuffer> gb;
+ res = mBufferManager->getBufferForStream(
+ getId(), getStreamSetId(), &gb, fenceFd, /*noFreeBuffer*/true);
+
+ if (res == OK) {
+ // Attach this buffer to the bufferQueue: the buffer will be in dequeue state after
+ // a successful return.
+ *anb = gb.get();
+ res = mConsumer->attachBuffer(*anb);
+ gotBufferFromManager = true;
+ ALOGV("Stream %d: Attached new buffer", getId());
+
+ if (res != OK) {
+ ALOGE("%s: Stream %d: Can't attach the output buffer to this surface: %s (%d)",
+ __FUNCTION__, mId, strerror(-res), res);
+
+ checkRetAndSetAbandonedLocked(res);
+ return res;
+ }
+ } else {
+ ALOGE("%s: Stream %d: Can't get next output buffer from buffer manager:"
+ " %s (%d)", __FUNCTION__, mId, strerror(-res), res);
+ return res;
+ }
+ } else if (res != OK) {
ALOGE("%s: Stream %d: Can't dequeue next output buffer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
- // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is STATE_PREPARING,
- // let prepareNextBuffer handle the error.)
- if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
- mState = STATE_ABANDONED;
- }
-
+ checkRetAndSetAbandonedLocked(res);
return res;
}
}
if (res == OK) {
- std::vector<sp<GraphicBuffer>> removedBuffers;
- res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
- if (res == OK) {
- onBuffersRemovedLocked(removedBuffers);
-
- if (mUseBufferManager && removedBuffers.size() > 0) {
- mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), removedBuffers.size());
- }
- }
+ checkRemovedBuffersLocked();
}
return res;
}
+void Camera3OutputStream::checkRemovedBuffersLocked(bool notifyBufferManager) {
+ std::vector<sp<GraphicBuffer>> removedBuffers;
+ status_t res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
+ if (res == OK) {
+ onBuffersRemovedLocked(removedBuffers);
+
+ if (notifyBufferManager && mUseBufferManager && removedBuffers.size() > 0) {
+ mBufferManager->onBuffersRemoved(getId(), getStreamSetId(), removedBuffers.size());
+ }
+ }
+}
+
+void Camera3OutputStream::checkRetAndSetAbandonedLocked(status_t res) {
+ // Only transition to STATE_ABANDONED from STATE_CONFIGURED. (If it is
+ // STATE_PREPARING, let prepareNextBuffer handle the error.)
+ if ((res == NO_INIT || res == DEAD_OBJECT) && mState == STATE_CONFIGURED) {
+ mState = STATE_ABANDONED;
+ }
+}
+
status_t Camera3OutputStream::disconnectLocked() {
status_t res;
@@ -803,11 +851,8 @@
}
}
- std::vector<sp<GraphicBuffer>> removedBuffers;
- res = mConsumer->getAndFlushRemovedBuffers(&removedBuffers);
- if (res == OK) {
- onBuffersRemovedLocked(removedBuffers);
- }
+ // Here we assume detachBuffer is called by the buffer manager, so it doesn't need to be notified
+ checkRemovedBuffersLocked(/*notifyBufferManager*/false);
return res;
}
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 2128da2..30fc2f7 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -309,6 +309,13 @@
*/
void onBuffersRemovedLocked(const std::vector<sp<GraphicBuffer>>&);
status_t detachBufferLocked(sp<GraphicBuffer>* buffer, int* fenceFd);
+ // Call this after each dequeueBuffer/attachBuffer/detachNextBuffer call to get updates on
+ // removed buffers. Set notifyBufferManager to false when the call is initiated by the buffer
+ // manager, so the buffer manager doesn't need to be notified again.
+ void checkRemovedBuffersLocked(bool notifyBufferManager = true);
+
+ // Check return status of IGBP calls and set abandoned state accordingly
+ void checkRetAndSetAbandonedLocked(status_t res);
static const int32_t kDequeueLatencyBinSize = 5; // in ms
CameraLatencyHistogram mDequeueBufferLatency;
diff --git a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
index ca9143d..0f6be79 100644
--- a/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
+++ b/services/camera/libcameraservice/hidl/AidlCameraServiceListener.h
@@ -50,6 +50,10 @@
virtual ::android::binder::Status onTorchStatusChanged(
int32_t status, const ::android::String16& cameraId) override;
+ virtual binder::Status onCameraAccessPrioritiesChanged() {
+ // TODO: no implementation yet.
+ return binder::Status::ok();
+ }
};
} // implementation
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
new file mode 100644
index 0000000..25c36fa
--- /dev/null
+++ b/services/mediacodec/Android.bp
@@ -0,0 +1,62 @@
+cc_binary {
+ name: "mediaswcodec",
+ vendor_available: true,
+
+ srcs: [
+ "main_swcodecservice.cpp",
+ ],
+
+ shared_libs: [
+ "libavservices_minijail",
+ "libbase",
+ "libhidltransport",
+ "libhwbinder",
+ "liblog",
+ "libmedia_codecserviceregistrant",
+ ],
+
+ target: {
+ vendor: {
+ exclude_shared_libs: ["libavservices_minijail"],
+ shared_libs: ["libavservices_minijail_vendor"],
+ },
+ },
+
+ header_libs: [
+ "libmedia_headers",
+ ],
+
+ init_rc: ["mediaswcodec.rc"],
+
+ required: ["mediaswcodec.policy"],
+
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wno-error=deprecated-declarations",
+ ],
+
+ sanitize: {
+ scudo: true,
+ },
+}
+
+prebuilt_etc {
+ name: "mediaswcodec.policy",
+ sub_dir: "seccomp_policy",
+ arch: {
+ arm: {
+ src: "seccomp_policy/mediaswcodec-arm.policy",
+ },
+ arm64: {
+ src: "seccomp_policy/mediaswcodec-arm64.policy",
+ },
+ x86: {
+ src: "seccomp_policy/mediacodec-x86.policy",
+ },
+ x86_64: {
+ src: "seccomp_policy/mediacodec-x86.policy",
+ },
+ },
+ required: ["crash_dump.policy"],
+}
diff --git a/services/mediacodec/Android.mk b/services/mediacodec/Android.mk
index f78c671..9cc19a3 100644
--- a/services/mediacodec/Android.mk
+++ b/services/mediacodec/Android.mk
@@ -27,8 +27,8 @@
include $(CLEAR_VARS)
# seccomp is not required for coverage build.
ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediacodec.policy
-LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediacodec.policy
+LOCAL_REQUIRED_MODULES_arm := mediacodec.policy
+LOCAL_REQUIRED_MODULES_x86 := mediacodec.policy
endif
LOCAL_SRC_FILES := main_codecservice.cpp
LOCAL_SHARED_LIBRARIES := \
@@ -65,74 +65,13 @@
####################################################################
-# service executable
-include $(CLEAR_VARS)
-# seccomp is not required for coverage build.
-ifneq ($(NATIVE_COVERAGE),true)
-LOCAL_REQUIRED_MODULES_arm := crash_dump.policy mediaswcodec.policy
-LOCAL_REQUIRED_MODULES_arm64 := crash_dump.policy mediaswcodec.policy
-LOCAL_REQUIRED_MODULES_x86 := crash_dump.policy mediaswcodec.policy
-LOCAL_REQUIRED_MODULES_x86_64 := crash_dump.policy mediaswcodec.policy
-endif
-
-LOCAL_SRC_FILES := \
- main_swcodecservice.cpp \
- MediaCodecUpdateService.cpp \
-
-sanitizer_runtime_libraries := $(call normalize-path-list,$(addsuffix .so,\
- $(ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
- $(UBSAN_RUNTIME_LIBRARY) \
- $(TSAN_RUNTIME_LIBRARY) \
- $(2ND_ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
- $(2ND_UBSAN_RUNTIME_LIBRARY) \
- $(2ND_TSAN_RUNTIME_LIBRARY)))
-
-# $(info Sanitizer: $(sanitizer_runtime_libraries))
-
-llndk_libraries := $(call normalize-path-list,$(addsuffix .so,\
- $(LLNDK_LIBRARIES)))
-
-# $(info LLNDK: $(llndk_libraries))
-
-LOCAL_CFLAGS := -DLINKED_LIBRARIES='"$(sanitizer_runtime_libraries):$(llndk_libraries)"'
-
-LOCAL_SHARED_LIBRARIES := \
- libavservices_minijail \
- libbase \
- libbinder \
- libcutils \
- libhidltransport \
- libhwbinder \
- liblog \
- libmedia \
- libutils \
- libziparchive \
-
-LOCAL_HEADER_LIBRARIES := \
- libnativeloader-dummy-headers \
-
-LOCAL_MODULE := mediaswcodec
-LOCAL_INIT_RC := mediaswcodec.rc
-LOCAL_SANITIZE := scudo
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86_64 arm64))
- LOCAL_MULTILIB := both
- LOCAL_MODULE_STEM_32 := $(LOCAL_MODULE)32
- LOCAL_MODULE_STEM_64 := $(LOCAL_MODULE)
-endif
-
-sanitizer_runtime_libraries :=
-llndk_libraries :=
-
-include $(BUILD_EXECUTABLE)
-
-####################################################################
-
# service seccomp policy
ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
include $(CLEAR_VARS)
LOCAL_MODULE := mediacodec.policy
LOCAL_MODULE_CLASS := ETC
LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
+LOCAL_REQUIRED_MODULES := crash_dump.policy
# mediacodec runs in 32-bit compatibility mode. For 64-bit architectures,
# use the 32-bit policy
ifdef TARGET_2ND_ARCH
@@ -149,14 +88,5 @@
####################################################################
-# sw service seccomp policy
-ifeq ($(TARGET_ARCH), $(filter $(TARGET_ARCH), x86 x86_64 arm arm64))
-include $(CLEAR_VARS)
-LOCAL_MODULE := mediaswcodec.policy
-LOCAL_MODULE_CLASS := ETC
-LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/seccomp_policy
-LOCAL_SRC_FILES := seccomp_policy/mediaswcodec-$(TARGET_ARCH).policy
-include $(BUILD_PREBUILT)
-endif
include $(call all-makefiles-under, $(LOCAL_PATH))
diff --git a/services/mediacodec/MediaCodecUpdateService.cpp b/services/mediacodec/MediaCodecUpdateService.cpp
deleted file mode 100644
index 50ccbce..0000000
--- a/services/mediacodec/MediaCodecUpdateService.cpp
+++ /dev/null
@@ -1,79 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "MediaCodecUpdateService"
-//#define LOG_NDEBUG 0
-
-#include <android/dlext.h>
-#include <dlfcn.h>
-#include <media/CodecServiceRegistrant.h>
-#include <nativeloader/dlext_namespaces.h>
-#include <utils/Log.h>
-#include <utils/String8.h>
-
-#include "MediaCodecUpdateService.h"
-
-namespace android {
-
-void loadFromApex(const char *libDirPath) {
- ALOGV("loadFromApex: path=%s", libDirPath);
-
- String8 libPath = String8(libDirPath) + "/libmedia_codecserviceregistrant.so";
-
- android_namespace_t *codecNs = android_create_namespace("codecs",
- nullptr, // ld_library_path
- libDirPath,
- ANDROID_NAMESPACE_TYPE_ISOLATED,
- nullptr, // permitted_when_isolated_path
- nullptr); // parent
-
- if (codecNs == nullptr) {
- ALOGE("Failed to create codec namespace");
- return;
- }
-
- String8 linked_libraries(LINKED_LIBRARIES);
- if (!android_link_namespaces(codecNs, nullptr, linked_libraries.c_str())) {
- ALOGE("Failed to link namespace");
- return;
- }
-
- const android_dlextinfo dlextinfo = {
- .flags = ANDROID_DLEXT_USE_NAMESPACE,
- .library_namespace = codecNs,
- };
-
- void *registrantLib = android_dlopen_ext(
- libPath.string(),
- RTLD_NOW | RTLD_LOCAL, &dlextinfo);
-
- if (registrantLib == nullptr) {
- ALOGE("Failed to load lib from archive: %s", dlerror());
- }
-
- RegisterCodecServicesFunc registerCodecServices =
- reinterpret_cast<RegisterCodecServicesFunc>(
- dlsym(registrantLib, "RegisterCodecServices"));
-
- if (registerCodecServices == nullptr) {
- ALOGE("Cannot register codec services -- corrupted library.");
- return;
- }
-
- registerCodecServices();
-}
-
-} // namespace android
diff --git a/services/mediacodec/MediaCodecUpdateService.h b/services/mediacodec/MediaCodecUpdateService.h
deleted file mode 100644
index 09d6dbe..0000000
--- a/services/mediacodec/MediaCodecUpdateService.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright 2018 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ANDROID_MEDIA_CODEC_UPDATE_SERVICE_H
-#define ANDROID_MEDIA_CODEC_UPDATE_SERVICE_H
-
-namespace android {
-
-void loadFromApex(const char *libDirPath);
-
-} // namespace android
-
-#endif // ANDROID_MEDIA_CODEC_UPDATE_SERVICE_H
diff --git a/services/mediacodec/main_swcodecservice.cpp b/services/mediacodec/main_swcodecservice.cpp
index 05b5695..c44be28 100644
--- a/services/mediacodec/main_swcodecservice.cpp
+++ b/services/mediacodec/main_swcodecservice.cpp
@@ -19,24 +19,26 @@
// from LOCAL_C_INCLUDES
#include "minijail.h"
-
#include <hidl/HidlTransportSupport.h>
-#include "MediaCodecUpdateService.h"
-
using namespace android;
+// kSystemSeccompPolicyPath points to the policy for the swcodecs themselves and
+// ships as part of the updatable swcodec APEX. kVendorSeccompPolicyPath points to any
+// additional policies that the vendor may need for the device.
static const char kSystemSeccompPolicyPath[] =
- "/system/etc/seccomp_policy/mediaswcodec.policy";
+ "/apex/com.android.media.swcodec/etc/seccomp_policy/mediaswcodec.policy";
static const char kVendorSeccompPolicyPath[] =
"/vendor/etc/seccomp_policy/mediaswcodec.policy";
// Disable Scudo's mismatch allocation check, as it is being triggered
// by some third party code.
extern "C" const char *__scudo_default_options() {
- return "DeallocationTypeMismatch=false";
+ return "DeallocationTypeMismatch=false";
}
+extern "C" void RegisterCodecServices();
+
int main(int argc __unused, char** /*argv*/)
{
LOG(INFO) << "media swcodec service starting";
@@ -45,11 +47,7 @@
::android::hardware::configureRpcThreadpool(64, false);
-#ifdef __LP64__
- loadFromApex("/apex/com.android.media.swcodec/lib64");
-#else
- loadFromApex("/apex/com.android.media.swcodec/lib");
-#endif
+ RegisterCodecServices();
::android::hardware::joinRpcThreadpool();
}
diff --git a/services/mediaextractor/Android.mk b/services/mediaextractor/Android.mk
index 7654982..661a475 100644
--- a/services/mediaextractor/Android.mk
+++ b/services/mediaextractor/Android.mk
@@ -6,26 +6,8 @@
LOCAL_SRC_FILES := \
MediaExtractorService.cpp
-LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils liblog
+LOCAL_SHARED_LIBRARIES := libmedia libstagefright libbinder libutils
LOCAL_MODULE:= libmediaextractorservice
-
-sanitizer_runtime_libraries := $(call normalize-path-list,$(addsuffix .so,\
- $(ADDRESS_SANITIZER_RUNTIME_LIBRARY) \
- $(UBSAN_RUNTIME_LIBRARY) \
- $(TSAN_RUNTIME_LIBRARY)))
-
-# $(info Sanitizer: $(sanitizer_runtime_libraries))
-
-ndk_libraries := $(call normalize-path-list,$(addprefix lib,$(addsuffix .so,\
- $(NDK_PREBUILT_SHARED_LIBRARIES))))
-
-# $(info NDK: $(ndk_libraries))
-
-LOCAL_CFLAGS += -DLINKED_LIBRARIES='"$(sanitizer_runtime_libraries):$(ndk_libraries)"'
-
-sanitizer_runtime_libraries :=
-ndk_libraries :=
-
include $(BUILD_SHARED_LIBRARY)
@@ -39,7 +21,7 @@
LOCAL_SRC_FILES := main_extractorservice.cpp
LOCAL_SHARED_LIBRARIES := libmedia libmediaextractorservice libbinder libutils \
- liblog libbase libandroidicu libavservices_minijail
+ liblog libandroidicu libavservices_minijail
LOCAL_STATIC_LIBRARIES := libicuandroid_utils
LOCAL_MODULE:= mediaextractor
LOCAL_INIT_RC := mediaextractor.rc
diff --git a/services/mediaextractor/MediaExtractorService.cpp b/services/mediaextractor/MediaExtractorService.cpp
index 0665930..de5c3e4 100644
--- a/services/mediaextractor/MediaExtractorService.cpp
+++ b/services/mediaextractor/MediaExtractorService.cpp
@@ -30,9 +30,7 @@
namespace android {
MediaExtractorService::MediaExtractorService()
- : BnMediaExtractorService() {
- MediaExtractorFactory::SetLinkedLibraries(std::string(LINKED_LIBRARIES));
-}
+ : BnMediaExtractorService() { }
sp<IMediaExtractor> MediaExtractorService::makeExtractor(
const sp<IDataSource> &remoteSource, const char *mime) {
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 1c63f64..f3339a0 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -16,8 +16,6 @@
"liblog",
],
- compile_multilib: "32",
-
include_dirs: ["frameworks/av/include"],
cflags: [