Merge "Correct return codes in documentation"
diff --git a/Android.bp b/Android.bp
index 0e0ea1f..5aa2029 100644
--- a/Android.bp
+++ b/Android.bp
@@ -75,10 +75,10 @@
"av-types-aidl-cpp",
],
header_libs: [
- "libaudioclient_aidl_conversion_util",
+ "libaudio_aidl_conversion_common_util_cpp",
],
export_header_lib_headers: [
- "libaudioclient_aidl_conversion_util",
+ "libaudio_aidl_conversion_common_util_cpp",
],
host_supported: true,
vendor_available: true,
diff --git a/apex/Android.bp b/apex/Android.bp
index 570ca01..b0d7c02 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -67,13 +67,15 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media-androidManifest",
- // IMPORTANT: q-launched-apex-module enables the build system to make
- // sure the package compatible to Android 10 in two ways:
+ // IMPORTANT: q-launched-dcla-enabled-apex-module enables the build system to
+ // make sure the package is compatible with Android 10 in two ways (if the
+ // flag APEX_BUILD_FOR_PRE_S_DEVICES=1 is set):
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- defaults: ["q-launched-apex-module"],
+ // If the flag is not set, the package is built to be compatible with Android 12.
+ defaults: ["q-launched-dcla-enabled-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
@@ -191,13 +193,15 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media.swcodec-androidManifest",
- // IMPORTANT: q-launched-apex-module enables the build system to make
- // sure the package compatible to Android 10 in two ways:
+ // IMPORTANT: q-launched-dcla-enabled-apex-module enables the build system to
+ // make sure the package is compatible with Android 10 in two ways (if the
+ // flag APEX_BUILD_FOR_PRE_S_DEVICES=1 is set):
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- defaults: ["q-launched-apex-module"],
+ // If the flag is not set, the package is built to be compatible with Android 12.
+ defaults: ["q-launched-dcla-enabled-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index be47898..bb880d1 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -52,7 +52,10 @@
parcel->readInt64(&lastCompletedRegularFrameNumber);
parcel->readInt64(&lastCompletedReprocessFrameNumber);
parcel->readInt64(&lastCompletedZslFrameNumber);
-
+ parcel->readBool(&hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->readInt64(&readoutTimestamp);
+ }
return OK;
}
@@ -82,6 +85,10 @@
parcel->writeInt64(lastCompletedRegularFrameNumber);
parcel->writeInt64(lastCompletedReprocessFrameNumber);
parcel->writeInt64(lastCompletedZslFrameNumber);
+ parcel->writeBool(hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->writeInt64(readoutTimestamp);
+ }
return OK;
}
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index 88783fb..fefea13 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -48,5 +48,5 @@
/**
* Checks if the camera has been disabled via device policy.
*/
- boolean isCameraDisabled();
+ boolean isCameraDisabled(int userId);
}
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index f163c1e..de534ab 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -103,6 +103,17 @@
*/
int64_t lastCompletedZslFrameNumber;
+ /**
+ * Whether the readoutTimestamp variable is valid and should be used.
+ */
+ bool hasReadoutTimestamp;
+
+ /**
+ * The readout timestamp of the capture. Its value is equal to the
+ * start-of-exposure timestamp plus the exposure time (and a possible fixed
+ * offset due to sensor crop).
+ */
+ int64_t readoutTimestamp;
/**
* Constructor initializes object as invalid by setting requestId to be -1.
@@ -118,7 +129,9 @@
errorPhysicalCameraId(),
lastCompletedRegularFrameNumber(-1),
lastCompletedReprocessFrameNumber(-1),
- lastCompletedZslFrameNumber(-1) {
+ lastCompletedZslFrameNumber(-1),
+ hasReadoutTimestamp(false),
+ readoutTimestamp(0) {
}
/**
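
(Illustration, not part of the patch.) The new pair of fields is a presence flag
plus a value, and the comment above fixes the value's relation to the
start-of-exposure timestamp. A minimal sketch of how a consumer might use the
pair, assuming nanosecond timestamps; the helper name is hypothetical:

    #include <cstdint>

    // Prefer the readout timestamp when the producer populated it; otherwise
    // fall back to the start-of-exposure timestamp. Per the comment above,
    // readout = start of exposure + exposure time (+ an optional fixed
    // sensor-crop offset).
    int64_t chooseTimestampNs(bool hasReadoutTimestamp, int64_t readoutTimestampNs,
                              int64_t exposureStartNs) {
        return hasReadoutTimestamp ? readoutTimestampNs : exposureStartNs;
    }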
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index b842885..b7c7f7f 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -43,7 +43,9 @@
TIMESTAMP_BASE_SENSOR = 1,
TIMESTAMP_BASE_MONOTONIC = 2,
TIMESTAMP_BASE_REALTIME = 3,
- TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4
+ TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4,
+ TIMESTAMP_BASE_READOUT_SENSOR = 5,
+ TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_READOUT_SENSOR,
};
enum MirrorModeType {
MIRROR_MODE_AUTO = 0,
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index b6f8552..0d156a5 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -2198,6 +2198,10 @@
* <a href="https://developer.android.com/reference/android/hardware/camera2/CameraManager.html#turnOnTorchWithStrengthLevel">CameraManager#turnOnTorchWithStrengthLevel</a>.
* If this value is equal to 1, flashlight brightness control is not supported.
* The value for this key will be null for devices with no flash unit.</p>
+ * <p>The maximum value is guaranteed to be safe to use for an indefinite duration in
+ * terms of device flashlight lifespan, but may be too bright for comfort for many
+ * use cases. Use the default torch brightness value to avoid problems with an
+ * over-bright flashlight.</p>
*/
ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL = // int32
ACAMERA_FLASH_INFO_START + 2,
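
(Illustration, not part of the patch.) The added paragraph tells callers how to
interpret ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL. A short NDK sketch of
reading the tag, assuming a valid ACameraManager and camera id and eliding most
error handling:

    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Returns the maximum flashlight strength level, or 1 when brightness
    // control is not supported (value 1 or missing entry).
    int32_t queryMaxTorchStrength(ACameraManager* manager, const char* cameraId) {
        ACameraMetadata* chars = nullptr;
        if (ACameraManager_getCameraCharacteristics(manager, cameraId, &chars) != ACAMERA_OK) {
            return 1;
        }
        ACameraMetadata_const_entry entry;
        int32_t maxLevel = 1;
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_FLASH_INFO_STRENGTH_MAXIMUM_LEVEL,
                                          &entry) == ACAMERA_OK) {
            maxLevel = entry.data.i32[0];
        }
        ACameraMetadata_free(chars);
        return maxLevel;
    }

As the new documentation notes, driving the torch at this maximum for long
periods is safe for the flash unit but may be uncomfortably bright; the default
strength level is the conservative choice.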
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 408d216..1e1a49d 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -35,6 +35,7 @@
"CryptoHalAidl.cpp",
"DrmUtils.cpp",
"DrmHalListener.cpp",
+ "DrmStatus.cpp",
],
local_include_dirs: [
@@ -74,6 +75,7 @@
static_libs: [
"resourcemanager_aidl_interface-ndk",
"libaidlcommonsupport",
+ "libjsoncpp",
],
export_shared_lib_headers: [
diff --git a/drm/libmediadrm/CryptoHal.cpp b/drm/libmediadrm/CryptoHal.cpp
index f95d527..fc1780d 100644
--- a/drm/libmediadrm/CryptoHal.cpp
+++ b/drm/libmediadrm/CryptoHal.cpp
@@ -71,7 +71,7 @@
mCryptoHalHidl->notifyResolution(width, height);
}
-status_t CryptoHal::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+DrmStatus CryptoHal::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
// This requires plugin to be created.
if (mCryptoHalAidl->initCheck() == OK) return mCryptoHalAidl->setMediaDrmSession(sessionId);
return mCryptoHalHidl->setMediaDrmSession(sessionId);
diff --git a/drm/libmediadrm/CryptoHalAidl.cpp b/drm/libmediadrm/CryptoHalAidl.cpp
index 8b9d1de..c1fd797 100644
--- a/drm/libmediadrm/CryptoHalAidl.cpp
+++ b/drm/libmediadrm/CryptoHalAidl.cpp
@@ -41,7 +41,7 @@
using ::aidl::android::hardware::drm::DecryptArgs;
using ::android::sp;
-using ::android::DrmUtils::statusAidlToStatusT;
+using ::android::DrmUtils::statusAidlToDrmStatus;
using ::android::hardware::hidl_array;
using ::android::hardware::hidl_handle;
using ::android::hardware::hidl_memory;
@@ -260,7 +260,7 @@
}
}
-status_t CryptoHalAidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+DrmStatus CryptoHalAidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -268,7 +268,7 @@
}
auto err = mPlugin->setMediaDrmSession(toStdVec(sessionId));
- return statusAidlToStatusT(err);
+ return statusAidlToDrmStatus(err);
}
ssize_t CryptoHalAidl::decrypt(const uint8_t keyId[16], const uint8_t iv[16],
@@ -352,7 +352,7 @@
int32_t result = 0;
::ndk::ScopedAStatus statusAidl = mPlugin->decrypt(args, &result);
- err = statusAidlToStatusT(statusAidl);
+ err = statusAidlToDrmStatus(statusAidl);
std::string msgStr(statusAidl.getMessage());
if (errorDetailMsg != nullptr) {
*errorDetailMsg = toString8(msgStr);
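
(Illustration, not part of the patch.) The CryptoHalAidl and DrmHalAidl changes
route every binder status through DrmUtils::statusAidlToDrmStatus instead of
statusAidlToStatusT. The helper itself is not shown in this diff; a hypothetical
sketch of the shape such a conversion could take (the real implementation may
map the HAL's service-specific codes differently and may also capture the
status message):

    #include <android/binder_auto_utils.h>  // ::ndk::ScopedAStatus
    #include <mediadrm/DrmStatus.h>
    #include <utils/Errors.h>

    namespace android {

    // Hypothetical sketch only; not DrmUtils::statusAidlToDrmStatus.
    DrmStatus statusAidlToDrmStatusSketch(const ::ndk::ScopedAStatus& s) {
        if (s.isOk()) return DrmStatus(OK);
        if (s.getExceptionCode() == EX_SERVICE_SPECIFIC) {
            return DrmStatus(s.getServiceSpecificError());  // HAL-reported error code
        }
        return DrmStatus(DEAD_OBJECT);  // binder transport failure
    }

    }  // namespace android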
diff --git a/drm/libmediadrm/CryptoHalHidl.cpp b/drm/libmediadrm/CryptoHalHidl.cpp
index 55364b5..458a1ae 100644
--- a/drm/libmediadrm/CryptoHalHidl.cpp
+++ b/drm/libmediadrm/CryptoHalHidl.cpp
@@ -386,7 +386,7 @@
ALOGE_IF(!hResult.isOk(), "notifyResolution txn failed %s", hResult.description().c_str());
}
-status_t CryptoHalHidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
+DrmStatus CryptoHalHidl::setMediaDrmSession(const Vector<uint8_t>& sessionId) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
diff --git a/drm/libmediadrm/DrmHal.cpp b/drm/libmediadrm/DrmHal.cpp
index c394d5a..754f066 100644
--- a/drm/libmediadrm/DrmHal.cpp
+++ b/drm/libmediadrm/DrmHal.cpp
@@ -20,6 +20,7 @@
#include <mediadrm/DrmHal.h>
#include <mediadrm/DrmHalAidl.h>
#include <mediadrm/DrmHalHidl.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/DrmUtils.h>
namespace android {
@@ -31,49 +32,49 @@
DrmHal::~DrmHal() {}
-status_t DrmHal::initCheck() const {
- if (mDrmHalAidl->initCheck() == OK || mDrmHalHidl->initCheck() == OK) return OK;
- if (mDrmHalAidl->initCheck() == NO_INIT || mDrmHalHidl->initCheck() == NO_INIT) return NO_INIT;
+DrmStatus DrmHal::initCheck() const {
+ if (mDrmHalAidl->initCheck() == OK || mDrmHalHidl->initCheck() == OK) return DrmStatus(OK);
+ if (mDrmHalAidl->initCheck() == NO_INIT || mDrmHalHidl->initCheck() == NO_INIT)
+ return DrmStatus(NO_INIT);
return mDrmHalHidl->initCheck();
}
-status_t DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
- DrmPlugin::SecurityLevel securityLevel, bool* result) {
- status_t statusResult;
- statusResult = mDrmHalAidl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
+DrmStatus DrmHal::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel, bool* result) {
+ DrmStatus statusResult =
+ mDrmHalAidl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
if (*result) return statusResult;
return mDrmHalHidl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
}
-status_t DrmHal::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
- status_t statusResult;
- statusResult = mDrmHalAidl->createPlugin(uuid, appPackageName);
- if (statusResult != OK) return mDrmHalHidl->createPlugin(uuid, appPackageName);
- return statusResult;
+DrmStatus DrmHal::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+ return mDrmHalAidl->createPlugin(uuid, appPackageName) == OK
+ ? DrmStatus(OK)
+ : mDrmHalHidl->createPlugin(uuid, appPackageName);
}
-status_t DrmHal::destroyPlugin() {
- status_t statusResult = mDrmHalAidl->destroyPlugin();
- status_t statusResultHidl = mDrmHalHidl->destroyPlugin();
+DrmStatus DrmHal::destroyPlugin() {
+ DrmStatus statusResult = mDrmHalAidl->destroyPlugin();
+ DrmStatus statusResultHidl = mDrmHalHidl->destroyPlugin();
if (statusResult != OK) return statusResult;
return statusResultHidl;
}
-status_t DrmHal::openSession(DrmPlugin::SecurityLevel securityLevel, Vector<uint8_t>& sessionId) {
+DrmStatus DrmHal::openSession(DrmPlugin::SecurityLevel securityLevel, Vector<uint8_t>& sessionId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->openSession(securityLevel, sessionId);
return mDrmHalHidl->openSession(securityLevel, sessionId);
}
-status_t DrmHal::closeSession(Vector<uint8_t> const& sessionId) {
+DrmStatus DrmHal::closeSession(Vector<uint8_t> const& sessionId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->closeSession(sessionId);
return mDrmHalHidl->closeSession(sessionId);
}
-status_t DrmHal::getKeyRequest(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& initData,
- String8 const& mimeType, DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const& optionalParameters,
- Vector<uint8_t>& request, String8& defaultUrl,
- DrmPlugin::KeyRequestType* keyRequestType) {
+DrmStatus DrmHal::getKeyRequest(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& initData,
+ String8 const& mimeType, DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->getKeyRequest(sessionId, initData, mimeType, keyType,
optionalParameters, request, defaultUrl, keyRequestType);
@@ -81,212 +82,212 @@
request, defaultUrl, keyRequestType);
}
-status_t DrmHal::provideKeyResponse(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& response, Vector<uint8_t>& keySetId) {
+DrmStatus DrmHal::provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response, Vector<uint8_t>& keySetId) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->provideKeyResponse(sessionId, response, keySetId);
return mDrmHalHidl->provideKeyResponse(sessionId, response, keySetId);
}
-status_t DrmHal::removeKeys(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHal::removeKeys(Vector<uint8_t> const& keySetId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeKeys(keySetId);
return mDrmHalHidl->removeKeys(keySetId);
}
-status_t DrmHal::restoreKeys(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHal::restoreKeys(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keySetId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->restoreKeys(sessionId, keySetId);
return mDrmHalHidl->restoreKeys(sessionId, keySetId);
}
-status_t DrmHal::queryKeyStatus(Vector<uint8_t> const& sessionId,
- KeyedVector<String8, String8>& infoMap) const {
+DrmStatus DrmHal::queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->queryKeyStatus(sessionId, infoMap);
return mDrmHalHidl->queryKeyStatus(sessionId, infoMap);
}
-status_t DrmHal::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
- Vector<uint8_t>& request, String8& defaultUrl) {
+DrmStatus DrmHal::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->getProvisionRequest(certType, certAuthority, request, defaultUrl);
return mDrmHalHidl->getProvisionRequest(certType, certAuthority, request, defaultUrl);
}
-status_t DrmHal::provideProvisionResponse(Vector<uint8_t> const& response,
- Vector<uint8_t>& certificate,
- Vector<uint8_t>& wrappedKey) {
+DrmStatus DrmHal::provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->provideProvisionResponse(response, certificate, wrappedKey);
return mDrmHalHidl->provideProvisionResponse(response, certificate, wrappedKey);
}
-status_t DrmHal::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+DrmStatus DrmHal::getSecureStops(List<Vector<uint8_t>>& secureStops) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStops(secureStops);
return mDrmHalHidl->getSecureStops(secureStops);
}
-status_t DrmHal::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+DrmStatus DrmHal::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStopIds(secureStopIds);
return mDrmHalHidl->getSecureStopIds(secureStopIds);
}
-status_t DrmHal::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+DrmStatus DrmHal::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecureStop(ssid, secureStop);
return mDrmHalHidl->getSecureStop(ssid, secureStop);
}
-status_t DrmHal::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+DrmStatus DrmHal::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->releaseSecureStops(ssRelease);
return mDrmHalHidl->releaseSecureStops(ssRelease);
}
-status_t DrmHal::removeSecureStop(Vector<uint8_t> const& ssid) {
+DrmStatus DrmHal::removeSecureStop(Vector<uint8_t> const& ssid) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeSecureStop(ssid);
return mDrmHalHidl->removeSecureStop(ssid);
}
-status_t DrmHal::removeAllSecureStops() {
+DrmStatus DrmHal::removeAllSecureStops() {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeAllSecureStops();
return mDrmHalHidl->removeAllSecureStops();
}
-status_t DrmHal::getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
- DrmPlugin::HdcpLevel* maxLevel) const {
+DrmStatus DrmHal::getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+ DrmPlugin::HdcpLevel* maxLevel) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getHdcpLevels(connectedLevel, maxLevel);
return mDrmHalHidl->getHdcpLevels(connectedLevel, maxLevel);
}
-status_t DrmHal::getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const {
+DrmStatus DrmHal::getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->getNumberOfSessions(currentSessions, maxSessions);
return mDrmHalHidl->getNumberOfSessions(currentSessions, maxSessions);
}
-status_t DrmHal::getSecurityLevel(Vector<uint8_t> const& sessionId,
- DrmPlugin::SecurityLevel* level) const {
+DrmStatus DrmHal::getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getSecurityLevel(sessionId, level);
return mDrmHalHidl->getSecurityLevel(sessionId, level);
}
-status_t DrmHal::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+DrmStatus DrmHal::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getOfflineLicenseKeySetIds(keySetIds);
return mDrmHalHidl->getOfflineLicenseKeySetIds(keySetIds);
}
-status_t DrmHal::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHal::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->removeOfflineLicense(keySetId);
return mDrmHalHidl->removeOfflineLicense(keySetId);
}
-status_t DrmHal::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
- DrmPlugin::OfflineLicenseState* licenseState) const {
+DrmStatus DrmHal::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+ DrmPlugin::OfflineLicenseState* licenseState) const {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->getOfflineLicenseState(keySetId, licenseState);
return mDrmHalHidl->getOfflineLicenseState(keySetId, licenseState);
}
-status_t DrmHal::getPropertyString(String8 const& name, String8& value) const {
+DrmStatus DrmHal::getPropertyString(String8 const& name, String8& value) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getPropertyString(name, value);
return mDrmHalHidl->getPropertyString(name, value);
}
-status_t DrmHal::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+DrmStatus DrmHal::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getPropertyByteArray(name, value);
return mDrmHalHidl->getPropertyByteArray(name, value);
}
-status_t DrmHal::setPropertyString(String8 const& name, String8 const& value) const {
+DrmStatus DrmHal::setPropertyString(String8 const& name, String8 const& value) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPropertyString(name, value);
return mDrmHalHidl->setPropertyString(name, value);
}
-status_t DrmHal::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+DrmStatus DrmHal::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPropertyByteArray(name, value);
return mDrmHalHidl->setPropertyByteArray(name, value);
}
-status_t DrmHal::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+DrmStatus DrmHal::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getMetrics(consumer);
return mDrmHalHidl->getMetrics(consumer);
}
-status_t DrmHal::setCipherAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+DrmStatus DrmHal::setCipherAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->setCipherAlgorithm(sessionId, algorithm);
return mDrmHalHidl->setCipherAlgorithm(sessionId, algorithm);
}
-status_t DrmHal::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+DrmStatus DrmHal::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setMacAlgorithm(sessionId, algorithm);
return mDrmHalHidl->setMacAlgorithm(sessionId, algorithm);
}
-status_t DrmHal::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHal::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->encrypt(sessionId, keyId, input, iv, output);
return mDrmHalHidl->encrypt(sessionId, keyId, input, iv, output);
}
-status_t DrmHal::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHal::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->decrypt(sessionId, keyId, input, iv, output);
return mDrmHalHidl->decrypt(sessionId, keyId, input, iv, output);
}
-status_t DrmHal::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+DrmStatus DrmHal::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->sign(sessionId, keyId, message, signature);
return mDrmHalHidl->sign(sessionId, keyId, message, signature);
}
-status_t DrmHal::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
- bool& match) {
+DrmStatus DrmHal::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->verify(sessionId, keyId, message, signature, match);
return mDrmHalHidl->verify(sessionId, keyId, message, signature, match);
}
-status_t DrmHal::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
- Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
- Vector<uint8_t>& signature) {
+DrmStatus DrmHal::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+ Vector<uint8_t>& signature) {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->signRSA(sessionId, algorithm, message, wrappedKey, signature);
return mDrmHalHidl->signRSA(sessionId, algorithm, message, wrappedKey, signature);
}
-status_t DrmHal::setListener(const sp<IDrmClient>& listener) {
+DrmStatus DrmHal::setListener(const sp<IDrmClient>& listener) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setListener(listener);
return mDrmHalHidl->setListener(listener);
}
-status_t DrmHal::requiresSecureDecoder(const char* mime, bool* required) const {
+DrmStatus DrmHal::requiresSecureDecoder(const char* mime, bool* required) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->requiresSecureDecoder(mime, required);
return mDrmHalHidl->requiresSecureDecoder(mime, required);
}
-status_t DrmHal::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
- bool* required) const {
+DrmStatus DrmHal::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const {
if (mDrmHalAidl->initCheck() == OK)
return mDrmHalAidl->requiresSecureDecoder(mime, securityLevel, required);
return mDrmHalHidl->requiresSecureDecoder(mime, securityLevel, required);
}
-status_t DrmHal::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+DrmStatus DrmHal::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->setPlaybackId(sessionId, playbackId);
return mDrmHalHidl->setPlaybackId(sessionId, playbackId);
}
-status_t DrmHal::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+DrmStatus DrmHal::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
if (mDrmHalAidl->initCheck() == OK) return mDrmHalAidl->getLogMessages(logs);
return mDrmHalHidl->getLogMessages(logs);
}
-status_t DrmHal::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+DrmStatus DrmHal::getSupportedSchemes(std::vector<uint8_t>& schemes) const {
status_t statusResult;
statusResult = mDrmHalAidl->getSupportedSchemes(schemes);
if (statusResult == OK) return statusResult;
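
(Illustration, not part of the patch.) Throughout this file and the
DrmHalAidl/DrmHalHidl changes below, return types change from status_t to
DrmStatus while the call sites keep their shape: DrmStatus err = UNKNOWN_ERROR;,
comparisons against OK, and passing the result where a status_t is expected
(e.g. EventTimer<status_t>). That usage implies DrmStatus converts implicitly in
both directions. A minimal sketch of such a wrapper, assuming nothing beyond
what these call sites need (the real DrmStatus added by this change may carry
extra error detail):

    #include <utils/Errors.h>  // status_t, OK

    namespace android {

    // Sketch of a status_t-compatible wrapper; not the actual DrmStatus.
    class DrmStatusSketch {
    public:
        DrmStatusSketch(status_t status = OK) : mStatus(status) {}  // implicit on purpose
        operator status_t() const { return mStatus; }               // err == OK keeps working
    private:
        status_t mStatus;
    };

    }  // namespace android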
diff --git a/drm/libmediadrm/DrmHalAidl.cpp b/drm/libmediadrm/DrmHalAidl.cpp
index bdd83e9..c369529 100644
--- a/drm/libmediadrm/DrmHalAidl.cpp
+++ b/drm/libmediadrm/DrmHalAidl.cpp
@@ -29,9 +29,9 @@
#include <media/stagefright/foundation/hexdump.h>
#include <mediadrm/DrmHalAidl.h>
#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/DrmUtils.h>
-using ::android::DrmUtils::statusAidlToStatusT;
using ::aidl::android::hardware::drm::CryptoSchemes;
using ::aidl::android::hardware::drm::DrmMetricNamedValue;
using ::aidl::android::hardware::drm::DrmMetricValue;
@@ -55,6 +55,7 @@
using ::aidl::android::hardware::drm::Status;
using ::aidl::android::hardware::drm::SupportedContentType;
using ::aidl::android::hardware::drm::Uuid;
+using ::android::DrmUtils::statusAidlToDrmStatus;
using DrmMetricGroupAidl = ::aidl::android::hardware::drm::DrmMetricGroup;
using DrmMetricGroupHidl = ::android::hardware::drm::V1_1::DrmMetricGroup;
using DrmMetricAidl = ::aidl::android::hardware::drm::DrmMetric;
@@ -396,19 +397,19 @@
mFactories(DrmUtils::makeDrmFactoriesAidl()),
mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {}
-status_t DrmHalAidl::initCheck() const {
- return mInitCheck;
+DrmStatus DrmHalAidl::initCheck() const {
+ return DrmStatus(mInitCheck);
}
DrmHalAidl::~DrmHalAidl() {}
-status_t DrmHalAidl::setListener(const sp<IDrmClient>& listener) {
+DrmStatus DrmHalAidl::setListener(const sp<IDrmClient>& listener) {
mListener->setListener(listener);
- return NO_ERROR;
+ return DrmStatus(NO_ERROR);
}
-status_t DrmHalAidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
- DrmPlugin::SecurityLevel level, bool* isSupported) {
+DrmStatus DrmHalAidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel level, bool* isSupported) {
Mutex::Autolock autoLock(mLock);
*isSupported = false;
Uuid uuidAidl = DrmUtils::toAidlUuid(uuid);
@@ -438,9 +439,9 @@
// isCryptoSchemeSupported(uuid, mimeType)
*isSupported = contentTypes.count(mimeTypeStr);
}
- return OK;
+ return DrmStatus(OK);
} else if (mimeType == "") {
- return BAD_VALUE;
+ return DrmStatus(BAD_VALUE);
}
auto ct = contentTypes[mimeTypeStr];
@@ -452,10 +453,10 @@
break;
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalAidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+DrmStatus DrmHalAidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
Uuid uuidAidl = DrmUtils::toAidlUuid(uuid);
@@ -497,10 +498,10 @@
}
}
- return mInitCheck;
+ return DrmStatus(mInitCheck);
}
-status_t DrmHalAidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
+DrmStatus DrmHalAidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -510,14 +511,14 @@
return ERROR_DRM_CANNOT_HANDLE;
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
bool retry = true;
do {
std::vector<uint8_t> aSessionId;
::ndk::ScopedAStatus status = mPlugin->openSession(aSecurityLevel, &aSessionId);
if (status.isOk()) sessionId = toVector(aSessionId);
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
if (err == ERROR_DRM_RESOURCE_BUSY && retry) {
mLock.unlock();
@@ -545,13 +546,13 @@
return err;
}
-status_t DrmHalAidl::closeSession(Vector<uint8_t> const& sessionId) {
+DrmStatus DrmHalAidl::closeSession(Vector<uint8_t> const& sessionId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
::ndk::ScopedAStatus status = mPlugin->closeSession(sessionIdAidl);
- status_t response = statusAidlToStatusT(status);
+ DrmStatus response = statusAidlToDrmStatus(status);
if (status.isOk()) {
DrmSessionManager::Instance()->removeSession(sessionId);
for (auto i = mOpenSessions.begin(); i != mOpenSessions.end(); i++) {
@@ -568,12 +569,12 @@
return response;
}
-status_t DrmHalAidl::getKeyRequest(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& initData, String8 const& mimeType,
- DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const& optionalParameters,
- Vector<uint8_t>& request, String8& defaultUrl,
- DrmPlugin::KeyRequestType* keyRequestType) {
+DrmStatus DrmHalAidl::getKeyRequest(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& initData, String8 const& mimeType,
+ DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
@@ -592,7 +593,7 @@
return BAD_VALUE;
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
std::vector<uint8_t> initDataAidl = toStdVec(initData);
@@ -607,21 +608,21 @@
*keyRequestType = toKeyRequestType(keyRequest.requestType);
}
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
keyRequestTimer.SetAttribute(err);
return err;
}
-status_t DrmHalAidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& response,
- Vector<uint8_t>& keySetId) {
+DrmStatus DrmHalAidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response,
+ Vector<uint8_t>& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
std::vector<uint8_t> sessionIdAidl = toStdVec(sessionId);
std::vector<uint8_t> responseAidl = toStdVec(response);
@@ -630,21 +631,21 @@
mPlugin->provideKeyResponse(sessionIdAidl, responseAidl, &keySetIdsAidl);
if (status.isOk()) keySetId = toVector(keySetIdsAidl.keySetId);
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
keyResponseTimer.SetAttribute(err);
return err;
}
-status_t DrmHalAidl::removeKeys(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalAidl::removeKeys(Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
::ndk::ScopedAStatus status = mPlugin->removeKeys(toStdVec(keySetId));
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::restoreKeys(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalAidl::restoreKeys(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -653,11 +654,11 @@
KeySetId keySetIdsAidl;
keySetIdsAidl.keySetId = toStdVec(keySetId);
::ndk::ScopedAStatus status = mPlugin->restoreKeys(toStdVec(sessionId), keySetIdsAidl);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
- KeyedVector<String8, String8>& infoMap) const {
+DrmStatus DrmHalAidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -668,15 +669,15 @@
infoMap = toKeyedVector(infoMapAidl);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
- Vector<uint8_t>& request, String8& defaultUrl) {
+DrmStatus DrmHalAidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
ProvisionRequest requestAidl;
::ndk::ScopedAStatus status = mPlugin->getProvisionRequest(
@@ -685,29 +686,29 @@
request = toVector(requestAidl.request);
defaultUrl = toString8(requestAidl.defaultUrl);
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
mMetrics.mGetProvisionRequestCounter.Increment(err);
return err;
}
-status_t DrmHalAidl::provideProvisionResponse(Vector<uint8_t> const& response,
- Vector<uint8_t>& certificate,
- Vector<uint8_t>& wrappedKey) {
+DrmStatus DrmHalAidl::provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
ProvideProvisionResponseResult result;
::ndk::ScopedAStatus status = mPlugin->provideProvisionResponse(toStdVec(response), &result);
certificate = toVector(result.certificate);
wrappedKey = toVector(result.wrappedKey);
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
mMetrics.mProvideProvisionResponseCounter.Increment(err);
return err;
}
-status_t DrmHalAidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+DrmStatus DrmHalAidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -716,10 +717,10 @@
secureStops = toSecureStops(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+DrmStatus DrmHalAidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -728,10 +729,10 @@
secureStopIds = toSecureStopIds(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+DrmStatus DrmHalAidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -743,10 +744,10 @@
secureStop = toVector(result.opaqueData);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+DrmStatus DrmHalAidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -754,10 +755,10 @@
ssId.opaqueData = toStdVec(ssRelease);
::ndk::ScopedAStatus status = mPlugin->releaseSecureStops(ssId);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::removeSecureStop(Vector<uint8_t> const& ssid) {
+DrmStatus DrmHalAidl::removeSecureStop(Vector<uint8_t> const& ssid) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -765,19 +766,19 @@
SecureStopId ssidAidl;
ssidAidl.secureStopId = toStdVec(ssid);
::ndk::ScopedAStatus status = mPlugin->removeSecureStop(ssidAidl);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::removeAllSecureStops() {
+DrmStatus DrmHalAidl::removeAllSecureStops() {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
::ndk::ScopedAStatus status = mPlugin->releaseAllSecureStops();
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
- DrmPlugin::HdcpLevel* max) const {
+DrmStatus DrmHalAidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
+ DrmPlugin::HdcpLevel* max) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -794,10 +795,10 @@
*connected = toHdcpLevel(lvlsAidl.connectedLevel);
*max = toHdcpLevel(lvlsAidl.maxLevel);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
+DrmStatus DrmHalAidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -814,11 +815,11 @@
*open = result.currentSessions;
*max = result.maxSessions;
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
- DrmPlugin::SecurityLevel* level) const {
+DrmStatus DrmHalAidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -833,10 +834,10 @@
*level = toSecurityLevel(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+DrmStatus DrmHalAidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -845,21 +846,21 @@
keySetIds = toKeySetIds(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalAidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
KeySetId keySetIdAidl;
keySetIdAidl.keySetId = toStdVec(keySetId);
::ndk::ScopedAStatus status = mPlugin->removeOfflineLicense(keySetIdAidl);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
- DrmPlugin::OfflineLicenseState* licenseState) const {
+DrmStatus DrmHalAidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+ DrmPlugin::OfflineLicenseState* licenseState) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -873,15 +874,15 @@
*licenseState = toOfflineLicenseState(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getPropertyString(String8 const& name, String8& value) const {
+DrmStatus DrmHalAidl::getPropertyString(String8 const& name, String8& value) const {
Mutex::Autolock autoLock(mLock);
- return getPropertyStringInternal(name, value);
+ return DrmStatus(getPropertyStringInternal(name, value));
}
-status_t DrmHalAidl::getPropertyStringInternal(String8 const& name, String8& value) const {
+DrmStatus DrmHalAidl::getPropertyStringInternal(String8 const& name, String8& value) const {
// This function is internal to the class and should only be called while
// mLock is already held.
INIT_CHECK();
@@ -891,52 +892,53 @@
value = toString8(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+DrmStatus DrmHalAidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
Mutex::Autolock autoLock(mLock);
- return getPropertyByteArrayInternal(name, value);
+ return DrmStatus(getPropertyByteArrayInternal(name, value));
}
-status_t DrmHalAidl::getPropertyByteArrayInternal(String8 const& name,
- Vector<uint8_t>& value) const {
+DrmStatus DrmHalAidl::getPropertyByteArrayInternal(String8 const& name,
+ Vector<uint8_t>& value) const {
// This function is internal to the class and should only be called while
// mLock is already held.
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
std::vector<uint8_t> result;
::ndk::ScopedAStatus status = mPlugin->getPropertyByteArray(toStdString(name), &result);
value = toVector(result);
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
if (name == kPropertyDeviceUniqueId) {
mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
}
return err;
}
-status_t DrmHalAidl::setPropertyString(String8 const& name, String8 const& value) const {
+DrmStatus DrmHalAidl::setPropertyString(String8 const& name, String8 const& value) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
::ndk::ScopedAStatus status = mPlugin->setPropertyString(toStdString(name), toStdString(value));
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+DrmStatus DrmHalAidl::setPropertyByteArray(String8 const& name,
+ Vector<uint8_t> const& value) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
::ndk::ScopedAStatus status = mPlugin->setPropertyByteArray(toStdString(name), toStdVec(value));
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+DrmStatus DrmHalAidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
if (consumer == nullptr) {
- return UNEXPECTED_NULL;
+ return DrmStatus(UNEXPECTED_NULL);
}
consumer->consumeFrameworkMetrics(mMetrics);
@@ -957,7 +959,7 @@
vendor += description;
hidl_vec<DrmMetricGroupHidl> pluginMetrics;
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
std::vector<DrmMetricGroupAidl> result;
::ndk::ScopedAStatus status = mPlugin->getMetrics(&result);
@@ -967,13 +969,13 @@
consumer->consumeHidlMetrics(vendor, pluginMetrics);
}
- err = statusAidlToStatusT(status);
+ err = statusAidlToDrmStatus(status);
return err;
}
-status_t DrmHalAidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
- String8 const& algorithm) {
+DrmStatus DrmHalAidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -981,10 +983,10 @@
::ndk::ScopedAStatus status =
mPlugin->setCipherAlgorithm(toStdVec(sessionId), toStdString(algorithm));
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+DrmStatus DrmHalAidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -992,12 +994,12 @@
::ndk::ScopedAStatus status =
mPlugin->setMacAlgorithm(toStdVec(sessionId), toStdString(algorithm));
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHalAidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1009,12 +1011,12 @@
output = toVector(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHalAidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1026,11 +1028,11 @@
output = toVector(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+DrmStatus DrmHalAidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1042,12 +1044,12 @@
signature = toVector(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
- bool& match) {
+DrmStatus DrmHalAidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1056,12 +1058,12 @@
::ndk::ScopedAStatus status = mPlugin->verify(toStdVec(sessionId), toStdVec(keyId),
toStdVec(message), toStdVec(signature), &match);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
- Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
- Vector<uint8_t>& signature) {
+DrmStatus DrmHalAidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+ Vector<uint8_t>& signature) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1074,10 +1076,10 @@
signature = toVector(result);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::requiresSecureDecoder(const char* mime, bool* required) const {
+DrmStatus DrmHalAidl::requiresSecureDecoder(const char* mime, bool* required) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1086,14 +1088,15 @@
mPlugin->requiresSecureDecoder(mimeAidl, SecurityLevel::DEFAULT, required);
if (!status.isOk()) {
DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %d", status.getServiceSpecificError());
- return DEAD_OBJECT;
+ return DrmStatus(DEAD_OBJECT);
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalAidl::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
- bool* required) const {
+DrmStatus DrmHalAidl::requiresSecureDecoder(const char* mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1101,7 +1104,7 @@
std::string mimeAidl(mime);
::ndk::ScopedAStatus status = mPlugin->requiresSecureDecoder(mimeAidl, aLevel, required);
- status_t err = statusAidlToStatusT(status);
+ DrmStatus err = statusAidlToDrmStatus(status);
if (!status.isOk()) {
DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %d", status.getServiceSpecificError());
}
@@ -1109,17 +1112,17 @@
return err;
}
-status_t DrmHalAidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+DrmStatus DrmHalAidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
std::string playbackIdAidl(playbackId);
::ndk::ScopedAStatus status = mPlugin->setPlaybackId(toStdVec(sessionId), playbackIdAidl);
- return statusAidlToStatusT(status);
+ return statusAidlToDrmStatus(status);
}
-status_t DrmHalAidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+DrmStatus DrmHalAidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
Mutex::Autolock autoLock(mLock);
- return DrmUtils::GetLogMessagesAidl<IDrmPluginAidl>(mPlugin, logs);
+ return DrmStatus(DrmUtils::GetLogMessagesAidl<IDrmPluginAidl>(mPlugin, logs));
}
void DrmHalAidl::closeOpenSessions() {
@@ -1189,7 +1192,7 @@
return serializedMetrics;
}
-status_t DrmHalAidl::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+DrmStatus DrmHalAidl::getSupportedSchemes(std::vector<uint8_t>& schemes) const {
Mutex::Autolock autoLock(mLock);
if (mFactories.empty()) return UNKNOWN_ERROR;
@@ -1205,7 +1208,7 @@
}
}
- return OK;
+ return DrmStatus(OK);
}
void DrmHalAidl::cleanup() {
@@ -1225,9 +1228,9 @@
mPlugin.reset();
}
-status_t DrmHalAidl::destroyPlugin() {
+DrmStatus DrmHalAidl::destroyPlugin() {
cleanup();
- return OK;
+ return DrmStatus(OK);
}
::ndk::ScopedAStatus DrmHalAidl::onEvent(EventTypeAidl eventTypeAidl,
diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp
index c38dbef..7d045ac 100644
--- a/drm/libmediadrm/DrmHalHidl.cpp
+++ b/drm/libmediadrm/DrmHalHidl.cpp
@@ -35,6 +35,7 @@
#include <mediadrm/DrmHalHidl.h>
#include <mediadrm/DrmSessionClientInterface.h>
#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/DrmUtils.h>
#include <mediadrm/IDrmMetricsConsumer.h>
#include <utils/Log.h>
@@ -368,14 +369,14 @@
return plugin;
}
-status_t DrmHalHidl::initCheck() const {
- return mInitCheck;
+DrmStatus DrmHalHidl::initCheck() const {
+ return DrmStatus(mInitCheck);
}
-status_t DrmHalHidl::setListener(const sp<IDrmClient>& listener) {
+DrmStatus DrmHalHidl::setListener(const sp<IDrmClient>& listener) {
Mutex::Autolock lock(mEventLock);
mListener = listener;
- return NO_ERROR;
+ return DrmStatus(NO_ERROR);
}
Return<void> DrmHalHidl::sendEvent(EventType hEventType, const hidl_vec<uint8_t>& sessionId,
@@ -502,10 +503,10 @@
return Void();
}
-status_t DrmHalHidl::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory>& factory,
- const uint8_t uuid[16], const String8& mimeType,
- DrmPlugin::SecurityLevel level,
- bool* isSupported) {
+DrmStatus DrmHalHidl::matchMimeTypeAndSecurityLevel(const sp<IDrmFactory>& factory,
+ const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel level,
+ bool* isSupported) {
*isSupported = false;
// handle default value cases
@@ -517,23 +518,23 @@
// isCryptoSchemeSupported(uuid, mimeType)
*isSupported = factory->isContentTypeSupported(mimeType.string());
}
- return OK;
+ return DrmStatus(OK);
} else if (mimeType == "") {
- return BAD_VALUE;
+ return DrmStatus(BAD_VALUE);
}
sp<drm::V1_2::IDrmFactory> factoryV1_2 = drm::V1_2::IDrmFactory::castFrom(factory);
if (factoryV1_2 == NULL) {
- return ERROR_UNSUPPORTED;
+ return DrmStatus(ERROR_UNSUPPORTED);
} else {
*isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(),
toHidlSecurityLevel(level));
- return OK;
+ return DrmStatus(OK);
}
}
-status_t DrmHalHidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
- DrmPlugin::SecurityLevel level, bool* isSupported) {
+DrmStatus DrmHalHidl::isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel level, bool* isSupported) {
Mutex::Autolock autoLock(mLock);
*isSupported = false;
for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
@@ -541,10 +542,10 @@
return matchMimeTypeAndSecurityLevel(mFactories[i], uuid, mimeType, level, isSupported);
}
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalHidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
+DrmStatus DrmHalHidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
@@ -581,15 +582,15 @@
}
}
- return mInitCheck;
+ return DrmStatus(mInitCheck);
}
-status_t DrmHalHidl::destroyPlugin() {
+DrmStatus DrmHalHidl::destroyPlugin() {
cleanup();
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalHidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
+DrmStatus DrmHalHidl::openSession(DrmPlugin::SecurityLevel level, Vector<uint8_t>& sessionId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -604,7 +605,7 @@
}
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
bool retry = true;
do {
hidl_vec<uint8_t> hSessionId;
@@ -657,7 +658,7 @@
return err;
}
-status_t DrmHalHidl::closeSession(Vector<uint8_t> const& sessionId) {
+DrmStatus DrmHalHidl::closeSession(Vector<uint8_t> const& sessionId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -672,13 +673,13 @@
}
}
}
- status_t response = toStatusT(status);
+ DrmStatus response = toStatusT(status);
mMetrics.SetSessionEnd(sessionId);
mMetrics.mCloseSessionCounter.Increment(response);
return response;
}
mMetrics.mCloseSessionCounter.Increment(DEAD_OBJECT);
- return DEAD_OBJECT;
+ return DrmStatus(DEAD_OBJECT);
}
static DrmPlugin::KeyRequestType toKeyRequestType(KeyRequestType keyRequestType) {
@@ -712,12 +713,12 @@
}
}
-status_t DrmHalHidl::getKeyRequest(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& initData, String8 const& mimeType,
- DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const& optionalParameters,
- Vector<uint8_t>& request, String8& defaultUrl,
- DrmPlugin::KeyRequestType* keyRequestType) {
+DrmStatus DrmHalHidl::getKeyRequest(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& initData, String8 const& mimeType,
+ DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
@@ -738,7 +739,7 @@
::KeyedVector hOptionalParameters = toHidlKeyedVector(optionalParameters);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult;
if (mPluginV1_2 != NULL) {
@@ -787,16 +788,16 @@
return err;
}
-status_t DrmHalHidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& response,
- Vector<uint8_t>& keySetId) {
+DrmStatus DrmHalHidl::provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response,
+ Vector<uint8_t>& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult =
mPlugin->provideKeyResponse(toHidlVec(sessionId), toHidlVec(response),
@@ -811,27 +812,27 @@
return err;
}
-status_t DrmHalHidl::removeKeys(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalHidl::removeKeys(Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
Return<Status> status = mPlugin->removeKeys(toHidlVec(keySetId));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::restoreKeys(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalHidl::restoreKeys(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
Return<Status> status = mPlugin->restoreKeys(toHidlVec(sessionId), toHidlVec(keySetId));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
- KeyedVector<String8, String8>& infoMap) const {
+DrmStatus DrmHalHidl::queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -839,7 +840,7 @@
::KeyedVector hInfoMap;
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->queryKeyStatus(
toHidlVec(sessionId), [&](Status status, const hidl_vec<KeyValue>& map) {
@@ -849,15 +850,15 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? DrmStatus(err) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
- Vector<uint8_t>& request, String8& defaultUrl) {
+DrmStatus DrmHalHidl::getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult;
if (mPluginV1_2 != NULL) {
@@ -888,13 +889,13 @@
return err;
}
-status_t DrmHalHidl::provideProvisionResponse(Vector<uint8_t> const& response,
- Vector<uint8_t>& certificate,
- Vector<uint8_t>& wrappedKey) {
+DrmStatus DrmHalHidl::provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->provideProvisionResponse(
toHidlVec(response), [&](Status status, const hidl_vec<uint8_t>& hCertificate,
@@ -911,11 +912,11 @@
return err;
}
-status_t DrmHalHidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+DrmStatus DrmHalHidl::getSecureStops(List<Vector<uint8_t>>& secureStops) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult =
mPlugin->getSecureStops([&](Status status, const hidl_vec<SecureStop>& hSecureStops) {
@@ -925,10 +926,10 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+DrmStatus DrmHalHidl::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -939,7 +940,7 @@
return ERROR_DRM_CANNOT_HANDLE;
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPluginV1_1->getSecureStopIds(
[&](Status status, const hidl_vec<SecureStopId>& hSecureStopIds) {
@@ -949,14 +950,14 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
+DrmStatus DrmHalHidl::getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->getSecureStop(
toHidlVec(ssid), [&](Status status, const SecureStop& hSecureStop) {
@@ -966,10 +967,10 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+DrmStatus DrmHalHidl::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -981,10 +982,10 @@
} else {
status = mPlugin->releaseSecureStop(toHidlVec(ssRelease));
}
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::removeSecureStop(Vector<uint8_t> const& ssid) {
+DrmStatus DrmHalHidl::removeSecureStop(Vector<uint8_t> const& ssid) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
@@ -996,10 +997,10 @@
}
Return<Status> status = mPluginV1_1->removeSecureStop(toHidlVec(ssid));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::removeAllSecureStops() {
+DrmStatus DrmHalHidl::removeAllSecureStops() {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1009,18 +1010,18 @@
} else {
status = mPlugin->releaseAllSecureStops();
}
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
- DrmPlugin::HdcpLevel* max) const {
+DrmStatus DrmHalHidl::getHdcpLevels(DrmPlugin::HdcpLevel* connected,
+ DrmPlugin::HdcpLevel* max) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
if (connected == NULL || max == NULL) {
return BAD_VALUE;
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
*connected = DrmPlugin::kHdcpLevelUnknown;
*max = DrmPlugin::kHdcpLevelUnknown;
@@ -1046,26 +1047,26 @@
err = toStatusT(status);
});
} else {
- return ERROR_DRM_CANNOT_HANDLE;
+ return DrmStatus(ERROR_DRM_CANNOT_HANDLE);
}
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
+DrmStatus DrmHalHidl::getNumberOfSessions(uint32_t* open, uint32_t* max) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
if (open == NULL || max == NULL) {
return BAD_VALUE;
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
*open = 0;
*max = 0;
if (mPluginV1_1 == NULL) {
- return ERROR_DRM_CANNOT_HANDLE;
+ return DrmStatus(ERROR_DRM_CANNOT_HANDLE);
}
Return<void> hResult =
@@ -1077,21 +1078,21 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
- DrmPlugin::SecurityLevel* level) const {
+DrmStatus DrmHalHidl::getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
if (level == NULL) {
- return BAD_VALUE;
+ return DrmStatus(BAD_VALUE);
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
if (mPluginV1_1 == NULL) {
- return ERROR_DRM_CANNOT_HANDLE;
+ return DrmStatus(ERROR_DRM_CANNOT_HANDLE);
}
*level = DrmPlugin::kSecurityLevelUnknown;
@@ -1104,21 +1105,21 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+DrmStatus DrmHalHidl::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
- return mInitCheck;
+ return DrmStatus(mInitCheck);
}
if (mPluginV1_2 == NULL) {
- return ERROR_UNSUPPORTED;
+ return DrmStatus(ERROR_UNSUPPORTED);
}
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPluginV1_2->getOfflineLicenseKeySetIds(
[&](Status status, const hidl_vec<KeySetId>& hKeySetIds) {
@@ -1128,38 +1129,38 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+DrmStatus DrmHalHidl::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
- return mInitCheck;
+ return DrmStatus(mInitCheck);
}
if (mPluginV1_2 == NULL) {
- return ERROR_UNSUPPORTED;
+ return DrmStatus(ERROR_UNSUPPORTED);
}
Return<Status> status = mPluginV1_2->removeOfflineLicense(toHidlVec(keySetId));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
- DrmPlugin::OfflineLicenseState* licenseState) const {
+DrmStatus DrmHalHidl::getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+ DrmPlugin::OfflineLicenseState* licenseState) const {
Mutex::Autolock autoLock(mLock);
if (mInitCheck != OK) {
- return mInitCheck;
+ return DrmStatus(mInitCheck);
}
if (mPluginV1_2 == NULL) {
- return ERROR_UNSUPPORTED;
+ return DrmStatus(ERROR_UNSUPPORTED);
}
*licenseState = DrmPlugin::kOfflineLicenseStateUnknown;
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPluginV1_2->getOfflineLicenseState(
toHidlVec(keySetId), [&](Status status, OfflineLicenseState hLicenseState) {
@@ -1169,20 +1170,20 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getPropertyString(String8 const& name, String8& value) const {
+DrmStatus DrmHalHidl::getPropertyString(String8 const& name, String8& value) const {
Mutex::Autolock autoLock(mLock);
return getPropertyStringInternal(name, value);
}
-status_t DrmHalHidl::getPropertyStringInternal(String8 const& name, String8& value) const {
+DrmStatus DrmHalHidl::getPropertyStringInternal(String8 const& name, String8& value) const {
// This function is internal to the class and should only be called while
// mLock is already held.
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->getPropertyString(
toHidlString(name), [&](Status status, const hidl_string& hValue) {
@@ -1192,21 +1193,21 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
+DrmStatus DrmHalHidl::getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const {
Mutex::Autolock autoLock(mLock);
return getPropertyByteArrayInternal(name, value);
}
-status_t DrmHalHidl::getPropertyByteArrayInternal(String8 const& name,
- Vector<uint8_t>& value) const {
+DrmStatus DrmHalHidl::getPropertyByteArrayInternal(String8 const& name,
+ Vector<uint8_t>& value) const {
// This function is internal to the class and should only be called while
// mLock is already held.
INIT_CHECK();
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->getPropertyByteArray(
toHidlString(name), [&](Status status, const hidl_vec<uint8_t>& hValue) {
@@ -1223,25 +1224,26 @@
return err;
}
-status_t DrmHalHidl::setPropertyString(String8 const& name, String8 const& value) const {
+DrmStatus DrmHalHidl::setPropertyString(String8 const& name, String8 const& value) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
Return<Status> status = mPlugin->setPropertyString(toHidlString(name), toHidlString(value));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const {
+DrmStatus DrmHalHidl::setPropertyByteArray(String8 const& name,
+ Vector<uint8_t> const& value) const {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
Return<Status> status = mPlugin->setPropertyByteArray(toHidlString(name), toHidlVec(value));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+DrmStatus DrmHalHidl::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
if (consumer == nullptr) {
- return UNEXPECTED_NULL;
+ return DrmStatus(UNEXPECTED_NULL);
}
consumer->consumeFrameworkMetrics(mMetrics);
@@ -1262,7 +1264,7 @@
vendor += description;
hidl_vec<DrmMetricGroup> pluginMetrics;
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> status =
mPluginV1_1->getMetrics([&](Status status, hidl_vec<DrmMetricGroup> pluginMetrics) {
@@ -1273,14 +1275,14 @@
}
err = toStatusT(status);
});
- return status.isOk() ? err : DEAD_OBJECT;
+ return status.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalHidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
- String8 const& algorithm) {
+DrmStatus DrmHalHidl::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
@@ -1288,28 +1290,28 @@
Return<Status> status =
mPlugin->setCipherAlgorithm(toHidlVec(sessionId), toHidlString(algorithm));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
+DrmStatus DrmHalHidl::setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
Return<Status> status = mPlugin->setMacAlgorithm(toHidlVec(sessionId), toHidlString(algorithm));
- return status.isOk() ? toStatusT(status) : DEAD_OBJECT;
+ return status.isOk() ? DrmStatus(toStatusT(status)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHalHidl::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult =
mPlugin->encrypt(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(input),
@@ -1320,18 +1322,18 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output) {
+DrmStatus DrmHalHidl::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult =
mPlugin->decrypt(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(input),
@@ -1342,17 +1344,17 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+DrmStatus DrmHalHidl::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->sign(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(message),
[&](Status status, const hidl_vec<uint8_t>& hSignature) {
@@ -1362,18 +1364,18 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
- bool& match) {
+DrmStatus DrmHalHidl::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult =
mPlugin->verify(toHidlVec(sessionId), toHidlVec(keyId), toHidlVec(message),
@@ -1386,18 +1388,18 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
- Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
- Vector<uint8_t>& signature) {
+DrmStatus DrmHalHidl::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+ Vector<uint8_t>& signature) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
DrmSessionManager::Instance()->useSession(sessionId);
- status_t err = UNKNOWN_ERROR;
+ DrmStatus err = UNKNOWN_ERROR;
Return<void> hResult = mPlugin->signRSA(
toHidlVec(sessionId), toHidlString(algorithm), toHidlVec(message),
@@ -1408,7 +1410,7 @@
err = toStatusT(status);
});
- return hResult.isOk() ? err : DEAD_OBJECT;
+ return hResult.isOk() ? err : DrmStatus(DEAD_OBJECT);
}
std::string DrmHalHidl::reportFrameworkMetrics(const std::string& pluginMetrics) const {
@@ -1467,55 +1469,56 @@
return metricsString;
}
-status_t DrmHalHidl::requiresSecureDecoder(const char* mime, bool* required) const {
+DrmStatus DrmHalHidl::requiresSecureDecoder(const char* mime, bool* required) const {
Mutex::Autolock autoLock(mLock);
if (mPluginV1_4 == NULL) {
- return false;
+ return DrmStatus(false);
}
auto hResult = mPluginV1_4->requiresSecureDecoderDefault(hidl_string(mime));
if (!hResult.isOk()) {
DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
- return DEAD_OBJECT;
+ return DrmStatus(DEAD_OBJECT);
}
if (required) {
*required = hResult;
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalHidl::requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
- bool* required) const {
+DrmStatus DrmHalHidl::requiresSecureDecoder(const char* mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const {
Mutex::Autolock autoLock(mLock);
if (mPluginV1_4 == NULL) {
- return false;
+ return DrmStatus(false);
}
auto hLevel = toHidlSecurityLevel(securityLevel);
auto hResult = mPluginV1_4->requiresSecureDecoder(hidl_string(mime), hLevel);
if (!hResult.isOk()) {
DrmUtils::LOG2BE("requiresSecureDecoder txn failed: %s", hResult.description().c_str());
- return DEAD_OBJECT;
+ return DrmStatus(DEAD_OBJECT);
}
if (required) {
*required = hResult;
}
- return OK;
+ return DrmStatus(OK);
}
-status_t DrmHalHidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
+DrmStatus DrmHalHidl::setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId) {
Mutex::Autolock autoLock(mLock);
if (mPluginV1_4 == NULL) {
- return ERROR_UNSUPPORTED;
+ return DrmStatus(ERROR_UNSUPPORTED);
}
auto err = mPluginV1_4->setPlaybackId(toHidlVec(sessionId), hidl_string(playbackId));
- return err.isOk() ? toStatusT(err) : DEAD_OBJECT;
+ return err.isOk() ? DrmStatus(toStatusT(err)) : DrmStatus(DEAD_OBJECT);
}
-status_t DrmHalHidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+DrmStatus DrmHalHidl::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
Mutex::Autolock autoLock(mLock);
- return DrmUtils::GetLogMessages<drm::V1_4::IDrmPlugin>(mPlugin, logs);
+ return DrmStatus(DrmUtils::GetLogMessages<drm::V1_4::IDrmPlugin>(mPlugin, logs));
}
-status_t DrmHalHidl::getSupportedSchemes(std::vector<uint8_t> &schemes) const {
+DrmStatus DrmHalHidl::getSupportedSchemes(std::vector<uint8_t>& schemes) const {
Mutex::Autolock autoLock(mLock);
for (auto &factory : mFactories) {
sp<drm::V1_3::IDrmFactory> factoryV1_3 = drm::V1_3::IDrmFactory::castFrom(factory);
@@ -1531,7 +1534,7 @@
});
}
- return OK;
+ return DrmStatus(OK);
}
} // namespace android
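
Note on the return-type migration above: DrmStatus is constructible from a plain status_t, which is what lets assignments such as "DrmStatus err = UNKNOWN_ERROR;" keep compiling unchanged, while conditional expressions that previously mixed err with DEAD_OBJECT now wrap the literal so both branches carry the same type. A minimal sketch of the pattern, not part of this change, assuming only the implicit construction from status_t that the code above already relies on:

    #include <utils/Errors.h>
    #include <mediadrm/DrmStatus.h>
    using namespace android;

    // Sketch only: mirrors the DrmHalHidl return pattern above, assuming DrmStatus
    // is implicitly constructible from status_t.
    static DrmStatus exampleReturnPattern(bool transportOk, status_t pluginStatus) {
        DrmStatus err = UNKNOWN_ERROR;        // implicit status_t -> DrmStatus
        if (transportOk) {
            err = pluginStatus;               // code reported by the plugin callback
        }
        // Both arms of the conditional must be DrmStatus, hence the explicit wrap.
        return transportOk ? err : DrmStatus(DEAD_OBJECT);
    }
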
diff --git a/drm/libmediadrm/DrmStatus.cpp b/drm/libmediadrm/DrmStatus.cpp
new file mode 100644
index 0000000..0258801
--- /dev/null
+++ b/drm/libmediadrm/DrmStatus.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <mediadrm/DrmStatus.h>
+#include <json/json.h>
+
+namespace android {
+
+DrmStatus::DrmStatus(status_t err, const char *msg) : mStatus(err) {
+ Json::Value errorDetails;
+ Json::Reader reader;
+ if (!reader.parse(msg, errorDetails)) {
+ mErrMsg = msg;
+ return;
+ }
+
+ std::string errMsg;
+ auto val = errorDetails["cdmError"];
+ if (!val.isNull()) {
+ mCdmErr = val.asInt();
+ }
+ val = errorDetails["oemError"];
+ if (!val.isNull()) {
+ mOemErr = val.asInt();
+ }
+ val = errorDetails["context"];
+ if (!val.isNull()) {
+ mCtx = val.asInt();
+ }
+ val = errorDetails["errorMessage"];
+ if (!val.isNull()) {
+ mErrMsg = val.asString();
+ } else {
+ mErrMsg = msg;
+ }
+}
+
+} // namespace android
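
The constructor above accepts either a plain message or a JSON object carrying vendor error details; fields it does not find are left at their defaults, and a string that fails to parse is kept verbatim as the error message. An illustrative, non-normative example of both cases, using the getter names that the DrmUtils change below relies on:

    #include <media/stagefright/MediaErrors.h>
    #include <mediadrm/DrmStatus.h>
    using namespace android;

    void drmStatusExample() {
        // Structured message: the JSON fields below are exactly the ones parsed above.
        DrmStatus detailed(ERROR_DRM_SESSION_LOST_STATE,
                           "{\"cdmError\": 3, \"oemError\": -17, \"context\": 1001,"
                           " \"errorMessage\": \"keybox read failed\"}");
        // detailed.getCdmErr() == 3, detailed.getOemErr() == -17,
        // detailed.getContext() == 1001, detailed.getErrorMessage() == "keybox read failed"

        // Unstructured message: kept as-is when JSON parsing fails.
        DrmStatus plain(UNKNOWN_ERROR, "unexpected plugin state");
        // plain.getErrorMessage() == "unexpected plugin state"
    }
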
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index be0cd4b..cb103f7 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -32,6 +32,7 @@
#include <android/hardware/drm/1.4/IDrmFactory.h>
#include <android/hidl/manager/1.2/IServiceManager.h>
#include <hidl/HidlSupport.h>
+#include <json/json.h>
#include <cutils/properties.h>
#include <utils/Errors.h>
@@ -362,18 +363,23 @@
}
} // namespace
-std::string GetExceptionMessage(status_t err, const char* msg,
+std::string GetExceptionMessage(const DrmStatus &err, const char* defaultMsg,
const Vector<::V1_4::LogMessage>& logs) {
std::string ruler("==============================");
std::string header("Beginning of DRM Plugin Log");
std::string footer("End of DRM Plugin Log");
+ std::string msg(err.getErrorMessage());
String8 msg8;
- if (msg) {
- msg8 += msg;
+ if (!msg.empty()) {
+ msg8 += msg.c_str();
+ msg8 += ": ";
+ } else if (defaultMsg) {
+ msg8 += defaultMsg;
msg8 += ": ";
}
- auto errStr = StrCryptoError(err);
- msg8 += errStr.c_str();
+ msg8 += StrCryptoError(err).c_str();
+ msg8 += String8::format("\ncdm err: %d, oem err: %d, ctx: %d",
+ err.getCdmErr(), err.getOemErr(), err.getContext());
msg8 += String8::format("\n%s %s %s", ruler.c_str(), header.c_str(), ruler.c_str());
for (auto log : logs) {
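
With the DrmStatus overload, the exception text prefers the message carried inside the DrmStatus (falling back to defaultMsg only when that is empty) and now appends the CDM/OEM/context values after the crypto error string. An illustrative call, assumed to run inside namespace android, with the log vector normally obtained via DrmUtils::GetLogMessages():

    // Illustrative only; exact contents depend on the plugin and its logs.
    DrmStatus err(ERROR_DRM_NOT_PROVISIONED,
                  "{\"cdmError\": 9, \"oemError\": 0, \"context\": 7,"
                  " \"errorMessage\": \"device not provisioned\"}");
    Vector<drm::V1_4::LogMessage> logs;
    std::string text = DrmUtils::GetExceptionMessage(err, "provisioning failed", logs);
    // text begins with "device not provisioned: ", then the crypto error string,
    // then "cdm err: 9, oem err: 0, ctx: 7", then the bracketed DRM Plugin Log section;
    // the "provisioning failed" default is used only when err carries no message.
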
@@ -410,6 +416,141 @@
return logs;
}
+DrmStatus statusAidlToDrmStatus(::ndk::ScopedAStatus& statusAidl) {
+ if (statusAidl.isOk()) return OK;
+ if (statusAidl.getExceptionCode() != EX_SERVICE_SPECIFIC) return DEAD_OBJECT;
+ auto astatus = static_cast<StatusAidl>(statusAidl.getServiceSpecificError());
+ status_t status{};
+ switch (astatus) {
+ case StatusAidl::OK:
+ status = OK;
+ break;
+ case StatusAidl::BAD_VALUE:
+ status = BAD_VALUE;
+ break;
+ case StatusAidl::ERROR_DRM_CANNOT_HANDLE:
+ status = ERROR_DRM_CANNOT_HANDLE;
+ break;
+ case StatusAidl::ERROR_DRM_DECRYPT:
+ status = ERROR_DRM_DECRYPT;
+ break;
+ case StatusAidl::ERROR_DRM_DEVICE_REVOKED:
+ status = ERROR_DRM_DEVICE_REVOKED;
+ break;
+ case StatusAidl::ERROR_DRM_FRAME_TOO_LARGE:
+ status = ERROR_DRM_FRAME_TOO_LARGE;
+ break;
+ case StatusAidl::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
+ status = ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
+ break;
+ case StatusAidl::ERROR_DRM_INSUFFICIENT_SECURITY:
+ status = ERROR_DRM_INSUFFICIENT_SECURITY;
+ break;
+ case StatusAidl::ERROR_DRM_INVALID_STATE:
+ status = ERROR_DRM_INVALID_STATE;
+ break;
+ case StatusAidl::ERROR_DRM_LICENSE_EXPIRED:
+ status = ERROR_DRM_LICENSE_EXPIRED;
+ break;
+ case StatusAidl::ERROR_DRM_NO_LICENSE:
+ status = ERROR_DRM_NO_LICENSE;
+ break;
+ case StatusAidl::ERROR_DRM_NOT_PROVISIONED:
+ status = ERROR_DRM_NOT_PROVISIONED;
+ break;
+ case StatusAidl::ERROR_DRM_RESOURCE_BUSY:
+ status = ERROR_DRM_RESOURCE_BUSY;
+ break;
+ case StatusAidl::ERROR_DRM_RESOURCE_CONTENTION:
+ status = ERROR_DRM_RESOURCE_CONTENTION;
+ break;
+ case StatusAidl::ERROR_DRM_SESSION_LOST_STATE:
+ status = ERROR_DRM_SESSION_LOST_STATE;
+ break;
+ case StatusAidl::ERROR_DRM_SESSION_NOT_OPENED:
+ status = ERROR_DRM_SESSION_NOT_OPENED;
+ break;
+
+ // New in S / drm@1.4:
+ case StatusAidl::CANNOT_DECRYPT_ZERO_SUBSAMPLES:
+ status = ERROR_DRM_ZERO_SUBSAMPLES;
+ break;
+ case StatusAidl::CRYPTO_LIBRARY_ERROR:
+ status = ERROR_DRM_CRYPTO_LIBRARY;
+ break;
+ case StatusAidl::GENERAL_OEM_ERROR:
+ status = ERROR_DRM_GENERIC_OEM;
+ break;
+ case StatusAidl::GENERAL_PLUGIN_ERROR:
+ status = ERROR_DRM_GENERIC_PLUGIN;
+ break;
+ case StatusAidl::INIT_DATA_INVALID:
+ status = ERROR_DRM_INIT_DATA;
+ break;
+ case StatusAidl::KEY_NOT_LOADED:
+ status = ERROR_DRM_KEY_NOT_LOADED;
+ break;
+ case StatusAidl::LICENSE_PARSE_ERROR:
+ status = ERROR_DRM_LICENSE_PARSE;
+ break;
+ case StatusAidl::LICENSE_POLICY_ERROR:
+ status = ERROR_DRM_LICENSE_POLICY;
+ break;
+ case StatusAidl::LICENSE_RELEASE_ERROR:
+ status = ERROR_DRM_LICENSE_RELEASE;
+ break;
+ case StatusAidl::LICENSE_REQUEST_REJECTED:
+ status = ERROR_DRM_LICENSE_REQUEST_REJECTED;
+ break;
+ case StatusAidl::LICENSE_RESTORE_ERROR:
+ status = ERROR_DRM_LICENSE_RESTORE;
+ break;
+ case StatusAidl::LICENSE_STATE_ERROR:
+ status = ERROR_DRM_LICENSE_STATE;
+ break;
+ case StatusAidl::MALFORMED_CERTIFICATE:
+ status = ERROR_DRM_CERTIFICATE_MALFORMED;
+ break;
+ case StatusAidl::MEDIA_FRAMEWORK_ERROR:
+ status = ERROR_DRM_MEDIA_FRAMEWORK;
+ break;
+ case StatusAidl::MISSING_CERTIFICATE:
+ status = ERROR_DRM_CERTIFICATE_MISSING;
+ break;
+ case StatusAidl::PROVISIONING_CERTIFICATE_ERROR:
+ status = ERROR_DRM_PROVISIONING_CERTIFICATE;
+ break;
+ case StatusAidl::PROVISIONING_CONFIGURATION_ERROR:
+ status = ERROR_DRM_PROVISIONING_CONFIG;
+ break;
+ case StatusAidl::PROVISIONING_PARSE_ERROR:
+ status = ERROR_DRM_PROVISIONING_PARSE;
+ break;
+ case StatusAidl::PROVISIONING_REQUEST_REJECTED:
+ status = ERROR_DRM_PROVISIONING_REQUEST_REJECTED;
+ break;
+ case StatusAidl::RETRYABLE_PROVISIONING_ERROR:
+ status = ERROR_DRM_PROVISIONING_RETRY;
+ break;
+ case StatusAidl::SECURE_STOP_RELEASE_ERROR:
+ status = ERROR_DRM_SECURE_STOP_RELEASE;
+ break;
+ case StatusAidl::STORAGE_READ_FAILURE:
+ status = ERROR_DRM_STORAGE_READ;
+ break;
+ case StatusAidl::STORAGE_WRITE_FAILURE:
+ status = ERROR_DRM_STORAGE_WRITE;
+ break;
+
+ case StatusAidl::ERROR_DRM_UNKNOWN:
+ default:
+ status = ERROR_DRM_UNKNOWN;
+ break;
+ }
+
+ return DrmStatus(status, statusAidl.getMessage());
+}
+
LogBuffer gLogBuf;
} // namespace DrmUtils
} // namespace android
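
statusAidlToDrmStatus above is the AIDL counterpart of the existing HIDL conversions: a failed binder transaction maps to DEAD_OBJECT, a service-specific error is translated to the matching framework code, and the status message is handed to the DrmStatus constructor so JSON-encoded vendor details survive the conversion. A hypothetical caller sketch (the AIDL method shown is illustrative and not part of this change):

    // Hypothetical caller; "plugin" stands for an AIDL IDrmPlugin proxy.
    ::ndk::ScopedAStatus aidlStatus = plugin->removeKeys(keySetIdAidl);
    DrmStatus err = DrmUtils::statusAidlToDrmStatus(aidlStatus);
    // err carries the translated status_t for existing error handling, plus any
    // cdm/oem/context details the HAL encoded as JSON in the status message.
    return err;
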
diff --git a/drm/libmediadrm/fuzzer/Android.bp b/drm/libmediadrm/fuzzer/Android.bp
index a85e3cf..deda9ef 100644
--- a/drm/libmediadrm/fuzzer/Android.bp
+++ b/drm/libmediadrm/fuzzer/Android.bp
@@ -37,6 +37,7 @@
"liblog",
"resourcemanager_aidl_interface-ndk",
"libaidlcommonsupport",
+ "libjsoncpp",
],
header_libs: [
"libmedia_headers",
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHal.h b/drm/libmediadrm/include/mediadrm/CryptoHal.h
index 32a6741..60cbbf8 100644
--- a/drm/libmediadrm/include/mediadrm/CryptoHal.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHal.h
@@ -38,7 +38,7 @@
virtual bool requiresSecureDecoderComponent(
const char *mime) const;
virtual void notifyResolution(uint32_t width, uint32_t height);
- virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+ virtual DrmStatus setMediaDrmSession(const Vector<uint8_t> &sessionId);
virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
const drm::V1_0::SharedBuffer &source, size_t offset,
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h b/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h
index 50878a6..eec4585 100644
--- a/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHalAidl.h
@@ -49,7 +49,7 @@
virtual status_t destroyPlugin();
virtual bool requiresSecureDecoderComponent(const char* mime) const;
virtual void notifyResolution(uint32_t width, uint32_t height);
- virtual status_t setMediaDrmSession(const Vector<uint8_t>& sessionId);
+ virtual DrmStatus setMediaDrmSession(const Vector<uint8_t>& sessionId);
virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16], CryptoPlugin::Mode mode,
const CryptoPlugin::Pattern& pattern, const ::SharedBuffer& source,
size_t offset, const CryptoPlugin::SubSample* subSamples,
diff --git a/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h b/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h
index 6db1e89..0150d7c 100644
--- a/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h
+++ b/drm/libmediadrm/include/mediadrm/CryptoHalHidl.h
@@ -57,7 +57,7 @@
virtual void notifyResolution(uint32_t width, uint32_t height);
- virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId);
+ virtual DrmStatus setMediaDrmSession(const Vector<uint8_t> &sessionId);
virtual ssize_t decrypt(const uint8_t key[16], const uint8_t iv[16],
CryptoPlugin::Mode mode, const CryptoPlugin::Pattern &pattern,
diff --git a/drm/libmediadrm/include/mediadrm/DrmHal.h b/drm/libmediadrm/include/mediadrm/DrmHal.h
index eab597b..16e5fe1 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHal.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHal.h
@@ -15,6 +15,7 @@
*/
#include <mediadrm/IDrm.h>
+#include <mediadrm/DrmStatus.h>
#ifndef DRM_HAL_H_
#define DRM_HAL_H_
@@ -24,100 +25,99 @@
struct DrmHal : public IDrm {
DrmHal();
virtual ~DrmHal();
- virtual status_t initCheck() const;
- virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
- const String8 &mimeType,
- DrmPlugin::SecurityLevel securityLevel,
- bool *result);
- virtual status_t createPlugin(const uint8_t uuid[16],
- const String8 &appPackageName);
- virtual status_t destroyPlugin();
- virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+ virtual DrmStatus initCheck() const;
+ virtual DrmStatus isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel, bool* result);
+ virtual DrmStatus createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName);
+ virtual DrmStatus destroyPlugin();
+ virtual DrmStatus openSession(DrmPlugin::SecurityLevel securityLevel,
Vector<uint8_t> &sessionId);
- virtual status_t closeSession(Vector<uint8_t> const &sessionId);
- virtual status_t
+ virtual DrmStatus closeSession(Vector<uint8_t> const &sessionId);
+ virtual DrmStatus
getKeyRequest(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &initData,
String8 const &mimeType, DrmPlugin::KeyType keyType,
KeyedVector<String8, String8> const &optionalParameters,
Vector<uint8_t> &request, String8 &defaultUrl,
DrmPlugin::KeyRequestType *keyRequestType);
- virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId);
- virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
- virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId);
- virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const;
- virtual status_t getProvisionRequest(String8 const &certType,
- String8 const &certAuthority,
- Vector<uint8_t> &request,
- String8 &defaultUrl);
- virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey);
- virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
- virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
- virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
- virtual status_t removeSecureStop(Vector<uint8_t> const &ssid);
- virtual status_t removeAllSecureStops();
- virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+ virtual DrmStatus provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId);
+ virtual DrmStatus removeKeys(Vector<uint8_t> const &keySetId);
+ virtual DrmStatus restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId);
+ virtual DrmStatus queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const;
+ virtual DrmStatus getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaultUrl);
+ virtual DrmStatus provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey);
+ virtual DrmStatus getSecureStops(List<Vector<uint8_t>> &secureStops);
+ virtual DrmStatus getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
+ virtual DrmStatus getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+ virtual DrmStatus releaseSecureStops(Vector<uint8_t> const &ssRelease);
+ virtual DrmStatus removeSecureStop(Vector<uint8_t> const &ssid);
+ virtual DrmStatus removeAllSecureStops();
+ virtual DrmStatus getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
DrmPlugin::HdcpLevel *maxLevel) const;
- virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ virtual DrmStatus getNumberOfSessions(uint32_t *currentSessions,
uint32_t *maxSessions) const;
- virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus getSecurityLevel(Vector<uint8_t> const &sessionId,
DrmPlugin::SecurityLevel *level) const;
- virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
- virtual status_t removeOfflineLicense(Vector<uint8_t> const &keySetId);
- virtual status_t getOfflineLicenseState(Vector<uint8_t> const &keySetId,
+ virtual DrmStatus getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
+ virtual DrmStatus removeOfflineLicense(Vector<uint8_t> const &keySetId);
+ virtual DrmStatus getOfflineLicenseState(Vector<uint8_t> const &keySetId,
DrmPlugin::OfflineLicenseState *licenseState) const;
- virtual status_t getPropertyString(String8 const &name, String8 &value) const;
- virtual status_t getPropertyByteArray(String8 const &name,
- Vector<uint8_t> &value) const;
- virtual status_t setPropertyString(String8 const &name,
- String8 const &value ) const;
- virtual status_t setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value) const;
- virtual status_t getMetrics(const sp<IDrmMetricsConsumer> &consumer);
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
- virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
- virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus getPropertyString(String8 const &name, String8 &value) const;
+ virtual DrmStatus getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value) const;
+ virtual DrmStatus setPropertyString(String8 const &name,
+ String8 const &value ) const;
+ virtual DrmStatus setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value) const;
+ virtual DrmStatus getMetrics(const sp<IDrmMetricsConsumer> &consumer);
+ virtual DrmStatus setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+ virtual DrmStatus setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
+ virtual DrmStatus encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+ virtual DrmStatus decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+ virtual DrmStatus sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature);
+ virtual DrmStatus verify(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
- virtual status_t decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
- virtual status_t sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature);
- virtual status_t verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match);
- virtual status_t signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature);
- virtual status_t setListener(const sp<IDrmClient>& listener);
- virtual status_t requiresSecureDecoder(const char *mime, bool *required) const;
- virtual status_t requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
- bool *required) const;
- virtual status_t setPlaybackId(
+ Vector<uint8_t> const &signature,
+ bool &match);
+ virtual DrmStatus signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature);
+ virtual DrmStatus setListener(const sp<IDrmClient>& listener);
+ virtual DrmStatus requiresSecureDecoder(const char *mime, bool *required) const;
+ virtual DrmStatus requiresSecureDecoder(const char *mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool *required) const;
+ virtual DrmStatus setPlaybackId(
Vector<uint8_t> const &sessionId,
const char *playbackId);
- virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
- virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
+ virtual DrmStatus getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+ virtual DrmStatus getSupportedSchemes(std::vector<uint8_t> &schemes) const;
private:
sp<IDrm> mDrmHalHidl;
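
DrmHal itself is a thin facade over the HIDL and AIDL backends, which is why its overrides move to DrmStatus in lockstep with DrmHalHidl and DrmHalAidl. A purely hypothetical forwarding sketch; only the mDrmHalHidl member is visible in this hunk, and the choice between backends is an assumption:

    // Hypothetical sketch; the real DrmHal forwards to whichever backend initialized.
    DrmStatus DrmHal::removeKeys(Vector<uint8_t> const& keySetId) {
        return mDrmHalHidl->removeKeys(keySetId);  // DrmStatus preserved end to end
    }
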
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
index 0f51ce9..e0b8341 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
@@ -25,6 +25,7 @@
#include <mediadrm/DrmMetrics.h>
#include <mediadrm/DrmSessionManager.h>
#include <mediadrm/DrmHalListener.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/IDrm.h>
using IDrmPluginAidl = ::aidl::android::hardware::drm::IDrmPlugin;
@@ -38,74 +39,78 @@
struct DrmSessionClient;
DrmHalAidl();
virtual ~DrmHalAidl();
- virtual status_t initCheck() const;
- virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
- DrmPlugin::SecurityLevel securityLevel, bool* result);
- virtual status_t createPlugin(const uint8_t uuid[16], const String8& appPackageName);
- virtual status_t destroyPlugin();
- virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
- Vector<uint8_t>& sessionId);
- virtual status_t closeSession(Vector<uint8_t> const& sessionId);
- virtual status_t getKeyRequest(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& initData, String8 const& mimeType,
- DrmPlugin::KeyType keyType,
- KeyedVector<String8, String8> const& optionalParameters,
- Vector<uint8_t>& request, String8& defaultUrl,
- DrmPlugin::KeyRequestType* keyRequestType);
- virtual status_t provideKeyResponse(Vector<uint8_t> const& sessionId,
- Vector<uint8_t> const& response, Vector<uint8_t>& keySetId);
- virtual status_t removeKeys(Vector<uint8_t> const& keySetId);
- virtual status_t restoreKeys(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keySetId);
- virtual status_t queryKeyStatus(Vector<uint8_t> const& sessionId,
- KeyedVector<String8, String8>& infoMap) const;
- virtual status_t getProvisionRequest(String8 const& certType, String8 const& certAuthority,
- Vector<uint8_t>& request, String8& defaultUrl);
- virtual status_t provideProvisionResponse(Vector<uint8_t> const& response,
- Vector<uint8_t>& certificate,
- Vector<uint8_t>& wrappedKey);
- virtual status_t getSecureStops(List<Vector<uint8_t>>& secureStops);
- virtual status_t getSecureStopIds(List<Vector<uint8_t>>& secureStopIds);
- virtual status_t getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop);
- virtual status_t releaseSecureStops(Vector<uint8_t> const& ssRelease);
- virtual status_t removeSecureStop(Vector<uint8_t> const& ssid);
- virtual status_t removeAllSecureStops();
- virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
- DrmPlugin::HdcpLevel* maxLevel) const;
- virtual status_t getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const;
- virtual status_t getSecurityLevel(Vector<uint8_t> const& sessionId,
- DrmPlugin::SecurityLevel* level) const;
- virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const;
- virtual status_t removeOfflineLicense(Vector<uint8_t> const& keySetId);
- virtual status_t getOfflineLicenseState(Vector<uint8_t> const& keySetId,
- DrmPlugin::OfflineLicenseState* licenseState) const;
- virtual status_t getPropertyString(String8 const& name, String8& value) const;
- virtual status_t getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const;
- virtual status_t setPropertyString(String8 const& name, String8 const& value) const;
- virtual status_t setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const;
- virtual status_t getMetrics(const sp<IDrmMetricsConsumer>& consumer);
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
- virtual status_t setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
- virtual status_t encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output);
- virtual status_t decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
- Vector<uint8_t>& output);
- virtual status_t sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t>& signature);
- virtual status_t verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
- Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
- bool& match);
- virtual status_t signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
- Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
- Vector<uint8_t>& signature);
- virtual status_t setListener(const sp<IDrmClient>& listener);
- virtual status_t requiresSecureDecoder(const char* mime, bool* required) const;
- virtual status_t requiresSecureDecoder(const char* mime, DrmPlugin::SecurityLevel securityLevel,
- bool* required) const;
- virtual status_t setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId);
- virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
- virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
+ virtual DrmStatus initCheck() const;
+ virtual DrmStatus isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel, bool* result);
+ virtual DrmStatus createPlugin(const uint8_t uuid[16], const String8& appPackageName);
+ virtual DrmStatus destroyPlugin();
+ virtual DrmStatus openSession(DrmPlugin::SecurityLevel securityLevel,
+ Vector<uint8_t>& sessionId);
+ virtual DrmStatus closeSession(Vector<uint8_t> const& sessionId);
+ virtual DrmStatus getKeyRequest(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& initData, String8 const& mimeType,
+ DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType);
+ virtual DrmStatus provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response,
+ Vector<uint8_t>& keySetId);
+ virtual DrmStatus removeKeys(Vector<uint8_t> const& keySetId);
+ virtual DrmStatus restoreKeys(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& keySetId);
+ virtual DrmStatus queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const;
+ virtual DrmStatus getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl);
+ virtual DrmStatus provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey);
+ virtual DrmStatus getSecureStops(List<Vector<uint8_t>>& secureStops);
+ virtual DrmStatus getSecureStopIds(List<Vector<uint8_t>>& secureStopIds);
+ virtual DrmStatus getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop);
+ virtual DrmStatus releaseSecureStops(Vector<uint8_t> const& ssRelease);
+ virtual DrmStatus removeSecureStop(Vector<uint8_t> const& ssid);
+ virtual DrmStatus removeAllSecureStops();
+ virtual DrmStatus getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+ DrmPlugin::HdcpLevel* maxLevel) const;
+ virtual DrmStatus getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const;
+ virtual DrmStatus getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const;
+ virtual DrmStatus getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const;
+ virtual DrmStatus removeOfflineLicense(Vector<uint8_t> const& keySetId);
+ virtual DrmStatus getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+ DrmPlugin::OfflineLicenseState* licenseState) const;
+ virtual DrmStatus getPropertyString(String8 const& name, String8& value) const;
+ virtual DrmStatus getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const;
+ virtual DrmStatus setPropertyString(String8 const& name, String8 const& value) const;
+ virtual DrmStatus setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const;
+ virtual DrmStatus getMetrics(const sp<IDrmMetricsConsumer>& consumer);
+ virtual DrmStatus setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm);
+ virtual DrmStatus setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
+ virtual DrmStatus encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output);
+ virtual DrmStatus decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output);
+ virtual DrmStatus sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature);
+ virtual DrmStatus verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match);
+ virtual DrmStatus signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+ Vector<uint8_t>& signature);
+ virtual DrmStatus setListener(const sp<IDrmClient>& listener);
+ virtual DrmStatus requiresSecureDecoder(const char* mime, bool* required) const;
+ virtual DrmStatus requiresSecureDecoder(const char* mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const;
+ virtual DrmStatus setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId);
+ virtual DrmStatus getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
+ virtual DrmStatus getSupportedSchemes(std::vector<uint8_t>& schemes) const;
::ndk::ScopedAStatus onEvent(EventTypeAidl in_eventType,
const std::vector<uint8_t>& in_sessionId,
@@ -128,8 +133,8 @@
void closeOpenSessions();
std::string reportPluginMetrics() const;
std::string reportFrameworkMetrics(const std::string& pluginMetrics) const;
- status_t getPropertyStringInternal(String8 const& name, String8& value) const;
- status_t getPropertyByteArrayInternal(String8 const& name, Vector<uint8_t>& value) const;
+ DrmStatus getPropertyStringInternal(String8 const& name, String8& value) const;
+ DrmStatus getPropertyByteArrayInternal(String8 const& name, Vector<uint8_t>& value) const;
DISALLOW_EVIL_CONSTRUCTORS(DrmHalAidl);
};
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
index 11f0608..45764a1 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
@@ -26,6 +26,7 @@
#include <media/drm/DrmAPI.h>
#include <mediadrm/DrmMetrics.h>
#include <mediadrm/DrmSessionManager.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/IDrm.h>
#include <mediadrm/IDrmClient.h>
#include <mediadrm/IDrmMetricsConsumer.h>
@@ -63,24 +64,22 @@
DrmHalHidl();
virtual ~DrmHalHidl();
- virtual status_t initCheck() const;
+ virtual DrmStatus initCheck() const;
- virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
- const String8& mimeType,
- DrmPlugin::SecurityLevel level,
- bool *isSupported);
+ virtual DrmStatus isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel level, bool* isSupported);
- virtual status_t createPlugin(const uint8_t uuid[16],
+ virtual DrmStatus createPlugin(const uint8_t uuid[16],
const String8 &appPackageName);
- virtual status_t destroyPlugin();
+ virtual DrmStatus destroyPlugin();
- virtual status_t openSession(DrmPlugin::SecurityLevel level,
+ virtual DrmStatus openSession(DrmPlugin::SecurityLevel level,
Vector<uint8_t> &sessionId);
- virtual status_t closeSession(Vector<uint8_t> const &sessionId);
+ virtual DrmStatus closeSession(Vector<uint8_t> const &sessionId);
- virtual status_t
+ virtual DrmStatus
getKeyRequest(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &initData,
String8 const &mimeType, DrmPlugin::KeyType keyType,
@@ -88,103 +87,104 @@
Vector<uint8_t> &request, String8 &defaultUrl,
DrmPlugin::KeyRequestType *keyRequestType);
- virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId);
+ virtual DrmStatus provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId);
- virtual status_t removeKeys(Vector<uint8_t> const &keySetId);
+ virtual DrmStatus removeKeys(Vector<uint8_t> const &keySetId);
- virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId);
+ virtual DrmStatus restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId);
- virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const;
+ virtual DrmStatus queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const;
- virtual status_t getProvisionRequest(String8 const &certType,
- String8 const &certAuthority,
- Vector<uint8_t> &request,
- String8 &defaultUrl);
+ virtual DrmStatus getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaultUrl);
- virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey);
+ virtual DrmStatus provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey);
- virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops);
- virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
+ virtual DrmStatus getSecureStops(List<Vector<uint8_t>> &secureStops);
+ virtual DrmStatus getSecureStopIds(List<Vector<uint8_t>> &secureStopIds);
+ virtual DrmStatus getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop);
- virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease);
- virtual status_t removeSecureStop(Vector<uint8_t> const &ssid);
- virtual status_t removeAllSecureStops();
+ virtual DrmStatus releaseSecureStops(Vector<uint8_t> const &ssRelease);
+ virtual DrmStatus removeSecureStop(Vector<uint8_t> const &ssid);
+ virtual DrmStatus removeAllSecureStops();
- virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+ virtual DrmStatus getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
DrmPlugin::HdcpLevel *maxLevel) const;
- virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ virtual DrmStatus getNumberOfSessions(uint32_t *currentSessions,
uint32_t *maxSessions) const;
- virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus getSecurityLevel(Vector<uint8_t> const &sessionId,
DrmPlugin::SecurityLevel *level) const;
- virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
- virtual status_t removeOfflineLicense(Vector<uint8_t> const &keySetId);
- virtual status_t getOfflineLicenseState(Vector<uint8_t> const &keySetId,
+ virtual DrmStatus getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const;
+ virtual DrmStatus removeOfflineLicense(Vector<uint8_t> const &keySetId);
+ virtual DrmStatus getOfflineLicenseState(Vector<uint8_t> const &keySetId,
DrmPlugin::OfflineLicenseState *licenseState) const;
- virtual status_t getPropertyString(String8 const &name, String8 &value ) const;
- virtual status_t getPropertyByteArray(String8 const &name,
- Vector<uint8_t> &value ) const;
- virtual status_t setPropertyString(String8 const &name, String8 const &value ) const;
- virtual status_t setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value ) const;
- virtual status_t getMetrics(const sp<IDrmMetricsConsumer> &consumer);
+ virtual DrmStatus getPropertyString(String8 const &name, String8 &value ) const;
+ virtual DrmStatus getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value ) const;
+ virtual DrmStatus setPropertyString(String8 const &name, String8 const &value ) const;
+ virtual DrmStatus setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value ) const;
+ virtual DrmStatus getMetrics(const sp<IDrmMetricsConsumer> &consumer);
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
+ virtual DrmStatus setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
- virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm);
+ virtual DrmStatus setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm);
- virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual DrmStatus decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output);
+
+ virtual DrmStatus sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature);
+
+ virtual DrmStatus verify(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
-
- virtual status_t decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output);
-
- virtual status_t sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature);
-
- virtual status_t verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match);
-
- virtual status_t signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature);
+ Vector<uint8_t> const &signature,
+ bool &match);
- virtual status_t setListener(const sp<IDrmClient>& listener);
+ virtual DrmStatus signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature);
- virtual status_t requiresSecureDecoder(const char *mime, bool *required) const;
+ virtual DrmStatus setListener(const sp<IDrmClient>& listener);
- virtual status_t requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
- bool *required) const;
+ virtual DrmStatus requiresSecureDecoder(const char *mime, bool *required) const;
- virtual status_t setPlaybackId(
+ virtual DrmStatus requiresSecureDecoder(const char *mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool *required) const;
+
+ virtual DrmStatus setPlaybackId(
Vector<uint8_t> const &sessionId,
const char *playbackId);
- virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
- virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const;
+ virtual DrmStatus getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const;
+ virtual DrmStatus getSupportedSchemes(std::vector<uint8_t> &schemes) const;
// Methods of IDrmPluginListener
Return<void> sendEvent(EventType eventType,
@@ -238,10 +238,10 @@
std::string reportPluginMetrics() const;
std::string reportFrameworkMetrics(const std::string& pluginMetrics) const;
- status_t getPropertyStringInternal(String8 const &name, String8 &value) const;
- status_t getPropertyByteArrayInternal(String8 const &name,
+ DrmStatus getPropertyStringInternal(String8 const &name, String8 &value) const;
+ DrmStatus getPropertyByteArrayInternal(String8 const &name,
Vector<uint8_t> &value) const;
- status_t matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
+ DrmStatus matchMimeTypeAndSecurityLevel(const sp<IDrmFactory> &factory,
const uint8_t uuid[16],
const String8 &mimeType,
DrmPlugin::SecurityLevel level,
diff --git a/drm/libmediadrm/include/mediadrm/IDrm.h b/drm/libmediadrm/include/mediadrm/IDrm.h
index ee2be6a..3dc7485 100644
--- a/drm/libmediadrm/include/mediadrm/IDrm.h
+++ b/drm/libmediadrm/include/mediadrm/IDrm.h
@@ -16,6 +16,7 @@
#include <media/stagefright/foundation/ABase.h>
#include <media/drm/DrmAPI.h>
+#include <mediadrm/DrmStatus.h>
#include <mediadrm/IDrmClient.h>
#include <mediadrm/IDrmMetricsConsumer.h>
@@ -40,24 +41,23 @@
virtual ~IDrm() {}
- virtual status_t initCheck() const = 0;
+ virtual DrmStatus initCheck() const = 0;
- virtual status_t isCryptoSchemeSupported(const uint8_t uuid[16],
- const String8 &mimeType,
- DrmPlugin::SecurityLevel securityLevel,
- bool *result) = 0;
+ virtual DrmStatus isCryptoSchemeSupported(const uint8_t uuid[16], const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* result) = 0;
- virtual status_t createPlugin(const uint8_t uuid[16],
- const String8 &appPackageName) = 0;
+ virtual DrmStatus createPlugin(const uint8_t uuid[16],
+ const String8 &appPackageName) = 0;
- virtual status_t destroyPlugin() = 0;
+ virtual DrmStatus destroyPlugin() = 0;
- virtual status_t openSession(DrmPlugin::SecurityLevel securityLevel,
+ virtual DrmStatus openSession(DrmPlugin::SecurityLevel securityLevel,
Vector<uint8_t> &sessionId) = 0;
- virtual status_t closeSession(Vector<uint8_t> const &sessionId) = 0;
+ virtual DrmStatus closeSession(Vector<uint8_t> const &sessionId) = 0;
- virtual status_t
+ virtual DrmStatus
getKeyRequest(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &initData,
String8 const &mimeType, DrmPlugin::KeyType keyType,
@@ -65,107 +65,108 @@
Vector<uint8_t> &request, String8 &defaultUrl,
DrmPlugin::KeyRequestType *keyRequestType) = 0;
- virtual status_t provideKeyResponse(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &response,
- Vector<uint8_t> &keySetId) = 0;
+ virtual DrmStatus provideKeyResponse(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &response,
+ Vector<uint8_t> &keySetId) = 0;
- virtual status_t removeKeys(Vector<uint8_t> const &keySetId) = 0;
+ virtual DrmStatus removeKeys(Vector<uint8_t> const &keySetId) = 0;
- virtual status_t restoreKeys(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keySetId) = 0;
+ virtual DrmStatus restoreKeys(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keySetId) = 0;
- virtual status_t queryKeyStatus(Vector<uint8_t> const &sessionId,
- KeyedVector<String8, String8> &infoMap) const = 0;
+ virtual DrmStatus queryKeyStatus(Vector<uint8_t> const &sessionId,
+ KeyedVector<String8, String8> &infoMap) const = 0;
- virtual status_t getProvisionRequest(String8 const &certType,
- String8 const &certAuthority,
- Vector<uint8_t> &request,
- String8 &defaulUrl) = 0;
+ virtual DrmStatus getProvisionRequest(String8 const &certType,
+ String8 const &certAuthority,
+ Vector<uint8_t> &request,
+ String8 &defaultUrl) = 0;
- virtual status_t provideProvisionResponse(Vector<uint8_t> const &response,
- Vector<uint8_t> &certificate,
- Vector<uint8_t> &wrappedKey) = 0;
+ virtual DrmStatus provideProvisionResponse(Vector<uint8_t> const &response,
+ Vector<uint8_t> &certificate,
+ Vector<uint8_t> &wrappedKey) = 0;
- virtual status_t getSecureStops(List<Vector<uint8_t>> &secureStops) = 0;
- virtual status_t getSecureStopIds(List<Vector<uint8_t>> &secureStopIds) = 0;
- virtual status_t getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
+ virtual DrmStatus getSecureStops(List<Vector<uint8_t>> &secureStops) = 0;
+ virtual DrmStatus getSecureStopIds(List<Vector<uint8_t>> &secureStopIds) = 0;
+ virtual DrmStatus getSecureStop(Vector<uint8_t> const &ssid, Vector<uint8_t> &secureStop) = 0;
- virtual status_t releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
- virtual status_t removeSecureStop(Vector<uint8_t> const &ssid) = 0;
- virtual status_t removeAllSecureStops() = 0;
+ virtual DrmStatus releaseSecureStops(Vector<uint8_t> const &ssRelease) = 0;
+ virtual DrmStatus removeSecureStop(Vector<uint8_t> const &ssid) = 0;
+ virtual DrmStatus removeAllSecureStops() = 0;
- virtual status_t getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
+ virtual DrmStatus getHdcpLevels(DrmPlugin::HdcpLevel *connectedLevel,
DrmPlugin::HdcpLevel *maxLevel)
const = 0;
- virtual status_t getNumberOfSessions(uint32_t *currentSessions,
+ virtual DrmStatus getNumberOfSessions(uint32_t *currentSessions,
uint32_t *maxSessions) const = 0;
- virtual status_t getSecurityLevel(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus getSecurityLevel(Vector<uint8_t> const &sessionId,
DrmPlugin::SecurityLevel *level) const = 0;
- virtual status_t getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const = 0;
- virtual status_t removeOfflineLicense(Vector<uint8_t> const &keySetId) = 0;
- virtual status_t getOfflineLicenseState(Vector<uint8_t> const &keySetId,
+ virtual DrmStatus getOfflineLicenseKeySetIds(List<Vector<uint8_t>> &keySetIds) const = 0;
+ virtual DrmStatus removeOfflineLicense(Vector<uint8_t> const &keySetId) = 0;
+ virtual DrmStatus getOfflineLicenseState(Vector<uint8_t> const &keySetId,
DrmPlugin::OfflineLicenseState *licenseState) const = 0;
- virtual status_t getPropertyString(String8 const &name, String8 &value) const = 0;
- virtual status_t getPropertyByteArray(String8 const &name,
- Vector<uint8_t> &value) const = 0;
- virtual status_t setPropertyString(String8 const &name,
- String8 const &value ) const = 0;
- virtual status_t setPropertyByteArray(String8 const &name,
- Vector<uint8_t> const &value) const = 0;
+ virtual DrmStatus getPropertyString(String8 const &name, String8 &value) const = 0;
+ virtual DrmStatus getPropertyByteArray(String8 const &name,
+ Vector<uint8_t> &value) const = 0;
+ virtual DrmStatus setPropertyString(String8 const &name,
+ String8 const &value ) const = 0;
+ virtual DrmStatus setPropertyByteArray(String8 const &name,
+ Vector<uint8_t> const &value) const = 0;
- virtual status_t getMetrics(const sp<IDrmMetricsConsumer> &consumer) = 0;
+ virtual DrmStatus getMetrics(const sp<IDrmMetricsConsumer> &consumer) = 0;
- virtual status_t setCipherAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) = 0;
+ virtual DrmStatus setCipherAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) = 0;
- virtual status_t setMacAlgorithm(Vector<uint8_t> const &sessionId,
- String8 const &algorithm) = 0;
+ virtual DrmStatus setMacAlgorithm(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm) = 0;
- virtual status_t encrypt(Vector<uint8_t> const &sessionId,
+ virtual DrmStatus encrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output) = 0;
+
+ virtual DrmStatus decrypt(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &input,
+ Vector<uint8_t> const &iv,
+ Vector<uint8_t> &output) = 0;
+
+ virtual DrmStatus sign(Vector<uint8_t> const &sessionId,
+ Vector<uint8_t> const &keyId,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> &signature) = 0;
+
+ virtual DrmStatus verify(Vector<uint8_t> const &sessionId,
Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) = 0;
-
- virtual status_t decrypt(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &input,
- Vector<uint8_t> const &iv,
- Vector<uint8_t> &output) = 0;
-
- virtual status_t sign(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> &signature) = 0;
-
- virtual status_t verify(Vector<uint8_t> const &sessionId,
- Vector<uint8_t> const &keyId,
- Vector<uint8_t> const &message,
- Vector<uint8_t> const &signature,
- bool &match) = 0;
-
- virtual status_t signRSA(Vector<uint8_t> const &sessionId,
- String8 const &algorithm,
Vector<uint8_t> const &message,
- Vector<uint8_t> const &wrappedKey,
- Vector<uint8_t> &signature) = 0;
+ Vector<uint8_t> const &signature,
+ bool &match) = 0;
- virtual status_t setListener(const sp<IDrmClient>& listener) = 0;
+ virtual DrmStatus signRSA(Vector<uint8_t> const &sessionId,
+ String8 const &algorithm,
+ Vector<uint8_t> const &message,
+ Vector<uint8_t> const &wrappedKey,
+ Vector<uint8_t> &signature) = 0;
- virtual status_t requiresSecureDecoder(const char *mime, bool *required) const = 0;
+ virtual DrmStatus setListener(const sp<IDrmClient>& listener) = 0;
- virtual status_t requiresSecureDecoder(const char *mime, DrmPlugin::SecurityLevel securityLevel,
- bool *required) const = 0;
+ virtual DrmStatus requiresSecureDecoder(const char *mime, bool *required) const = 0;
- virtual status_t setPlaybackId(
+ virtual DrmStatus requiresSecureDecoder(const char *mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool *required) const = 0;
+
+ virtual DrmStatus setPlaybackId(
Vector<uint8_t> const &sessionId,
const char *playbackId) = 0;
- virtual status_t getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const = 0;
+ virtual DrmStatus getLogMessages(Vector<drm::V1_4::LogMessage> &logs) const = 0;
- virtual status_t getSupportedSchemes(std::vector<uint8_t> &schemes) const = 0;
+ virtual DrmStatus getSupportedSchemes(std::vector<uint8_t> &schemes) const = 0;
protected:
IDrm() {}
diff --git a/drm/libmediadrm/interface/mediadrm/DrmStatus.h b/drm/libmediadrm/interface/mediadrm/DrmStatus.h
new file mode 100644
index 0000000..15826ca
--- /dev/null
+++ b/drm/libmediadrm/interface/mediadrm/DrmStatus.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_STATUS_
+#define DRM_STATUS_
+
+#include <media/stagefright/foundation/ABase.h>
+#include <media/stagefright/MediaErrors.h>
+#include <utils/Errors.h>
+
+#include <stdint.h>
+#include <string>
+
+namespace android {
+
+struct DrmStatus {
+ public:
+ DrmStatus(status_t status, int32_t cdmErr = 0, int32_t oemErr = 0,
+ int32_t ctx = 0, std::string errMsg = "")
+ : mStatus(status), mCdmErr(cdmErr), mOemErr(oemErr),
+ mCtx(ctx), mErrMsg(errMsg) {}
+ DrmStatus(status_t err, const char *msg);
+ operator status_t() const { return mStatus; }
+ int32_t getCdmErr() const { return mCdmErr; }
+ int32_t getOemErr() const { return mOemErr; }
+ int32_t getContext() const { return mCtx; }
+ std::string getErrorMessage() const { return mErrMsg; }
+ bool operator==(status_t other) const { return mStatus == other; }
+ bool operator!=(status_t other) const { return mStatus != other; }
+
+ private:
+ status_t mStatus{};
+ int32_t mCdmErr{}, mOemErr{}, mCtx{};
+ std::string mErrMsg;
+};
+
+} // namespace android
+
+#endif // DRM_STATUS_
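
Note: a minimal usage sketch for the new DrmStatus type (the handleDrmError caller below is hypothetical, not part of this change). Because DrmStatus converts implicitly to status_t, existing error checks keep compiling, while callers that care can read the CDM/OEM/context details:

    #define LOG_TAG "DrmStatusExample"
    #include <mediadrm/DrmStatus.h>
    #include <utils/Log.h>

    namespace android {

    // Hypothetical caller: shows DrmStatus interoperating with status_t.
    void handleDrmError(const DrmStatus& err) {
        if (err == OK) {           // operator==(status_t) keeps legacy comparisons valid
            return;
        }
        status_t legacy = err;     // implicit operator status_t() for old call sites
        ALOGE("drm error %d (cdm=%d, oem=%d, ctx=%d): %s",
              legacy, err.getCdmErr(), err.getOemErr(), err.getContext(),
              err.getErrorMessage().c_str());
    }

    }  // namespace android
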
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 980ce55..2632ebd 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -22,6 +22,7 @@
#include <android/hardware/drm/1.4/IDrmPlugin.h>
#include <android/hardware/drm/1.4/types.h>
#include <media/stagefright/MediaErrors.h>
+#include <mediadrm/DrmStatus.h>
#include <utils/Errors.h> // for status_t
#include <utils/Log.h>
#include <utils/String8.h>
@@ -198,98 +199,7 @@
return toStatusT_1_4(err);
}
-inline status_t statusAidlToStatusT(::ndk::ScopedAStatus &statusAidl) {
- if (statusAidl.isOk()) return OK;
- if (statusAidl.getExceptionCode() != EX_SERVICE_SPECIFIC) return DEAD_OBJECT;
- auto status = static_cast<StatusAidl>(statusAidl.getServiceSpecificError());
- switch (status) {
- case StatusAidl::OK:
- return OK;
- case StatusAidl::BAD_VALUE:
- return BAD_VALUE;
- case StatusAidl::ERROR_DRM_CANNOT_HANDLE:
- return ERROR_DRM_CANNOT_HANDLE;
- case StatusAidl::ERROR_DRM_DECRYPT:
- return ERROR_DRM_DECRYPT;
- case StatusAidl::ERROR_DRM_DEVICE_REVOKED:
- return ERROR_DRM_DEVICE_REVOKED;
- case StatusAidl::ERROR_DRM_FRAME_TOO_LARGE:
- return ERROR_DRM_FRAME_TOO_LARGE;
- case StatusAidl::ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION:
- return ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION;
- case StatusAidl::ERROR_DRM_INSUFFICIENT_SECURITY:
- return ERROR_DRM_INSUFFICIENT_SECURITY;
- case StatusAidl::ERROR_DRM_INVALID_STATE:
- return ERROR_DRM_INVALID_STATE;
- case StatusAidl::ERROR_DRM_LICENSE_EXPIRED:
- return ERROR_DRM_LICENSE_EXPIRED;
- case StatusAidl::ERROR_DRM_NO_LICENSE:
- return ERROR_DRM_NO_LICENSE;
- case StatusAidl::ERROR_DRM_NOT_PROVISIONED:
- return ERROR_DRM_NOT_PROVISIONED;
- case StatusAidl::ERROR_DRM_RESOURCE_BUSY:
- return ERROR_DRM_RESOURCE_BUSY;
- case StatusAidl::ERROR_DRM_RESOURCE_CONTENTION:
- return ERROR_DRM_RESOURCE_CONTENTION;
- case StatusAidl::ERROR_DRM_SESSION_LOST_STATE:
- return ERROR_DRM_SESSION_LOST_STATE;
- case StatusAidl::ERROR_DRM_SESSION_NOT_OPENED:
- return ERROR_DRM_SESSION_NOT_OPENED;
-
- // New in S / drm@1.4:
- case StatusAidl::CANNOT_DECRYPT_ZERO_SUBSAMPLES:
- return ERROR_DRM_ZERO_SUBSAMPLES;
- case StatusAidl::CRYPTO_LIBRARY_ERROR:
- return ERROR_DRM_CRYPTO_LIBRARY;
- case StatusAidl::GENERAL_OEM_ERROR:
- return ERROR_DRM_GENERIC_OEM;
- case StatusAidl::GENERAL_PLUGIN_ERROR:
- return ERROR_DRM_GENERIC_PLUGIN;
- case StatusAidl::INIT_DATA_INVALID:
- return ERROR_DRM_INIT_DATA;
- case StatusAidl::KEY_NOT_LOADED:
- return ERROR_DRM_KEY_NOT_LOADED;
- case StatusAidl::LICENSE_PARSE_ERROR:
- return ERROR_DRM_LICENSE_PARSE;
- case StatusAidl::LICENSE_POLICY_ERROR:
- return ERROR_DRM_LICENSE_POLICY;
- case StatusAidl::LICENSE_RELEASE_ERROR:
- return ERROR_DRM_LICENSE_RELEASE;
- case StatusAidl::LICENSE_REQUEST_REJECTED:
- return ERROR_DRM_LICENSE_REQUEST_REJECTED;
- case StatusAidl::LICENSE_RESTORE_ERROR:
- return ERROR_DRM_LICENSE_RESTORE;
- case StatusAidl::LICENSE_STATE_ERROR:
- return ERROR_DRM_LICENSE_STATE;
- case StatusAidl::MALFORMED_CERTIFICATE:
- return ERROR_DRM_CERTIFICATE_MALFORMED;
- case StatusAidl::MEDIA_FRAMEWORK_ERROR:
- return ERROR_DRM_MEDIA_FRAMEWORK;
- case StatusAidl::MISSING_CERTIFICATE:
- return ERROR_DRM_CERTIFICATE_MISSING;
- case StatusAidl::PROVISIONING_CERTIFICATE_ERROR:
- return ERROR_DRM_PROVISIONING_CERTIFICATE;
- case StatusAidl::PROVISIONING_CONFIGURATION_ERROR:
- return ERROR_DRM_PROVISIONING_CONFIG;
- case StatusAidl::PROVISIONING_PARSE_ERROR:
- return ERROR_DRM_PROVISIONING_PARSE;
- case StatusAidl::PROVISIONING_REQUEST_REJECTED:
- return ERROR_DRM_PROVISIONING_REQUEST_REJECTED;
- case StatusAidl::RETRYABLE_PROVISIONING_ERROR:
- return ERROR_DRM_PROVISIONING_RETRY;
- case StatusAidl::SECURE_STOP_RELEASE_ERROR:
- return ERROR_DRM_SECURE_STOP_RELEASE;
- case StatusAidl::STORAGE_READ_FAILURE:
- return ERROR_DRM_STORAGE_READ;
- case StatusAidl::STORAGE_WRITE_FAILURE:
- return ERROR_DRM_STORAGE_WRITE;
-
- case StatusAidl::ERROR_DRM_UNKNOWN:
- default:
- return ERROR_DRM_UNKNOWN;
- }
- return ERROR_DRM_UNKNOWN;
-}
+DrmStatus statusAidlToDrmStatus(::ndk::ScopedAStatus& statusAidl);
template<typename T, typename U>
status_t GetLogMessagesAidl(const std::shared_ptr<U> &obj, Vector<::V1_4::LogMessage> &logs) {
@@ -373,14 +283,16 @@
return OK;
}
-std::string GetExceptionMessage(status_t err, const char *msg,
+std::string GetExceptionMessage(const DrmStatus & err, const char *defaultMsg,
const Vector<::V1_4::LogMessage> &logs);
template<typename T>
-std::string GetExceptionMessage(status_t err, const char *msg, const sp<T> &iface) {
+std::string GetExceptionMessage(const DrmStatus &err, const char *defaultMsg, const sp<T> &iface) {
Vector<::V1_4::LogMessage> logs;
- iface->getLogMessages(logs);
- return GetExceptionMessage(err, msg, logs);
+ if (iface != NULL) {
+ iface->getLogMessages(logs);
+ }
+ return GetExceptionMessage(err, defaultMsg, logs);
}
} // namespace DrmUtils
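
Note: a rough sketch (hypothetical call site, assuming the declarations and namespaces in DrmUtils.h above) of how the relocated statusAidlToDrmStatus() and the now null-tolerant GetExceptionMessage() are meant to compose:

    #include <string>
    #include <mediadrm/DrmUtils.h>

    // Hypothetical helper, not shipped code: PluginT is any AIDL plugin proxy
    // exposing getLogMessages(); the proxy may be null.
    template <typename PluginT>
    std::string describeAidlFailure(::ndk::ScopedAStatus& status,
                                    const ::android::sp<PluginT>& halLogSource) {
        // Fold the binder / service-specific error into a DrmStatus so the
        // CDM/OEM detail survives instead of collapsing to a bare status_t.
        ::android::DrmStatus err = ::android::DrmUtils::statusAidlToDrmStatus(status);
        // GetExceptionMessage tolerates a null interface and simply skips
        // fetching plugin log messages in that case.
        return ::android::DrmUtils::GetExceptionMessage(err, "AIDL DRM call failed",
                                                        halLogSource);
    }
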
diff --git a/drm/libmediadrm/interface/mediadrm/ICrypto.h b/drm/libmediadrm/interface/mediadrm/ICrypto.h
index 2c4df60..4087186 100644
--- a/drm/libmediadrm/interface/mediadrm/ICrypto.h
+++ b/drm/libmediadrm/interface/mediadrm/ICrypto.h
@@ -17,6 +17,7 @@
#include <cutils/native_handle.h>
#include <media/hardware/CryptoAPI.h>
#include <media/stagefright/foundation/ABase.h>
+#include <mediadrm/DrmStatus.h>
#include <utils/RefBase.h>
#include <utils/StrongPointer.h>
@@ -65,7 +66,7 @@
virtual void notifyResolution(uint32_t width, uint32_t height) = 0;
- virtual status_t setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
+ virtual DrmStatus setMediaDrmSession(const Vector<uint8_t> &sessionId) = 0;
enum DestinationType {
kDestinationTypeSharedMemory, // non-secure
diff --git a/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
index 201cf02..afc9b6a 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/CryptoPlugin.cpp
@@ -144,6 +144,11 @@
clearDataLengths.push_back(ss.numBytesOfClearData);
encryptedDataLengths.push_back(ss.numBytesOfEncryptedData);
}
+ if (in_args.keyId.size() != kBlockSize || in_args.iv.size() != kBlockSize) {
+ android_errorWriteLog(0x534e4554, "244569759");
+ detailedError = "invalid decrypt parameter size";
+ return toNdkScopedAStatus(Status::ERROR_DRM_CANNOT_HANDLE, detailedError);
+ }
auto res =
mSession->decrypt(in_args.keyId.data(), in_args.iv.data(),
srcPtr, static_cast<uint8_t*>(destPtr),
diff --git a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
index 3a675f6..7bc320d 100644
--- a/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/hidl/CryptoPlugin.cpp
@@ -206,6 +206,11 @@
return Void();
} else if (mode == Mode::AES_CTR) {
size_t bytesDecrypted;
+ if (keyId.size() != kBlockSize || iv.size() != kBlockSize) {
+ android_errorWriteLog(0x534e4554, "244569759");
+ _hidl_cb(Status_V1_2::ERROR_DRM_CANNOT_HANDLE, 0, "invalid decrypt parameter size");
+ return Void();
+ }
Status_V1_2 res = mSession->decrypt(keyId.data(), iv.data(), srcPtr,
static_cast<uint8_t*>(destPtr), toVector(subSamples), &bytesDecrypted);
if (res == Status_V1_2::OK) {
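
Note: both hunks above add the same parameter-size guard to the AIDL and HIDL clearkey plugins. A condensed, self-contained sketch of the shared intent; kBlockSize here is the 16-byte AES block size used by the clearkey sources:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    namespace {
    constexpr size_t kBlockSize = 16;  // AES-128 block size used by clearkey

    // Reject undersized/oversized key IDs and IVs up front so the session's
    // decrypt routine never reads past the caller-provided buffers.
    bool isValidDecryptParams(const std::vector<uint8_t>& keyId,
                              const std::vector<uint8_t>& iv) {
        return keyId.size() == kBlockSize && iv.size() == kBlockSize;
    }
    }  // namespace
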
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 35205e5..a22ec19 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -41,8 +41,51 @@
{
"path": "frameworks/av/drm/mediadrm/plugins"
}
+ ],
+
+ "platinum-postsubmit": [
+ // runs regularly, independent of changes in this tree.
+ // signals if changes elsewhere break media functionality
+ // @FlakyTest: in staged-postsubmit, but not postsubmit
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.EncodeDecodeTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
+ },
+ {
+ "exclude-annotation": "androidx.test.filters.FlakyTest"
+ }
+ ]
+ }
+ ],
+
+ "staged-platinum-postsubmit": [
+ // runs every four hours
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.EncodeDecodeTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
+ }
+ ]
+ }
]
- // TODO (b/229286407) Add EncodeDecodeTest and DecodeEditEncodeTest to
- // platinum-postsubmit once issues in cuttlefish are fixed
}
diff --git a/media/audioaidlconversion/AidlConversionCppNdk.cpp b/media/audioaidlconversion/AidlConversionCppNdk.cpp
new file mode 100644
index 0000000..37887f6
--- /dev/null
+++ b/media/audioaidlconversion/AidlConversionCppNdk.cpp
@@ -0,0 +1,2266 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <map>
+#include <utility>
+#include <vector>
+
+#define LOG_TAG "AidlConversionCppNdk"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include "media/AidlConversionCppNdk.h"
+
+#include <media/ShmemCompat.h>
+#include <media/stagefright/foundation/MediaDefs.h>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AIDL CPP/NDK backend to legacy audio data structure conversion utilities.
+
+#if defined(BACKEND_NDK)
+/* The AIDL String type generated for the NDK backend differs from the CPP backend */
+#define GET_DEVICE_DESC_CONNECTION(x) AudioDeviceDescription::CONNECTION_##x
+namespace aidl {
+#else
+#define GET_DEVICE_DESC_CONNECTION(x) AudioDeviceDescription::CONNECTION_##x()
+#endif
+
+namespace android {
+
+using ::android::BAD_VALUE;
+using ::android::OK;
+using ::android::base::unexpected;
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioConfig;
+using media::audio::common::AudioConfigBase;
+using media::audio::common::AudioContentType;
+using media::audio::common::AudioDevice;
+using media::audio::common::AudioDeviceAddress;
+using media::audio::common::AudioDeviceDescription;
+using media::audio::common::AudioDeviceType;
+using media::audio::common::AudioDualMonoMode;
+using media::audio::common::AudioEncapsulationMetadataType;
+using media::audio::common::AudioEncapsulationMode;
+using media::audio::common::AudioEncapsulationType;
+using media::audio::common::AudioFormatDescription;
+using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
+using media::audio::common::AudioGainMode;
+using media::audio::common::AudioInputFlags;
+using media::audio::common::AudioIoFlags;
+using media::audio::common::AudioLatencyMode;
+using media::audio::common::AudioMode;
+using media::audio::common::AudioOffloadInfo;
+using media::audio::common::AudioOutputFlags;
+using media::audio::common::AudioPlaybackRate;
+using media::audio::common::AudioPortDeviceExt;
+using media::audio::common::AudioPortExt;
+using media::audio::common::AudioPortMixExt;
+using media::audio::common::AudioPortMixExtUseCase;
+using media::audio::common::AudioProfile;
+using media::audio::common::AudioSource;
+using media::audio::common::AudioStandard;
+using media::audio::common::AudioStreamType;
+using media::audio::common::AudioUsage;
+using media::audio::common::AudioUuid;
+using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
+using media::audio::common::PcmType;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Converters
+
+::android::status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize) {
+ if (aidl.size() > maxSize - 1) {
+ return BAD_VALUE;
+ }
+ aidl.copy(dest, aidl.size());
+ dest[aidl.size()] = '\0';
+ return OK;
+}
+
+ConversionResult<std::string> legacy2aidl_string(const char* legacy, size_t maxSize) {
+ if (legacy == nullptr) {
+ return unexpected(BAD_VALUE);
+ }
+ if (strnlen(legacy, maxSize) == maxSize) {
+ // No null-terminator.
+ return unexpected(BAD_VALUE);
+ }
+ return std::string(legacy);
+}
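
Note: a short usage sketch for the two string helpers above (hypothetical caller, assuming the CPP backend, i.e. namespace android): aidl2legacy_string copies into a fixed-size, NUL-terminated legacy buffer, while legacy2aidl_string rejects buffers that are not NUL-terminated within maxSize.

    #include <string>

    void stringConversionExample() {
        char legacyAddress[32] = {};  // illustrative fixed-size legacy buffer
        // Fails with BAD_VALUE if the source does not fit, including the NUL.
        ::android::status_t st = ::android::aidl2legacy_string(
                "card=2;device=0", legacyAddress, sizeof(legacyAddress));
        if (st != ::android::OK) return;

        // Round-trip back to an AIDL std::string; fails if not NUL-terminated.
        auto aidl = ::android::legacy2aidl_string(legacyAddress, sizeof(legacyAddress));
        if (aidl.ok()) {
            std::string s = aidl.value();  // "card=2;device=0"
        }
    }
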
+
+ConversionResult<audio_module_handle_t> aidl2legacy_int32_t_audio_module_handle_t(int32_t aidl) {
+ return convertReinterpret<audio_module_handle_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_module_handle_t_int32_t(audio_module_handle_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_io_handle_t> aidl2legacy_int32_t_audio_io_handle_t(int32_t aidl) {
+ return convertReinterpret<audio_io_handle_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_io_handle_t_int32_t(audio_io_handle_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_port_handle_t> aidl2legacy_int32_t_audio_port_handle_t(int32_t aidl) {
+ return convertReinterpret<audio_port_handle_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_port_handle_t_int32_t(audio_port_handle_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_patch_handle_t> aidl2legacy_int32_t_audio_patch_handle_t(int32_t aidl) {
+ return convertReinterpret<audio_patch_handle_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_patch_handle_t_int32_t(audio_patch_handle_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl) {
+ return convertReinterpret<audio_unique_id_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl) {
+ return convertReinterpret<audio_hw_sync_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl) {
+ return convertReinterpret<pid_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<uid_t> aidl2legacy_int32_t_uid_t(int32_t aidl) {
+ return convertReinterpret<uid_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_uid_t_int32_t(uid_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<String16> aidl2legacy_string_view_String16(std::string_view aidl) {
+ return String16(aidl.data(), aidl.size());
+}
+
+ConversionResult<std::string> legacy2aidl_String16_string(const String16& legacy) {
+ return std::string(String8(legacy).c_str());
+}
+
+// TODO b/182392769: create an optional -> optional util
+ConversionResult<std::optional<String16>>
+aidl2legacy_optional_string_view_optional_String16(std::optional<std::string_view> aidl) {
+ if (!aidl.has_value()) {
+ return std::nullopt;
+ }
+ ConversionResult<String16> conversion =
+ VALUE_OR_RETURN(aidl2legacy_string_view_String16(aidl.value()));
+ return conversion.value();
+}
+
+ConversionResult<std::optional<std::string_view>>
+legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy) {
+ if (!legacy.has_value()) {
+ return std::nullopt;
+ }
+ ConversionResult<std::string> conversion =
+ VALUE_OR_RETURN(legacy2aidl_String16_string(legacy.value()));
+ return conversion.value();
+}
+
+ConversionResult<String8> aidl2legacy_string_view_String8(std::string_view aidl) {
+ return String8(aidl.data(), aidl.size());
+}
+
+ConversionResult<std::string> legacy2aidl_String8_string(const String8& legacy) {
+ return std::string(legacy.c_str());
+}
+
+namespace {
+
+namespace detail {
+using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
+using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
+using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
+using AudioChannelPairs = std::vector<AudioChannelPair>;
+using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
+using AudioDevicePairs = std::vector<AudioDevicePair>;
+using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
+using AudioFormatPairs = std::vector<AudioFormatPair>;
+}
+
+const detail::AudioChannelBitPairs& getInAudioChannelBits() {
+ static const detail::AudioChannelBitPairs pairs = {
+ { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
+ { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
+ // AUDIO_CHANNEL_IN_FRONT is at the end
+ { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
+ // AUDIO_CHANNEL_IN_*_PROCESSED not supported
+ // AUDIO_CHANNEL_IN_PRESSURE not supported
+ // AUDIO_CHANNEL_IN_*_AXIS not supported
+ // AUDIO_CHANNEL_IN_VOICE_* not supported
+ { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
+ { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
+ { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
+ { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
+ { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
+ { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
+ // When going from aidl to legacy, IN_CENTER is used
+ { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getInAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_INPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_INPUT_LAYOUT(MONO),
+ DEFINE_INPUT_LAYOUT(STEREO),
+ DEFINE_INPUT_LAYOUT(FRONT_BACK),
+ // AUDIO_CHANNEL_IN_6 not supported
+ DEFINE_INPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_INPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_INPUT_LAYOUT(5POINT1)
+#undef DEFINE_INPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
+ static const detail::AudioChannelBitPairs pairs = {
+#define DEFINE_OUTPUT_BITS(n) \
+ { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
+
+ DEFINE_OUTPUT_BITS(FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
+ DEFINE_OUTPUT_BITS(BACK_LEFT),
+ DEFINE_OUTPUT_BITS(BACK_RIGHT),
+ DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
+ DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
+ DEFINE_OUTPUT_BITS(BACK_CENTER),
+ DEFINE_OUTPUT_BITS(SIDE_LEFT),
+ DEFINE_OUTPUT_BITS(SIDE_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
+ DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
+ DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
+ DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
+ DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
+ DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
+ DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
+ DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
+#undef DEFINE_OUTPUT_BITS
+ { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
+ { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getOutAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_OUTPUT_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_OUT_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
+ AudioChannelLayout::LAYOUT_##n) \
+ }
+
+ DEFINE_OUTPUT_LAYOUT(MONO),
+ DEFINE_OUTPUT_LAYOUT(STEREO),
+ DEFINE_OUTPUT_LAYOUT(2POINT1),
+ DEFINE_OUTPUT_LAYOUT(TRI),
+ DEFINE_OUTPUT_LAYOUT(TRI_BACK),
+ DEFINE_OUTPUT_LAYOUT(3POINT1),
+ DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
+ DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(QUAD),
+ DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
+ DEFINE_OUTPUT_LAYOUT(SURROUND),
+ DEFINE_OUTPUT_LAYOUT(PENTA),
+ DEFINE_OUTPUT_LAYOUT(5POINT1),
+ DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(6POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
+ DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
+ DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
+ DEFINE_OUTPUT_LAYOUT(22POINT2),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
+ DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
+ DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
+#undef DEFINE_OUTPUT_LAYOUT
+ };
+ return pairs;
+}
+
+const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
+ static const detail::AudioChannelPairs pairs = {
+#define DEFINE_VOICE_LAYOUT(n) \
+ { \
+ AUDIO_CHANNEL_IN_VOICE_##n, \
+ AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>( \
+ AudioChannelLayout::VOICE_##n) \
+ }
+ DEFINE_VOICE_LAYOUT(UPLINK_MONO),
+ DEFINE_VOICE_LAYOUT(DNLINK_MONO),
+ DEFINE_VOICE_LAYOUT(CALL_MONO)
+#undef DEFINE_VOICE_LAYOUT
+ };
+ return pairs;
+}
+
+AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
+ const std::string& connection = "") {
+ AudioDeviceDescription result;
+ result.type = type;
+ result.connection = connection;
+ return result;
+}
+
+void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
+ audio_devices_t inputType, audio_devices_t outputType,
+ AudioDeviceType inType, AudioDeviceType outType,
+ const std::string& connection = "") {
+ pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
+ pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
+}
+
+const detail::AudioDevicePairs& getAudioDevicePairs() {
+ static const detail::AudioDevicePairs pairs = []() {
+ detail::AudioDevicePairs pairs = {{
+ {
+ AUDIO_DEVICE_NONE, AudioDeviceDescription{}
+ },
+ {
+ AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER_EARPIECE)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER)
+ },
+ {
+ AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEADPHONE,
+ GET_DEVICE_DESC_CONNECTION(ANALOG))
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(BT_SCO))
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_CARKIT,
+ GET_DEVICE_DESC_CONNECTION(BT_SCO))
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEADPHONE,
+ GET_DEVICE_DESC_CONNECTION(BT_A2DP))
+ },
+ {
+ AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER,
+ GET_DEVICE_DESC_CONNECTION(BT_A2DP))
+ },
+ {
+ AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_TELEPHONY_TX)
+ },
+ {
+ AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_LINE_AUX)
+ },
+ {
+ AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER_SAFE)
+ },
+ {
+ AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_HEARING_AID,
+ GET_DEVICE_DESC_CONNECTION(WIRELESS))
+ },
+ {
+ AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_ECHO_CANCELLER)
+ },
+ {
+ AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SPEAKER,
+ GET_DEVICE_DESC_CONNECTION(BT_LE))
+ },
+ {
+ AUDIO_DEVICE_OUT_BLE_BROADCAST, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_BROADCAST,
+ GET_DEVICE_DESC_CONNECTION(BT_LE))
+ },
+ // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
+ {
+ AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
+ AudioDeviceType::IN_MICROPHONE)
+ },
+ {
+ AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
+ AudioDeviceType::IN_MICROPHONE_BACK)
+ },
+ {
+ AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
+ AudioDeviceType::IN_TELEPHONY_RX)
+ },
+ {
+ AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
+ AudioDeviceType::IN_TV_TUNER)
+ },
+ {
+ AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
+ AudioDeviceType::IN_LOOPBACK)
+ },
+ {
+ AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
+ AudioDeviceType::IN_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(BT_LE))
+ },
+ {
+ AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
+ AudioDeviceType::IN_ECHO_REFERENCE)
+ }
+ }};
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
+ AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ GET_DEVICE_DESC_CONNECTION(ANALOG));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ GET_DEVICE_DESC_CONNECTION(BT_SCO));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(HDMI));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
+ AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
+ AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+ GET_DEVICE_DESC_CONNECTION(ANALOG));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
+ AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
+ GET_DEVICE_DESC_CONNECTION(USB));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
+ AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
+ GET_DEVICE_DESC_CONNECTION(USB));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(USB));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
+ AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(ANALOG));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(SPDIF));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(BT_A2DP));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(IP_V4));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(BUS));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
+ AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ GET_DEVICE_DESC_CONNECTION(USB));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(HDMI_ARC));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
+ AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
+ GET_DEVICE_DESC_CONNECTION(HDMI_EARC));
+ append_AudioDeviceDescription(pairs,
+ AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
+ AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
+ GET_DEVICE_DESC_CONNECTION(BT_LE));
+ return pairs;
+ }();
+#undef GET_DEVICE_DESC_CONNECTION
+ return pairs;
+}
+
+AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
+ AudioFormatDescription result;
+ result.type = type;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
+ auto result = make_AudioFormatDescription(AudioFormatType::PCM);
+ result.pcm = pcm;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
+ AudioFormatDescription result;
+ result.encoding = encoding;
+ return result;
+}
+
+AudioFormatDescription make_AudioFormatDescription(PcmType transport,
+ const std::string& encoding) {
+ auto result = make_AudioFormatDescription(encoding);
+ result.pcm = transport;
+ return result;
+}
+
+const detail::AudioFormatPairs& getAudioFormatPairs() {
+ static const detail::AudioFormatPairs pairs = {{
+ {AUDIO_FORMAT_INVALID,
+ make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)},
+ {AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}},
+ {AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)},
+ {AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)},
+ {AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)},
+ {AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)},
+ {AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)},
+ {AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)},
+ {AUDIO_FORMAT_MP3, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEG)},
+ {AUDIO_FORMAT_AMR_NB,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AMR_NB)},
+ {AUDIO_FORMAT_AMR_WB,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AMR_WB)},
+ {AUDIO_FORMAT_AAC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_MP4)},
+ {AUDIO_FORMAT_AAC_MAIN,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_MAIN)},
+ {AUDIO_FORMAT_AAC_LC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LC)},
+ {AUDIO_FORMAT_AAC_SSR,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_SSR)},
+ {AUDIO_FORMAT_AAC_LTP,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LTP)},
+ {AUDIO_FORMAT_AAC_HE_V1,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)},
+ {AUDIO_FORMAT_AAC_SCALABLE,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)},
+ {AUDIO_FORMAT_AAC_ERLC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ERLC)},
+ {AUDIO_FORMAT_AAC_LD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LD)},
+ {AUDIO_FORMAT_AAC_HE_V2,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)},
+ {AUDIO_FORMAT_AAC_ELD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ELD)},
+ {AUDIO_FORMAT_AAC_XHE,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_XHE)
+
+ },
+            // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated a long
+            // time ago.
+ {AUDIO_FORMAT_VORBIS,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_VORBIS)},
+ {AUDIO_FORMAT_OPUS, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_OPUS)},
+ {AUDIO_FORMAT_AC3, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AC3)},
+ {AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EAC3)},
+ {AUDIO_FORMAT_E_AC3_JOC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EAC3_JOC)},
+ {AUDIO_FORMAT_DTS, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DTS)},
+ {AUDIO_FORMAT_DTS_HD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DTS_HD)},
+ {AUDIO_FORMAT_DTS_HD_MA,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DTS_HD_MA)},
+ {AUDIO_FORMAT_DTS_UHD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DTS_UHD_P1)},
+ {AUDIO_FORMAT_DTS_UHD_P2,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2)},
+ // In the future, we would like to represent encapsulated bitstreams as
+ // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
+ // specify the format of the encapsulated bitstream.
+ {AUDIO_FORMAT_IEC61937,
+ make_AudioFormatDescription(PcmType::INT_16_BIT,
+ ::android::MEDIA_MIMETYPE_AUDIO_IEC61937)},
+ {AUDIO_FORMAT_DOLBY_TRUEHD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)},
+ {AUDIO_FORMAT_EVRC, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EVRC)},
+ {AUDIO_FORMAT_EVRCB,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EVRCB)},
+ {AUDIO_FORMAT_EVRCWB,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EVRCWB)},
+ {AUDIO_FORMAT_EVRCNW,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_EVRCNW)},
+ {AUDIO_FORMAT_AAC_ADIF,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADIF)},
+ {AUDIO_FORMAT_WMA, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_WMA)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")},
+ {AUDIO_FORMAT_AMR_WB_PLUS,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)},
+ {AUDIO_FORMAT_MP2,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)},
+ {AUDIO_FORMAT_QCELP,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_QCELP)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")},
+ {AUDIO_FORMAT_FLAC, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_FLAC)},
+ {AUDIO_FORMAT_ALAC, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_ALAC)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")},
+ {AUDIO_FORMAT_AAC_ADTS,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS)},
+ {AUDIO_FORMAT_AAC_ADTS_MAIN,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)},
+ {AUDIO_FORMAT_AAC_ADTS_LC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)},
+ {AUDIO_FORMAT_AAC_ADTS_SSR,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)},
+ {AUDIO_FORMAT_AAC_ADTS_LTP,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)},
+ {AUDIO_FORMAT_AAC_ADTS_HE_V1,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)},
+ {AUDIO_FORMAT_AAC_ADTS_SCALABLE,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)},
+ {AUDIO_FORMAT_AAC_ADTS_ERLC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)},
+ {AUDIO_FORMAT_AAC_ADTS_LD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)},
+ {AUDIO_FORMAT_AAC_ADTS_HE_V2,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)},
+ {AUDIO_FORMAT_AAC_ADTS_ELD,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)},
+ {AUDIO_FORMAT_AAC_ADTS_XHE,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)},
+ {// Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
+ AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")},
+ {AUDIO_FORMAT_APTX, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_APTX)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")},
+ {AUDIO_FORMAT_AC4, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AC4)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")},
+ {AUDIO_FORMAT_MAT,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_1_0,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DOLBY_MAT +
+ std::string(".1.0"))},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_0,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DOLBY_MAT +
+ std::string(".2.0"))},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_MAT_2_1,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DOLBY_MAT +
+ std::string(".2.1"))},
+ {AUDIO_FORMAT_AAC_LATM,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC)},
+ {AUDIO_FORMAT_AAC_LATM_LC,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)},
+ {AUDIO_FORMAT_AAC_LATM_HE_V1,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)},
+ {AUDIO_FORMAT_AAC_LATM_HE_V2,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_ADAPTIVE,
+ make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")},
+ {AUDIO_FORMAT_MPEGH,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)},
+ {AUDIO_FORMAT_MPEGH_BL_L3,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)},
+ {AUDIO_FORMAT_MPEGH_BL_L4,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)},
+ {AUDIO_FORMAT_MPEGH_LC_L3,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)},
+ {AUDIO_FORMAT_MPEGH_LC_L4,
+ make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)},
+ {AUDIO_FORMAT_IEC60958,
+ make_AudioFormatDescription(PcmType::INT_24_BIT,
+ ::android::MEDIA_MIMETYPE_AUDIO_IEC60958)},
+ {AUDIO_FORMAT_DRA, make_AudioFormatDescription(::android::MEDIA_MIMETYPE_AUDIO_DRA)},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_ADAPTIVE_QLEA,
+ make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r3")},
+ {// Note: not in the IANA registry.
+ AUDIO_FORMAT_APTX_ADAPTIVE_R4,
+ make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r4")},
+ }};
+ return pairs;
+}
+
+template<typename S, typename T>
+std::map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
+ std::map<S, T> result(v.begin(), v.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
+
+template<typename S, typename T>
+std::map<S, T> make_DirectMap(
+ const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
+ std::map<S, T> result(v1.begin(), v1.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
+ result.insert(v2.begin(), v2.end());
+ LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
+ "Duplicate key elements detected in v1+v2");
+ return result;
+}
+
+template<typename S, typename T>
+std::map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
+ std::map<T, S> result;
+ std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
+ [](const std::pair<S, T>& p) {
+ return std::make_pair(p.second, p.first);
+ });
+ LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
+ return result;
+}
+
+} // namespace
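
Note: the make_DirectMap / make_ReverseMap helpers above turn the pair tables into lookup maps in both directions and abort on duplicate keys. A self-contained toy sketch of the pattern (toy types, not the real audio tables):

    #include <cassert>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    static void directReverseMapExample() {
        const std::vector<std::pair<int, std::string>> table = {
            {1, "one"}, {2, "two"}, {3, "three"},
        };
        // Direct map: legacy key -> AIDL-style value.
        std::map<int, std::string> direct(table.begin(), table.end());
        // Reverse map: AIDL-style value -> legacy key; duplicate values would
        // collapse here, which is why the real helpers LOG_ALWAYS_FATAL when
        // the map size does not match the table size.
        std::map<std::string, int> reverse;
        for (const auto& [k, v] : table) reverse.emplace(v, k);

        assert(direct.at(2) == "two");
        assert(reverse.at("three") == 3);
    }
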
+
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ int aidlLayout, bool isInput) {
+ auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+ const int aidlLayoutInitial = aidlLayout; // for error message
+ audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
+ for (const auto& bitPair : bitMapping) {
+ if ((aidlLayout & bitPair.second) == bitPair.second) {
+ legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
+ aidlLayout &= ~bitPair.second;
+ if (aidlLayout == 0) {
+ return legacy;
+ }
+ }
+ }
+ ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
+ __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
+ return AUDIO_CHANNEL_NONE;
+}
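
Note: for intuition, the loop above peels known layout bits off the AIDL value one mapping at a time and fails with AUDIO_CHANNEL_NONE if anything is left over. A hypothetical call using this file's declarations:

    // FRONT_LEFT | FRONT_RIGHT maps onto the legacy stereo output mask.
    audio_channel_mask_t mask =
            aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
                    AudioChannelLayout::CHANNEL_FRONT_LEFT |
                    AudioChannelLayout::CHANNEL_FRONT_RIGHT,
                    false /*isInput*/);
    // mask == (AUDIO_CHANNEL_OUT_FRONT_LEFT | AUDIO_CHANNEL_OUT_FRONT_RIGHT),
    // i.e. AUDIO_CHANNEL_OUT_STEREO.
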
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const AudioChannelLayout& aidl, bool isInput) {
+ using ReverseMap = std::map<AudioChannelLayout, audio_channel_mask_t>;
+ using Tag = AudioChannelLayout::Tag;
+ static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
+ static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
+ static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
+
+ auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
+ const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
+ aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ switch (aidl.getTag()) {
+ case Tag::none:
+ return AUDIO_CHANNEL_NONE;
+ case Tag::invalid:
+ return AUDIO_CHANNEL_INVALID;
+ case Tag::indexMask:
+ // Index masks do not have pre-defined values.
+ if (const int bits = aidl.get<Tag::indexMask>();
+ __builtin_popcount(bits) != 0 &&
+ __builtin_popcount(bits) <= (int)AUDIO_CHANNEL_COUNT_MAX) {
+ return audio_channel_mask_from_representation_and_bits(
+ AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
+ } else {
+ ALOGE("%s: invalid indexMask value 0x%x in %s",
+ __func__, bits, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ case Tag::layoutMask:
+ // The fast path is to find a direct match for some known layout mask.
+ if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
+ isInput ? "input" : "output");
+ layoutMatch.ok()) {
+ return layoutMatch;
+ }
+ // If a match for a predefined layout wasn't found, make a custom one from bits.
+ if (audio_channel_mask_t bitMask =
+ aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ aidl.get<Tag::layoutMask>(), isInput);
+ bitMask != AUDIO_CHANNEL_NONE) {
+ return bitMask;
+ }
+ return unexpected(BAD_VALUE);
+ case Tag::voiceMask:
+ return convert(aidl, mVoice, __func__, "voice");
+ }
+ ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
+ return unexpected(BAD_VALUE);
+}
+
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+ audio_channel_mask_t legacy, bool isInput) {
+ auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
+ const int legacyInitial = legacy; // for error message
+ int aidlLayout = 0;
+ for (const auto& bitPair : bitMapping) {
+ if ((legacy & bitPair.first) == bitPair.first) {
+ aidlLayout |= bitPair.second;
+ legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
+ if (legacy == 0) {
+ return aidlLayout;
+ }
+ }
+ }
+ ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
+ __func__, isInput ? "input" : "output", legacyInitial, legacy);
+ return 0;
+}
+
+ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ audio_channel_mask_t legacy, bool isInput) {
+ using DirectMap = std::map<audio_channel_mask_t, AudioChannelLayout>;
+ using Tag = AudioChannelLayout::Tag;
+ static const DirectMap mInAndVoice = make_DirectMap(
+ getInAudioChannelPairs(), getVoiceAudioChannelPairs());
+ static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
+
+ auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
+ const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
+ func, type, legacy);
+ return unexpected(BAD_VALUE);
+ }
+ };
+
+ if (legacy == AUDIO_CHANNEL_NONE) {
+ return AudioChannelLayout{};
+ } else if (legacy == AUDIO_CHANNEL_INVALID) {
+ return AudioChannelLayout::make<Tag::invalid>(0);
+ }
+
+ const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
+ if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ if (audio_channel_mask_is_valid(legacy)) {
+ const int indexMask = VALUE_OR_RETURN(
+ convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
+ return AudioChannelLayout::make<Tag::indexMask>(indexMask);
+ } else {
+ ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+ } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+ // The fast path is to find a direct match for some known layout mask.
+ if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
+ isInput ? "input / voice" : "output");
+ layoutMatch.ok()) {
+ return layoutMatch;
+ }
+ // If a match for a predefined layout wasn't found, make a custom one from bits,
+ // rejecting those with voice channel bits.
+ if (!isInput ||
+ (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
+ if (int bitMaskLayout =
+ legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+ legacy, isInput);
+ bitMaskLayout != 0) {
+ return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
+ }
+ } else {
+ ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
+ __func__, legacy);
+ }
+ return unexpected(BAD_VALUE);
+ }
+
+ ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
+ __func__, repr, legacy);
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const AudioDeviceDescription& aidl) {
+ static const std::map<AudioDeviceDescription, audio_devices_t> m =
+ make_ReverseMap(getAudioDevicePairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ audio_devices_t legacy) {
+ static const std::map<audio_devices_t, AudioDeviceDescription> m =
+ make_DirectMap(getAudioDevicePairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+}
+
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, char* legacyAddress) {
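+    // Only the string ('id') variant of AudioDeviceAddress maps to the legacy address representation.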
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ return aidl2legacy_string(
+ aidl.address.get<AudioDeviceAddress::id>(),
+ legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
+}
+
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, String8* legacyAddress) {
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
+ aidl.address.get<AudioDeviceAddress::id>()));
+ return OK;
+}
+
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const AudioDevice& aidl,
+ audio_devices_t* legacyType, std::string* legacyAddress) {
+ *legacyType = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
+ *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
+ return OK;
+}
+
+ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const char* legacyAddress) {
+ AudioDevice aidl;
+ aidl.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+ const std::string aidl_id = VALUE_OR_RETURN(
+ legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
+ aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+ return aidl;
+}
+
+ConversionResult<AudioDevice>
+legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const String8& legacyAddress) {
+ AudioDevice aidl;
+ aidl.type = VALUE_OR_RETURN(
+ legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
+ const std::string aidl_id = VALUE_OR_RETURN(
+ legacy2aidl_String8_string(legacyAddress));
+ aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
+ return aidl;
+}
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const AudioFormatDescription& aidl) {
+ static const std::map<AudioFormatDescription, audio_format_t> m =
+ make_ReverseMap(getAudioFormatPairs());
+ if (auto it = m.find(aidl); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
+ audio_format_t legacy) {
+ static const std::map<audio_format_t, AudioFormatDescription> m =
+ make_DirectMap(getAudioFormatPairs());
+ if (auto it = m.find(legacy); it != m.end()) {
+ return it->second;
+ } else {
+ ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
+ __func__, legacy);
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
+ AudioGainMode aidl) {
+ switch (aidl) {
+ case AudioGainMode::JOINT:
+ return AUDIO_GAIN_MODE_JOINT;
+ case AudioGainMode::CHANNELS:
+ return AUDIO_GAIN_MODE_CHANNELS;
+ case AudioGainMode::RAMP:
+ return AUDIO_GAIN_MODE_RAMP;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
+ audio_gain_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_GAIN_MODE_JOINT:
+ return AudioGainMode::JOINT;
+ case AUDIO_GAIN_MODE_CHANNELS:
+ return AudioGainMode::CHANNELS;
+ case AUDIO_GAIN_MODE_RAMP:
+ return AudioGainMode::RAMP;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
+ return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
+ aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
+ // AudioGainMode is index-based.
+ indexToEnum_index<AudioGainMode>,
+ // AUDIO_GAIN_MODE_* constants are mask-based.
+ enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
+ return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
+ legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
+ // AUDIO_GAIN_MODE_* constants are mask-based.
+ indexToEnum_bitmask<audio_gain_mode_t>,
+ // AudioGainMode is index-based.
+ enumToMask_index<int32_t, AudioGainMode>);
+}
+
+ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
+ const AudioGainConfig& aidl, bool isInput) {
+ audio_gain_config legacy;
+ legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
+ legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
+ legacy.channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
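+    // In JOINT mode a single gain value applies to all channels; otherwise there is one value
+    // per channel of the mask.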
+ size_t numValues = isJoint ? 1
+ : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
+ : audio_channel_count_from_out_mask(legacy.channel_mask);
+ if (aidl.values.size() != numValues || aidl.values.size() > std::size(legacy.values)) {
+ return unexpected(BAD_VALUE);
+ }
+ for (size_t i = 0; i < numValues; ++i) {
+ legacy.values[i] = VALUE_OR_RETURN(convertIntegral<int>(aidl.values[i]));
+ }
+ legacy.ramp_duration_ms = VALUE_OR_RETURN(convertIntegral<int>(aidl.rampDurationMs));
+ return legacy;
+}
+
+ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
+ const audio_gain_config& legacy, bool isInput) {
+ AudioGainConfig aidl;
+ aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
+ aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
+ size_t numValues = isJoint ? 1
+ : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
+ : audio_channel_count_from_out_mask(legacy.channel_mask);
+ aidl.values.resize(numValues);
+ for (size_t i = 0; i < numValues; ++i) {
+ aidl.values[i] = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.values[i]));
+ }
+ aidl.rampDurationMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.ramp_duration_ms));
+ return aidl;
+}
+
+ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
+ AudioInputFlags aidl) {
+ switch (aidl) {
+ case AudioInputFlags::FAST:
+ return AUDIO_INPUT_FLAG_FAST;
+ case AudioInputFlags::HW_HOTWORD:
+ return AUDIO_INPUT_FLAG_HW_HOTWORD;
+ case AudioInputFlags::RAW:
+ return AUDIO_INPUT_FLAG_RAW;
+ case AudioInputFlags::SYNC:
+ return AUDIO_INPUT_FLAG_SYNC;
+ case AudioInputFlags::MMAP_NOIRQ:
+ return AUDIO_INPUT_FLAG_MMAP_NOIRQ;
+ case AudioInputFlags::VOIP_TX:
+ return AUDIO_INPUT_FLAG_VOIP_TX;
+ case AudioInputFlags::HW_AV_SYNC:
+ return AUDIO_INPUT_FLAG_HW_AV_SYNC;
+ case AudioInputFlags::DIRECT:
+ return AUDIO_INPUT_FLAG_DIRECT;
+ case AudioInputFlags::ULTRASOUND:
+ return AUDIO_INPUT_FLAG_ULTRASOUND;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
+ audio_input_flags_t legacy) {
+ switch (legacy) {
+ case AUDIO_INPUT_FLAG_NONE:
+            break; // Should not be reached; listed to satisfy -Werror,-Wswitch.
+ case AUDIO_INPUT_FLAG_FAST:
+ return AudioInputFlags::FAST;
+ case AUDIO_INPUT_FLAG_HW_HOTWORD:
+ return AudioInputFlags::HW_HOTWORD;
+ case AUDIO_INPUT_FLAG_RAW:
+ return AudioInputFlags::RAW;
+ case AUDIO_INPUT_FLAG_SYNC:
+ return AudioInputFlags::SYNC;
+ case AUDIO_INPUT_FLAG_MMAP_NOIRQ:
+ return AudioInputFlags::MMAP_NOIRQ;
+ case AUDIO_INPUT_FLAG_VOIP_TX:
+ return AudioInputFlags::VOIP_TX;
+ case AUDIO_INPUT_FLAG_HW_AV_SYNC:
+ return AudioInputFlags::HW_AV_SYNC;
+ case AUDIO_INPUT_FLAG_DIRECT:
+ return AudioInputFlags::DIRECT;
+ case AUDIO_INPUT_FLAG_ULTRASOUND:
+ return AudioInputFlags::ULTRASOUND;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
+ AudioOutputFlags aidl) {
+ switch (aidl) {
+ case AudioOutputFlags::DIRECT:
+ return AUDIO_OUTPUT_FLAG_DIRECT;
+ case AudioOutputFlags::PRIMARY:
+ return AUDIO_OUTPUT_FLAG_PRIMARY;
+ case AudioOutputFlags::FAST:
+ return AUDIO_OUTPUT_FLAG_FAST;
+ case AudioOutputFlags::DEEP_BUFFER:
+ return AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
+ case AudioOutputFlags::COMPRESS_OFFLOAD:
+ return AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
+ case AudioOutputFlags::NON_BLOCKING:
+ return AUDIO_OUTPUT_FLAG_NON_BLOCKING;
+ case AudioOutputFlags::HW_AV_SYNC:
+ return AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
+ case AudioOutputFlags::TTS:
+ return AUDIO_OUTPUT_FLAG_TTS;
+ case AudioOutputFlags::RAW:
+ return AUDIO_OUTPUT_FLAG_RAW;
+ case AudioOutputFlags::SYNC:
+ return AUDIO_OUTPUT_FLAG_SYNC;
+ case AudioOutputFlags::IEC958_NONAUDIO:
+ return AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
+ case AudioOutputFlags::DIRECT_PCM:
+ return AUDIO_OUTPUT_FLAG_DIRECT_PCM;
+ case AudioOutputFlags::MMAP_NOIRQ:
+ return AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
+ case AudioOutputFlags::VOIP_RX:
+ return AUDIO_OUTPUT_FLAG_VOIP_RX;
+ case AudioOutputFlags::INCALL_MUSIC:
+ return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
+ case AudioOutputFlags::GAPLESS_OFFLOAD:
+ return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
+ case AudioOutputFlags::ULTRASOUND:
+ return AUDIO_OUTPUT_FLAG_ULTRASOUND;
+ case AudioOutputFlags::SPATIALIZER:
+ return AUDIO_OUTPUT_FLAG_SPATIALIZER;
+ case AudioOutputFlags::BIT_PERFECT:
+ return AUDIO_OUTPUT_FLAG_BIT_PERFECT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
+ audio_output_flags_t legacy) {
+ switch (legacy) {
+ case AUDIO_OUTPUT_FLAG_NONE:
+            break; // Should not be reached; listed to satisfy -Werror,-Wswitch.
+ case AUDIO_OUTPUT_FLAG_DIRECT:
+ return AudioOutputFlags::DIRECT;
+ case AUDIO_OUTPUT_FLAG_PRIMARY:
+ return AudioOutputFlags::PRIMARY;
+ case AUDIO_OUTPUT_FLAG_FAST:
+ return AudioOutputFlags::FAST;
+ case AUDIO_OUTPUT_FLAG_DEEP_BUFFER:
+ return AudioOutputFlags::DEEP_BUFFER;
+ case AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD:
+ return AudioOutputFlags::COMPRESS_OFFLOAD;
+ case AUDIO_OUTPUT_FLAG_NON_BLOCKING:
+ return AudioOutputFlags::NON_BLOCKING;
+ case AUDIO_OUTPUT_FLAG_HW_AV_SYNC:
+ return AudioOutputFlags::HW_AV_SYNC;
+ case AUDIO_OUTPUT_FLAG_TTS:
+ return AudioOutputFlags::TTS;
+ case AUDIO_OUTPUT_FLAG_RAW:
+ return AudioOutputFlags::RAW;
+ case AUDIO_OUTPUT_FLAG_SYNC:
+ return AudioOutputFlags::SYNC;
+ case AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO:
+ return AudioOutputFlags::IEC958_NONAUDIO;
+ case AUDIO_OUTPUT_FLAG_DIRECT_PCM:
+ return AudioOutputFlags::DIRECT_PCM;
+ case AUDIO_OUTPUT_FLAG_MMAP_NOIRQ:
+ return AudioOutputFlags::MMAP_NOIRQ;
+ case AUDIO_OUTPUT_FLAG_VOIP_RX:
+ return AudioOutputFlags::VOIP_RX;
+ case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
+ return AudioOutputFlags::INCALL_MUSIC;
+ case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
+ return AudioOutputFlags::GAPLESS_OFFLOAD;
+ case AUDIO_OUTPUT_FLAG_ULTRASOUND:
+ return AudioOutputFlags::ULTRASOUND;
+ case AUDIO_OUTPUT_FLAG_SPATIALIZER:
+ return AudioOutputFlags::SPATIALIZER;
+ case AUDIO_OUTPUT_FLAG_BIT_PERFECT:
+ return AudioOutputFlags::BIT_PERFECT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+ int32_t aidl) {
+ using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
+
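+    // AudioInputFlags is an index-based enum, whereas AUDIO_INPUT_FLAG_* constants are bit masks.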
+ LegacyMask converted = VALUE_OR_RETURN(
+ (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, AudioInputFlags>(
+ aidl, aidl2legacy_AudioInputFlags_audio_input_flags_t,
+ indexToEnum_index<AudioInputFlags>,
+ enumToMask_bitmask<LegacyMask, audio_input_flags_t>)));
+ return static_cast<audio_input_flags_t>(converted);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+ audio_input_flags_t legacy) {
+ using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
+
+ LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
+ return convertBitmask<int32_t, LegacyMask, AudioInputFlags, audio_input_flags_t>(
+ legacyMask, legacy2aidl_audio_input_flags_t_AudioInputFlags,
+ indexToEnum_bitmask<audio_input_flags_t>,
+ enumToMask_index<int32_t, AudioInputFlags>);
+}
+
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+ int32_t aidl) {
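+    // AudioOutputFlags is an index-based enum, whereas AUDIO_OUTPUT_FLAG_* constants are bit masks.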
+ return convertBitmask<audio_output_flags_t,
+ int32_t,
+ audio_output_flags_t,
+ AudioOutputFlags>(
+ aidl, aidl2legacy_AudioOutputFlags_audio_output_flags_t,
+ indexToEnum_index<AudioOutputFlags>,
+ enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+ audio_output_flags_t legacy) {
+ using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
+
+ LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
+ return convertBitmask<int32_t, LegacyMask, AudioOutputFlags, audio_output_flags_t>(
+ legacyMask, legacy2aidl_audio_output_flags_t_AudioOutputFlags,
+ indexToEnum_bitmask<audio_output_flags_t>,
+ enumToMask_index<int32_t, AudioOutputFlags>);
+}
+
+ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
+ const AudioIoFlags& aidl, bool isInput) {
+ audio_io_flags legacy;
+ if (isInput) {
+ legacy.input = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_input_flags_t_mask(
+ VALUE_OR_RETURN(UNION_GET(aidl, input))));
+ } else {
+ legacy.output = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_output_flags_t_mask(
+ VALUE_OR_RETURN(UNION_GET(aidl, output))));
+ }
+ return legacy;
+}
+
+ConversionResult<AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+ const audio_io_flags& legacy, bool isInput) {
+ AudioIoFlags aidl;
+ if (isInput) {
+ UNION_SET(aidl, input,
+ VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(legacy.input)));
+ } else {
+ UNION_SET(aidl, output,
+ VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(legacy.output)));
+ }
+ return aidl;
+}
+
+ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
+ AudioStreamType aidl) {
+ switch (aidl) {
+ case AudioStreamType::INVALID:
+ break; // return error
+ case AudioStreamType::SYS_RESERVED_DEFAULT:
+ return AUDIO_STREAM_DEFAULT;
+ case AudioStreamType::VOICE_CALL:
+ return AUDIO_STREAM_VOICE_CALL;
+ case AudioStreamType::SYSTEM:
+ return AUDIO_STREAM_SYSTEM;
+ case AudioStreamType::RING:
+ return AUDIO_STREAM_RING;
+ case AudioStreamType::MUSIC:
+ return AUDIO_STREAM_MUSIC;
+ case AudioStreamType::ALARM:
+ return AUDIO_STREAM_ALARM;
+ case AudioStreamType::NOTIFICATION:
+ return AUDIO_STREAM_NOTIFICATION;
+ case AudioStreamType::BLUETOOTH_SCO:
+ return AUDIO_STREAM_BLUETOOTH_SCO;
+ case AudioStreamType::ENFORCED_AUDIBLE:
+ return AUDIO_STREAM_ENFORCED_AUDIBLE;
+ case AudioStreamType::DTMF:
+ return AUDIO_STREAM_DTMF;
+ case AudioStreamType::TTS:
+ return AUDIO_STREAM_TTS;
+ case AudioStreamType::ACCESSIBILITY:
+ return AUDIO_STREAM_ACCESSIBILITY;
+ case AudioStreamType::ASSISTANT:
+ return AUDIO_STREAM_ASSISTANT;
+ case AudioStreamType::SYS_RESERVED_REROUTING:
+ return AUDIO_STREAM_REROUTING;
+ case AudioStreamType::SYS_RESERVED_PATCH:
+ return AUDIO_STREAM_PATCH;
+ case AudioStreamType::CALL_ASSISTANT:
+ return AUDIO_STREAM_CALL_ASSISTANT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
+ audio_stream_type_t legacy) {
+ switch (legacy) {
+ case AUDIO_STREAM_DEFAULT:
+ return AudioStreamType::SYS_RESERVED_DEFAULT;
+ case AUDIO_STREAM_VOICE_CALL:
+ return AudioStreamType::VOICE_CALL;
+ case AUDIO_STREAM_SYSTEM:
+ return AudioStreamType::SYSTEM;
+ case AUDIO_STREAM_RING:
+ return AudioStreamType::RING;
+ case AUDIO_STREAM_MUSIC:
+ return AudioStreamType::MUSIC;
+ case AUDIO_STREAM_ALARM:
+ return AudioStreamType::ALARM;
+ case AUDIO_STREAM_NOTIFICATION:
+ return AudioStreamType::NOTIFICATION;
+ case AUDIO_STREAM_BLUETOOTH_SCO:
+ return AudioStreamType::BLUETOOTH_SCO;
+ case AUDIO_STREAM_ENFORCED_AUDIBLE:
+ return AudioStreamType::ENFORCED_AUDIBLE;
+ case AUDIO_STREAM_DTMF:
+ return AudioStreamType::DTMF;
+ case AUDIO_STREAM_TTS:
+ return AudioStreamType::TTS;
+ case AUDIO_STREAM_ACCESSIBILITY:
+ return AudioStreamType::ACCESSIBILITY;
+ case AUDIO_STREAM_ASSISTANT:
+ return AudioStreamType::ASSISTANT;
+ case AUDIO_STREAM_REROUTING:
+ return AudioStreamType::SYS_RESERVED_REROUTING;
+ case AUDIO_STREAM_PATCH:
+ return AudioStreamType::SYS_RESERVED_PATCH;
+ case AUDIO_STREAM_CALL_ASSISTANT:
+ return AudioStreamType::CALL_ASSISTANT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+ AudioSource aidl) {
+ switch (aidl) {
+ case AudioSource::SYS_RESERVED_INVALID:
+ return AUDIO_SOURCE_INVALID;
+ case AudioSource::DEFAULT:
+ return AUDIO_SOURCE_DEFAULT;
+ case AudioSource::MIC:
+ return AUDIO_SOURCE_MIC;
+ case AudioSource::VOICE_UPLINK:
+ return AUDIO_SOURCE_VOICE_UPLINK;
+ case AudioSource::VOICE_DOWNLINK:
+ return AUDIO_SOURCE_VOICE_DOWNLINK;
+ case AudioSource::VOICE_CALL:
+ return AUDIO_SOURCE_VOICE_CALL;
+ case AudioSource::CAMCORDER:
+ return AUDIO_SOURCE_CAMCORDER;
+ case AudioSource::VOICE_RECOGNITION:
+ return AUDIO_SOURCE_VOICE_RECOGNITION;
+ case AudioSource::VOICE_COMMUNICATION:
+ return AUDIO_SOURCE_VOICE_COMMUNICATION;
+ case AudioSource::REMOTE_SUBMIX:
+ return AUDIO_SOURCE_REMOTE_SUBMIX;
+ case AudioSource::UNPROCESSED:
+ return AUDIO_SOURCE_UNPROCESSED;
+ case AudioSource::VOICE_PERFORMANCE:
+ return AUDIO_SOURCE_VOICE_PERFORMANCE;
+ case AudioSource::ULTRASOUND:
+ return AUDIO_SOURCE_ULTRASOUND;
+ case AudioSource::ECHO_REFERENCE:
+ return AUDIO_SOURCE_ECHO_REFERENCE;
+ case AudioSource::FM_TUNER:
+ return AUDIO_SOURCE_FM_TUNER;
+ case AudioSource::HOTWORD:
+ return AUDIO_SOURCE_HOTWORD;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
+ audio_source_t legacy) {
+ switch (legacy) {
+ case AUDIO_SOURCE_INVALID:
+ return AudioSource::SYS_RESERVED_INVALID;
+ case AUDIO_SOURCE_DEFAULT:
+ return AudioSource::DEFAULT;
+ case AUDIO_SOURCE_MIC:
+ return AudioSource::MIC;
+ case AUDIO_SOURCE_VOICE_UPLINK:
+ return AudioSource::VOICE_UPLINK;
+ case AUDIO_SOURCE_VOICE_DOWNLINK:
+ return AudioSource::VOICE_DOWNLINK;
+ case AUDIO_SOURCE_VOICE_CALL:
+ return AudioSource::VOICE_CALL;
+ case AUDIO_SOURCE_CAMCORDER:
+ return AudioSource::CAMCORDER;
+ case AUDIO_SOURCE_VOICE_RECOGNITION:
+ return AudioSource::VOICE_RECOGNITION;
+ case AUDIO_SOURCE_VOICE_COMMUNICATION:
+ return AudioSource::VOICE_COMMUNICATION;
+ case AUDIO_SOURCE_REMOTE_SUBMIX:
+ return AudioSource::REMOTE_SUBMIX;
+ case AUDIO_SOURCE_UNPROCESSED:
+ return AudioSource::UNPROCESSED;
+ case AUDIO_SOURCE_VOICE_PERFORMANCE:
+ return AudioSource::VOICE_PERFORMANCE;
+ case AUDIO_SOURCE_ULTRASOUND:
+ return AudioSource::ULTRASOUND;
+ case AUDIO_SOURCE_ECHO_REFERENCE:
+ return AudioSource::ECHO_REFERENCE;
+ case AUDIO_SOURCE_FM_TUNER:
+ return AudioSource::FM_TUNER;
+ case AUDIO_SOURCE_HOTWORD:
+ return AudioSource::HOTWORD;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl) {
+ return convertReinterpret<audio_session_t>(aidl);
+}
+
+ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy) {
+ return convertReinterpret<int32_t>(legacy);
+}
+
+ConversionResult<audio_content_type_t>
+aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
+ switch (aidl) {
+ case AudioContentType::UNKNOWN:
+ return AUDIO_CONTENT_TYPE_UNKNOWN;
+ case AudioContentType::SPEECH:
+ return AUDIO_CONTENT_TYPE_SPEECH;
+ case AudioContentType::MUSIC:
+ return AUDIO_CONTENT_TYPE_MUSIC;
+ case AudioContentType::MOVIE:
+ return AUDIO_CONTENT_TYPE_MOVIE;
+ case AudioContentType::SONIFICATION:
+ return AUDIO_CONTENT_TYPE_SONIFICATION;
+ case AudioContentType::ULTRASOUND:
+ return AUDIO_CONTENT_TYPE_ULTRASOUND;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioContentType>
+legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
+ switch (legacy) {
+ case AUDIO_CONTENT_TYPE_UNKNOWN:
+ return AudioContentType::UNKNOWN;
+ case AUDIO_CONTENT_TYPE_SPEECH:
+ return AudioContentType::SPEECH;
+ case AUDIO_CONTENT_TYPE_MUSIC:
+ return AudioContentType::MUSIC;
+ case AUDIO_CONTENT_TYPE_MOVIE:
+ return AudioContentType::MOVIE;
+ case AUDIO_CONTENT_TYPE_SONIFICATION:
+ return AudioContentType::SONIFICATION;
+ case AUDIO_CONTENT_TYPE_ULTRASOUND:
+ return AudioContentType::ULTRASOUND;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_usage_t>
+aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
+ switch (aidl) {
+ case AudioUsage::INVALID:
+ break; // return error
+ case AudioUsage::UNKNOWN:
+ return AUDIO_USAGE_UNKNOWN;
+ case AudioUsage::MEDIA:
+ return AUDIO_USAGE_MEDIA;
+ case AudioUsage::VOICE_COMMUNICATION:
+ return AUDIO_USAGE_VOICE_COMMUNICATION;
+ case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
+ return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
+ case AudioUsage::ALARM:
+ return AUDIO_USAGE_ALARM;
+ case AudioUsage::NOTIFICATION:
+ return AUDIO_USAGE_NOTIFICATION;
+ case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
+ return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
+ return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
+ return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
+ case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
+ return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
+ case AudioUsage::NOTIFICATION_EVENT:
+ return AUDIO_USAGE_NOTIFICATION_EVENT;
+ case AudioUsage::ASSISTANCE_ACCESSIBILITY:
+ return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
+ case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
+ return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
+ case AudioUsage::ASSISTANCE_SONIFICATION:
+ return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
+ case AudioUsage::GAME:
+ return AUDIO_USAGE_GAME;
+ case AudioUsage::VIRTUAL_SOURCE:
+ return AUDIO_USAGE_VIRTUAL_SOURCE;
+ case AudioUsage::ASSISTANT:
+ return AUDIO_USAGE_ASSISTANT;
+ case AudioUsage::CALL_ASSISTANT:
+ return AUDIO_USAGE_CALL_ASSISTANT;
+ case AudioUsage::EMERGENCY:
+ return AUDIO_USAGE_EMERGENCY;
+ case AudioUsage::SAFETY:
+ return AUDIO_USAGE_SAFETY;
+ case AudioUsage::VEHICLE_STATUS:
+ return AUDIO_USAGE_VEHICLE_STATUS;
+ case AudioUsage::ANNOUNCEMENT:
+ return AUDIO_USAGE_ANNOUNCEMENT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioUsage>
+legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
+ switch (legacy) {
+ case AUDIO_USAGE_UNKNOWN:
+ return AudioUsage::UNKNOWN;
+ case AUDIO_USAGE_MEDIA:
+ return AudioUsage::MEDIA;
+ case AUDIO_USAGE_VOICE_COMMUNICATION:
+ return AudioUsage::VOICE_COMMUNICATION;
+ case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
+ return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
+ case AUDIO_USAGE_ALARM:
+ return AudioUsage::ALARM;
+ case AUDIO_USAGE_NOTIFICATION:
+ return AudioUsage::NOTIFICATION;
+ case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
+ return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
+ case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
+ return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
+ case AUDIO_USAGE_NOTIFICATION_EVENT:
+ return AudioUsage::NOTIFICATION_EVENT;
+ case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
+ return AudioUsage::ASSISTANCE_ACCESSIBILITY;
+ case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
+ return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
+ case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
+ return AudioUsage::ASSISTANCE_SONIFICATION;
+ case AUDIO_USAGE_GAME:
+ return AudioUsage::GAME;
+ case AUDIO_USAGE_VIRTUAL_SOURCE:
+ return AudioUsage::VIRTUAL_SOURCE;
+ case AUDIO_USAGE_ASSISTANT:
+ return AudioUsage::ASSISTANT;
+ case AUDIO_USAGE_CALL_ASSISTANT:
+ return AudioUsage::CALL_ASSISTANT;
+ case AUDIO_USAGE_EMERGENCY:
+ return AudioUsage::EMERGENCY;
+ case AUDIO_USAGE_SAFETY:
+ return AudioUsage::SAFETY;
+ case AUDIO_USAGE_VEHICLE_STATUS:
+ return AudioUsage::VEHICLE_STATUS;
+ case AUDIO_USAGE_ANNOUNCEMENT:
+ return AudioUsage::ANNOUNCEMENT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+
+ConversionResult<audio_encapsulation_mode_t>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
+ switch (aidl) {
+ case AudioEncapsulationMode::INVALID:
+ break; // return error
+ case AudioEncapsulationMode::NONE:
+ return AUDIO_ENCAPSULATION_MODE_NONE;
+ case AudioEncapsulationMode::ELEMENTARY_STREAM:
+ return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
+ case AudioEncapsulationMode::HANDLE:
+ return AUDIO_ENCAPSULATION_MODE_HANDLE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioEncapsulationMode>
+legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_ENCAPSULATION_MODE_NONE:
+ return AudioEncapsulationMode::NONE;
+ case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
+ return AudioEncapsulationMode::ELEMENTARY_STREAM;
+ case AUDIO_ENCAPSULATION_MODE_HANDLE:
+ return AudioEncapsulationMode::HANDLE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_offload_info_t>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
+ audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
+ audio_config_base_t base = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
+ legacy.sample_rate = base.sample_rate;
+ legacy.channel_mask = base.channel_mask;
+ legacy.format = base.format;
+ legacy.stream_type = VALUE_OR_RETURN(
+ aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
+ legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<int32_t>(aidl.bitRatePerSecond));
+ legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
+ legacy.has_video = aidl.hasVideo;
+ legacy.is_streaming = aidl.isStreaming;
+ legacy.bit_width = VALUE_OR_RETURN(convertIntegral<int32_t>(aidl.bitWidth));
+ legacy.offload_buffer_size = VALUE_OR_RETURN(convertIntegral<int32_t>(aidl.offloadBufferSize));
+ legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
+ legacy.encapsulation_mode = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(aidl.encapsulationMode));
+ legacy.content_id = VALUE_OR_RETURN(convertReinterpret<int32_t>(aidl.contentId));
+ legacy.sync_id = VALUE_OR_RETURN(convertReinterpret<int32_t>(aidl.syncId));
+ return legacy;
+}
+
+ConversionResult<AudioOffloadInfo>
+legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
+ AudioOffloadInfo aidl;
+ // Version 0.1 fields.
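+    // Reject a struct that is too small to contain the fields read below (up to 'usage').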
+ if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
+ return unexpected(BAD_VALUE);
+ }
+ const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+ .channel_mask = legacy.channel_mask, .format = legacy.format };
+ aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
+ base, false /*isInput*/));
+ aidl.streamType = VALUE_OR_RETURN(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
+ aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
+ aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
+ aidl.hasVideo = legacy.has_video;
+ aidl.isStreaming = legacy.is_streaming;
+ aidl.bitWidth = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_width));
+ aidl.offloadBufferSize = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.offload_buffer_size));
+ aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
+
+ // Version 0.2 fields.
+ if (legacy.version >= AUDIO_OFFLOAD_INFO_VERSION_0_2) {
+ if (legacy.size <
+ offsetof(audio_offload_info_t, sync_id) + sizeof(audio_offload_info_t::sync_id)) {
+ return unexpected(BAD_VALUE);
+ }
+ aidl.encapsulationMode = VALUE_OR_RETURN(
+ legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(
+ legacy.encapsulation_mode));
+ aidl.contentId = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.content_id));
+ aidl.syncId = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.sync_id));
+ }
+ return aidl;
+}
+
+ConversionResult<audio_config_t>
+aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
+ const audio_config_base_t legacyBase = VALUE_OR_RETURN(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
+ audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
+ legacy.sample_rate = legacyBase.sample_rate;
+ legacy.channel_mask = legacyBase.channel_mask;
+ legacy.format = legacyBase.format;
+ legacy.offload_info = VALUE_OR_RETURN(
+ aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
+ legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
+ return legacy;
+}
+
+ConversionResult<AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
+ const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
+ .channel_mask = legacy.channel_mask, .format = legacy.format };
+ AudioConfig aidl;
+ aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
+ aidl.offloadInfo = VALUE_OR_RETURN(
+ legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
+ aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
+ return aidl;
+}
+
+ConversionResult<audio_config_base_t>
+aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
+ audio_config_base_t legacy;
+ legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<int>(aidl.sampleRate));
+ legacy.channel_mask = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+ return legacy;
+}
+
+ConversionResult<AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
+ AudioConfigBase aidl;
+ aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
+ return aidl;
+}
+
+ConversionResult<audio_uuid_t>
+aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
+ audio_uuid_t legacy;
+ legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
+ legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
+ legacy.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeHiAndVersion));
+ legacy.clockSeq = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.clockSeq));
+ if (aidl.node.size() != std::size(legacy.node)) {
+ return unexpected(BAD_VALUE);
+ }
+ std::copy(aidl.node.begin(), aidl.node.end(), legacy.node);
+ return legacy;
+}
+
+ConversionResult<AudioUuid>
+legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
+ AudioUuid aidl;
+ aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
+ aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
+ aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
+ aidl.clockSeq = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.clockSeq));
+ std::copy(legacy.node, legacy.node + std::size(legacy.node), std::back_inserter(aidl.node));
+ return aidl;
+}
+
+ConversionResult<audio_encapsulation_metadata_type_t>
+aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
+ AudioEncapsulationMetadataType aidl) {
+ switch (aidl) {
+ case AudioEncapsulationMetadataType::NONE:
+ return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
+ case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
+ return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
+ case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
+ return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioEncapsulationMetadataType>
+legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
+ audio_encapsulation_metadata_type_t legacy) {
+ switch (legacy) {
+ case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
+ return AudioEncapsulationMetadataType::NONE;
+ case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
+ return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
+ case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
+ return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t>
+aidl2legacy_AudioEncapsulationMode_mask(int32_t aidl) {
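+    // The legacy mask uses each encapsulation mode value as a bit position.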
+ return convertBitmask<uint32_t,
+ int32_t,
+ audio_encapsulation_mode_t,
+ AudioEncapsulationMode>(
+ aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
+ indexToEnum_index<AudioEncapsulationMode>,
+ enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
+}
+
+ConversionResult<int32_t>
+legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
+ return convertBitmask<int32_t,
+ uint32_t,
+ AudioEncapsulationMode,
+ audio_encapsulation_mode_t>(
+ legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
+ indexToEnum_index<audio_encapsulation_mode_t>,
+ enumToMask_index<int32_t, AudioEncapsulationMode>);
+}
+
+ConversionResult<uint32_t>
+aidl2legacy_AudioEncapsulationMetadataType_mask(int32_t aidl) {
+ return convertBitmask<uint32_t,
+ int32_t,
+ audio_encapsulation_metadata_type_t,
+ AudioEncapsulationMetadataType>(
+ aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
+ indexToEnum_index<AudioEncapsulationMetadataType>,
+ enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
+}
+
+ConversionResult<int32_t>
+legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
+ return convertBitmask<int32_t,
+ uint32_t,
+ AudioEncapsulationMetadataType,
+ audio_encapsulation_metadata_type_t>(
+ legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
+ indexToEnum_index<audio_encapsulation_metadata_type_t>,
+ enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
+}
+
+ConversionResult<audio_profile>
+aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
+ audio_profile legacy;
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+
+ if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
+ convertIntegral<int32_t, unsigned int>));
+ legacy.num_sample_rates = aidl.sampleRates.size();
+
+ if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
+ [isInput](const AudioChannelLayout& l) {
+ return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
+ }));
+ legacy.num_channel_masks = aidl.channelMasks.size();
+
+ legacy.encapsulation_type = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(aidl.encapsulationType));
+ return legacy;
+}
+
+ConversionResult<AudioProfile>
+legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
+ AudioProfile aidl;
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
+
+ if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
+ std::back_inserter(aidl.sampleRates),
+ convertIntegral<unsigned int, int32_t>));
+
+ if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
+ std::back_inserter(aidl.channelMasks),
+ [isInput](audio_channel_mask_t m) {
+ return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
+ }));
+
+ aidl.encapsulationType = VALUE_OR_RETURN(
+ legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
+ legacy.encapsulation_type));
+ return aidl;
+}
+
+ConversionResult<audio_gain>
+aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
+ audio_gain legacy;
+ legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
+ legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, isInput));
+ legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
+ legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
+ legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
+ legacy.step_value = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.stepValue));
+ legacy.min_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.minRampMs));
+ legacy.max_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.maxRampMs));
+ return legacy;
+}
+
+ConversionResult<AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
+ AudioGain aidl;
+ aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
+ aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
+ aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
+ aidl.stepValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.step_value));
+ aidl.minRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_ramp_ms));
+ aidl.maxRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_ramp_ms));
+ return aidl;
+}
+
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
+ switch (aidl) {
+ case AudioMode::SYS_RESERVED_INVALID:
+ return AUDIO_MODE_INVALID;
+ case AudioMode::SYS_RESERVED_CURRENT:
+ return AUDIO_MODE_CURRENT;
+ case AudioMode::NORMAL:
+ return AUDIO_MODE_NORMAL;
+ case AudioMode::RINGTONE:
+ return AUDIO_MODE_RINGTONE;
+ case AudioMode::IN_CALL:
+ return AUDIO_MODE_IN_CALL;
+ case AudioMode::IN_COMMUNICATION:
+ return AUDIO_MODE_IN_COMMUNICATION;
+ case AudioMode::CALL_SCREEN:
+ return AUDIO_MODE_CALL_SCREEN;
+ case AudioMode::SYS_RESERVED_CALL_REDIRECT:
+ return AUDIO_MODE_CALL_REDIRECT;
+ case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
+ return AUDIO_MODE_COMMUNICATION_REDIRECT;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_MODE_INVALID:
+ return AudioMode::SYS_RESERVED_INVALID;
+ case AUDIO_MODE_CURRENT:
+ return AudioMode::SYS_RESERVED_CURRENT;
+ case AUDIO_MODE_NORMAL:
+ return AudioMode::NORMAL;
+ case AUDIO_MODE_RINGTONE:
+ return AudioMode::RINGTONE;
+ case AUDIO_MODE_IN_CALL:
+ return AudioMode::IN_CALL;
+ case AUDIO_MODE_IN_COMMUNICATION:
+ return AudioMode::IN_COMMUNICATION;
+ case AUDIO_MODE_CALL_SCREEN:
+ return AudioMode::CALL_SCREEN;
+ case AUDIO_MODE_CALL_REDIRECT:
+ return AudioMode::SYS_RESERVED_CALL_REDIRECT;
+ case AUDIO_MODE_COMMUNICATION_REDIRECT:
+ return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
+ case AUDIO_MODE_CNT:
+ break;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_standard_t>
+aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
+ switch (aidl) {
+ case AudioStandard::NONE:
+ return AUDIO_STANDARD_NONE;
+ case AudioStandard::EDID:
+ return AUDIO_STANDARD_EDID;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioStandard>
+legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
+ switch (legacy) {
+ case AUDIO_STANDARD_NONE:
+ return AudioStandard::NONE;
+ case AUDIO_STANDARD_EDID:
+ return AudioStandard::EDID;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_extra_audio_descriptor>
+aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
+ const ExtraAudioDescriptor& aidl) {
+ audio_extra_audio_descriptor legacy;
+ legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
+ if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
+ return unexpected(BAD_VALUE);
+ }
+ legacy.descriptor_length = aidl.audioDescriptor.size();
+ std::copy(aidl.audioDescriptor.begin(), aidl.audioDescriptor.end(),
+ std::begin(legacy.descriptor));
+ legacy.encapsulation_type =
+ VALUE_OR_RETURN(aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
+ aidl.encapsulationType));
+ return legacy;
+}
+
+ConversionResult<ExtraAudioDescriptor>
+legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
+ const audio_extra_audio_descriptor& legacy) {
+ ExtraAudioDescriptor aidl;
+ aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
+ if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
+ return unexpected(BAD_VALUE);
+ }
+ aidl.audioDescriptor.resize(legacy.descriptor_length);
+ std::copy(legacy.descriptor, legacy.descriptor + legacy.descriptor_length,
+ aidl.audioDescriptor.begin());
+ aidl.encapsulationType =
+ VALUE_OR_RETURN(legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
+ legacy.encapsulation_type));
+ return aidl;
+}
+
+ConversionResult<audio_encapsulation_type_t>
+aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
+ const AudioEncapsulationType& aidl) {
+ switch (aidl) {
+ case AudioEncapsulationType::NONE:
+ return AUDIO_ENCAPSULATION_TYPE_NONE;
+ case AudioEncapsulationType::IEC61937:
+ return AUDIO_ENCAPSULATION_TYPE_IEC61937;
+ case AudioEncapsulationType::PCM:
+ return AUDIO_ENCAPSULATION_TYPE_PCM;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioEncapsulationType>
+legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
+ const audio_encapsulation_type_t & legacy) {
+ switch (legacy) {
+ case AUDIO_ENCAPSULATION_TYPE_NONE:
+ return AudioEncapsulationType::NONE;
+ case AUDIO_ENCAPSULATION_TYPE_IEC61937:
+ return AudioEncapsulationType::IEC61937;
+ case AUDIO_ENCAPSULATION_TYPE_PCM:
+ return AudioEncapsulationType::PCM;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_dual_mono_mode_t>
+aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(AudioDualMonoMode aidl) {
+ switch (aidl) {
+ case AudioDualMonoMode::OFF:
+ return AUDIO_DUAL_MONO_MODE_OFF;
+ case AudioDualMonoMode::LR:
+ return AUDIO_DUAL_MONO_MODE_LR;
+ case AudioDualMonoMode::LL:
+ return AUDIO_DUAL_MONO_MODE_LL;
+ case AudioDualMonoMode::RR:
+ return AUDIO_DUAL_MONO_MODE_RR;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioDualMonoMode>
+legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(audio_dual_mono_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_DUAL_MONO_MODE_OFF:
+ return AudioDualMonoMode::OFF;
+ case AUDIO_DUAL_MONO_MODE_LR:
+ return AudioDualMonoMode::LR;
+ case AUDIO_DUAL_MONO_MODE_LL:
+ return AudioDualMonoMode::LL;
+ case AUDIO_DUAL_MONO_MODE_RR:
+ return AudioDualMonoMode::RR;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_timestretch_fallback_mode_t>
+aidl2legacy_TimestretchFallbackMode_audio_timestretch_fallback_mode_t(
+ AudioPlaybackRate::TimestretchFallbackMode aidl) {
+ switch (aidl) {
+ case AudioPlaybackRate::TimestretchFallbackMode::SYS_RESERVED_CUT_REPEAT:
+ return AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT;
+ case AudioPlaybackRate::TimestretchFallbackMode::SYS_RESERVED_DEFAULT:
+ return AUDIO_TIMESTRETCH_FALLBACK_DEFAULT;
+ case AudioPlaybackRate::TimestretchFallbackMode::MUTE:
+ return AUDIO_TIMESTRETCH_FALLBACK_MUTE;
+ case AudioPlaybackRate::TimestretchFallbackMode::FAIL:
+ return AUDIO_TIMESTRETCH_FALLBACK_FAIL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioPlaybackRate::TimestretchFallbackMode>
+legacy2aidl_audio_timestretch_fallback_mode_t_TimestretchFallbackMode(
+ audio_timestretch_fallback_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_TIMESTRETCH_FALLBACK_CUT_REPEAT:
+ return AudioPlaybackRate::TimestretchFallbackMode::SYS_RESERVED_CUT_REPEAT;
+ case AUDIO_TIMESTRETCH_FALLBACK_DEFAULT:
+ return AudioPlaybackRate::TimestretchFallbackMode::SYS_RESERVED_DEFAULT;
+ case AUDIO_TIMESTRETCH_FALLBACK_MUTE:
+ return AudioPlaybackRate::TimestretchFallbackMode::MUTE;
+ case AUDIO_TIMESTRETCH_FALLBACK_FAIL:
+ return AudioPlaybackRate::TimestretchFallbackMode::FAIL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_timestretch_stretch_mode_t>
+aidl2legacy_TimestretchMode_audio_timestretch_stretch_mode_t(
+ AudioPlaybackRate::TimestretchMode aidl) {
+ switch (aidl) {
+ case AudioPlaybackRate::TimestretchMode::DEFAULT:
+ return AUDIO_TIMESTRETCH_STRETCH_DEFAULT;
+ case AudioPlaybackRate::TimestretchMode::VOICE:
+ return AUDIO_TIMESTRETCH_STRETCH_VOICE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<AudioPlaybackRate::TimestretchMode>
+legacy2aidl_audio_timestretch_stretch_mode_t_TimestretchMode(
+ audio_timestretch_stretch_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_TIMESTRETCH_STRETCH_DEFAULT:
+ return AudioPlaybackRate::TimestretchMode::DEFAULT;
+ case AUDIO_TIMESTRETCH_STRETCH_VOICE:
+ return AudioPlaybackRate::TimestretchMode::VOICE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_playback_rate_t>
+aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(const AudioPlaybackRate& aidl) {
+ audio_playback_rate_t legacy;
+ legacy.mSpeed = aidl.speed;
+ legacy.mPitch = aidl.pitch;
+ legacy.mFallbackMode = VALUE_OR_RETURN(
+ aidl2legacy_TimestretchFallbackMode_audio_timestretch_fallback_mode_t(
+ aidl.fallbackMode));
+ legacy.mStretchMode = VALUE_OR_RETURN(
+ aidl2legacy_TimestretchMode_audio_timestretch_stretch_mode_t(aidl.timestretchMode));
+ return legacy;
+}
+
+ConversionResult<AudioPlaybackRate>
+legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy) {
+ AudioPlaybackRate aidl;
+ aidl.speed = legacy.mSpeed;
+ aidl.pitch = legacy.mPitch;
+ aidl.fallbackMode = VALUE_OR_RETURN(
+ legacy2aidl_audio_timestretch_fallback_mode_t_TimestretchFallbackMode(
+ legacy.mFallbackMode));
+ aidl.timestretchMode = VALUE_OR_RETURN(
+ legacy2aidl_audio_timestretch_stretch_mode_t_TimestretchMode(legacy.mStretchMode));
+ return aidl;
+}
+
+ConversionResult<audio_latency_mode_t>
+aidl2legacy_AudioLatencyMode_audio_latency_mode_t(AudioLatencyMode aidl) {
+ switch (aidl) {
+ case AudioLatencyMode::FREE:
+ return AUDIO_LATENCY_MODE_FREE;
+ case AudioLatencyMode::LOW:
+ return AUDIO_LATENCY_MODE_LOW;
+ }
+ return unexpected(BAD_VALUE);
+}
+ConversionResult<AudioLatencyMode>
+legacy2aidl_audio_latency_mode_t_AudioLatencyMode(audio_latency_mode_t legacy) {
+ switch (legacy) {
+ case AUDIO_LATENCY_MODE_FREE:
+ return AudioLatencyMode::FREE;
+ case AUDIO_LATENCY_MODE_LOW:
+ return AudioLatencyMode::LOW;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+} // namespace android
+
+#if defined(BACKEND_NDK)
+} // aidl
+#endif
\ No newline at end of file
diff --git a/media/audioaidlconversion/AidlConversionNdk.cpp b/media/audioaidlconversion/AidlConversionNdk.cpp
new file mode 100644
index 0000000..a3e39c7
--- /dev/null
+++ b/media/audioaidlconversion/AidlConversionNdk.cpp
@@ -0,0 +1,259 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utility>
+
+#define LOG_TAG "AidlConversionNdk"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AIDL NDK backend to legacy audio data structure conversion utilities.
+
+namespace aidl {
+namespace android {
+
+using ::aidl::android::hardware::audio::effect::Descriptor;
+using ::aidl::android::hardware::audio::effect::Flags;
+
+using ::android::BAD_VALUE;
+using ::android::base::unexpected;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Converters
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(Flags::Type type) {
+ switch (type) {
+ case Flags::Type::INSERT:
+ return EFFECT_FLAG_TYPE_INSERT;
+ case Flags::Type::AUXILIARY:
+ return EFFECT_FLAG_TYPE_AUXILIARY;
+ case Flags::Type::REPLACE:
+ return EFFECT_FLAG_TYPE_REPLACE;
+ case Flags::Type::PRE_PROC:
+ return EFFECT_FLAG_TYPE_PRE_PROC;
+ case Flags::Type::POST_PROC:
+ return EFFECT_FLAG_TYPE_POST_PROC;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(Flags::Insert insert) {
+ switch (insert) {
+ case Flags::Insert::ANY:
+ return EFFECT_FLAG_INSERT_ANY;
+ case Flags::Insert::FIRST:
+ return EFFECT_FLAG_INSERT_FIRST;
+ case Flags::Insert::LAST:
+ return EFFECT_FLAG_INSERT_LAST;
+ case Flags::Insert::EXCLUSIVE:
+ return EFFECT_FLAG_INSERT_EXCLUSIVE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(Flags::Volume volume) {
+ switch (volume) {
+ case Flags::Volume::NONE:
+ return 0;
+ case Flags::Volume::CTRL:
+ return EFFECT_FLAG_VOLUME_CTRL;
+ case Flags::Volume::IND:
+ return EFFECT_FLAG_VOLUME_IND;
+ case Flags::Volume::MONITOR:
+ return EFFECT_FLAG_VOLUME_MONITOR;
+ }
+ return unexpected(BAD_VALUE);
+}
+ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
+ Flags::HardwareAccelerator hwAcceleratorMode) {
+ switch (hwAcceleratorMode) {
+ case Flags::HardwareAccelerator::NONE:
+ return 0;
+ case Flags::HardwareAccelerator::SIMPLE:
+ return EFFECT_FLAG_HW_ACC_SIMPLE;
+ case Flags::HardwareAccelerator::TUNNEL:
+ return EFFECT_FLAG_HW_ACC_TUNNEL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_uint32(Flags aidl) {
+ uint32_t legacy = 0;
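+    // OR the individual flag fields into the packed legacy bit field.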
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Type_uint32(aidl.type));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Insert_uint32(aidl.insert));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Volume_uint32(aidl.volume));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_HardwareAccelerator_uint32(aidl.hwAcceleratorMode));
+
+ if (aidl.offloadIndication) {
+ legacy |= EFFECT_FLAG_OFFLOAD_SUPPORTED;
+ }
+ if (aidl.deviceIndication) {
+ legacy |= EFFECT_FLAG_DEVICE_IND;
+ }
+ if (aidl.audioModeIndication) {
+ legacy |= EFFECT_FLAG_AUDIO_MODE_IND;
+ }
+ if (aidl.audioSourceIndication) {
+ legacy |= EFFECT_FLAG_AUDIO_SOURCE_IND;
+ }
+ if (aidl.noProcessing) {
+ legacy |= EFFECT_FLAG_NO_PROCESS;
+ }
+ return legacy;
+}
+
+ConversionResult<Flags::Type> legacy2aidl_uint32_Flags_Type(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_TYPE_MASK) {
+ case EFFECT_FLAG_TYPE_INSERT:
+ return Flags::Type::INSERT;
+ case EFFECT_FLAG_TYPE_AUXILIARY:
+ return Flags::Type::AUXILIARY;
+ case EFFECT_FLAG_TYPE_REPLACE:
+ return Flags::Type::REPLACE;
+ case EFFECT_FLAG_TYPE_PRE_PROC:
+ return Flags::Type::PRE_PROC;
+ case EFFECT_FLAG_TYPE_POST_PROC:
+ return Flags::Type::POST_PROC;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::Insert> legacy2aidl_uint32_Flags_Insert(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_INSERT_MASK) {
+ case EFFECT_FLAG_INSERT_ANY:
+ return Flags::Insert::ANY;
+ case EFFECT_FLAG_INSERT_FIRST:
+ return Flags::Insert::FIRST;
+ case EFFECT_FLAG_INSERT_LAST:
+ return Flags::Insert::LAST;
+ case EFFECT_FLAG_INSERT_EXCLUSIVE:
+ return Flags::Insert::EXCLUSIVE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::Volume> legacy2aidl_uint32_Flags_Volume(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_VOLUME_MASK) {
+ case EFFECT_FLAG_VOLUME_IND:
+ return Flags::Volume::IND;
+ case EFFECT_FLAG_VOLUME_MONITOR:
+ return Flags::Volume::MONITOR;
+ case EFFECT_FLAG_VOLUME_NONE:
+ return Flags::Volume::NONE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::HardwareAccelerator> legacy2aidl_uint32_Flags_HardwareAccelerator(
+ uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_HW_ACC_MASK) {
+ case EFFECT_FLAG_HW_ACC_SIMPLE:
+ return Flags::HardwareAccelerator::SIMPLE;
+ case EFFECT_FLAG_HW_ACC_TUNNEL:
+ return Flags::HardwareAccelerator::TUNNEL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags> legacy2aidl_uint32_Flags(uint32_t legacy) {
+ Flags aidl;
+
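+    // Unpack each field from the packed legacy flags word.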
+ aidl.type = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Type(legacy));
+ aidl.insert = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Insert(legacy));
+ aidl.volume = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Volume(legacy));
+ aidl.hwAcceleratorMode = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_HardwareAccelerator(legacy));
+ aidl.offloadIndication = (legacy & EFFECT_FLAG_OFFLOAD_SUPPORTED);
+ aidl.deviceIndication = (legacy & EFFECT_FLAG_DEVICE_IND);
+ aidl.audioModeIndication = (legacy & EFFECT_FLAG_AUDIO_MODE_IND);
+ aidl.audioSourceIndication = (legacy & EFFECT_FLAG_AUDIO_SOURCE_IND);
+ aidl.noProcessing = (legacy & EFFECT_FLAG_NO_PROCESS);
+ return aidl;
+}
+
+ConversionResult<effect_descriptor_t>
+aidl2legacy_Descriptor_effect_descriptor(const Descriptor& aidl) {
+ effect_descriptor_t legacy;
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.type));
+ legacy.uuid = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.uuid));
+ // legacy descriptor doesn't have proxy information
+ // proxy = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.proxy));
+ legacy.apiVersion = EFFECT_CONTROL_API_VERSION;
+ legacy.flags = VALUE_OR_RETURN(aidl2legacy_Flags_uint32(aidl.common.flags));
+ legacy.cpuLoad = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.cpuLoad));
+ legacy.memoryUsage = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.memoryUsage));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.common.name, legacy.name, sizeof(legacy.name)));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.common.implementor, legacy.implementor,
+ sizeof(legacy.implementor)));
+ return legacy;
+}
+
+ConversionResult<Descriptor>
+legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& legacy) {
+ Descriptor aidl;
+ aidl.common.id.type = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.type));
+ aidl.common.id.uuid = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.uuid));
+ // legacy descriptor doesn't have proxy information
+ // aidl.common.id.proxy
+ aidl.common.flags = VALUE_OR_RETURN(legacy2aidl_uint32_Flags(legacy.flags));
+ aidl.common.cpuLoad = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.cpuLoad));
+ aidl.common.memoryUsage = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.memoryUsage));
+ aidl.common.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+ aidl.common.implementor =
+ VALUE_OR_RETURN(legacy2aidl_string(legacy.implementor, sizeof(legacy.implementor)));
+ return aidl;
+}
+
+ConversionResult<buffer_config_t> aidl2legacy_AudioConfigBase_buffer_config_t(
+ const media::audio::common::AudioConfigBase& aidl, bool isInput) {
+ buffer_config_t legacy = {};  // zero-initialize so the |= updates to .mask below start from 0
+
+ legacy.samplingRate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
+ legacy.mask |= EFFECT_CONFIG_SMP_RATE;
+
+ legacy.channels = VALUE_OR_RETURN(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
+ legacy.mask |= EFFECT_CONFIG_CHANNELS;
+
+ legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
+ legacy.mask |= EFFECT_CONFIG_FORMAT;
+
+ return legacy;
+}
+
+ConversionResult<media::audio::common::AudioConfigBase>
+legacy2aidl_AudioConfigBase_buffer_config_t(const buffer_config_t& legacy, bool isInput) {
+ media::audio::common::AudioConfigBase aidl;
+
+ if (legacy.mask & EFFECT_CONFIG_SMP_RATE) {
+ aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.samplingRate));
+ }
+ if (legacy.mask & EFFECT_CONFIG_CHANNELS) {
+ aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ static_cast<audio_channel_mask_t>(legacy.channels), isInput));
+ }
+ if (legacy.mask & EFFECT_CONFIG_FORMAT) {
+ aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(
+ static_cast<audio_format_t>(legacy.format)));
+ }
+ return aidl;
+}
+
+} // namespace android
+} // namespace aidl
diff --git a/media/audioaidlconversion/Android.bp b/media/audioaidlconversion/Android.bp
new file mode 100644
index 0000000..86f455e
--- /dev/null
+++ b/media/audioaidlconversion/Android.bp
@@ -0,0 +1,150 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_defaults {
+ name: "audio_aidl_conversion_common_util_default",
+ host_supported: true,
+ vendor_available: true,
+ double_loadable: true,
+ min_sdk_version: "29",
+ export_include_dirs: [
+ "include",
+ ],
+ header_libs: [
+ "libbase_headers",
+ "liberror_headers",
+ ],
+ export_header_lib_headers: [
+ "libbase_headers",
+ "liberror_headers",
+ ],
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.btservices",
+ "com.android.media",
+ "com.android.media.swcodec",
+ ],
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
+// This is intended for clients that need to include AidlConversionUtil.h without extra dependencies.
+cc_library_headers {
+ name: "libaudio_aidl_conversion_common_util_cpp",
+ defaults: [
+ "audio_aidl_conversion_common_util_default",
+ ],
+}
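+
+// A client takes the dependency via header_libs, for example:
+//     header_libs: ["libaudio_aidl_conversion_common_util_cpp"],
+//     export_header_lib_headers: ["libaudio_aidl_conversion_common_util_cpp"],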
+
+cc_library_headers {
+ name: "libaudio_aidl_conversion_common_util_ndk",
+ defaults: [
+ "audio_aidl_conversion_common_util_default",
+ ],
+ cflags: [
+ "-DBACKEND_NDK",
+ ],
+}
+
+cc_defaults {
+ name: "audio_aidl_conversion_common_default",
+ export_include_dirs: ["include"],
+ host_supported: true,
+ vendor_available: true,
+ double_loadable: true,
+ header_libs: [
+ "libaudio_system_headers",
+ "libhardware_headers",
+ ],
+ shared_libs: [
+ "libbase",
+ "libbinder",
+ "liblog",
+ "libshmemcompat",
+ "libstagefright_foundation",
+ "libutils",
+ "shared-file-region-aidl-cpp",
+ "framework-permission-aidl-cpp",
+ ],
+ export_shared_lib_headers: [
+ "libbase",
+ "shared-file-region-aidl-cpp",
+ ],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wno-error=deprecated-declarations",
+ ],
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+ target: {
+ darwin: {
+ enabled: false,
+ },
+ },
+}
+
+/**
+ * Only AIDL CPP backend conversion supported.
+ */
+cc_library {
+ name: "libaudio_aidl_conversion_common_cpp",
+ srcs: [
+ "AidlConversionCppNdk.cpp",
+ ],
+ header_libs: [
+ "libaudio_aidl_conversion_common_util_cpp",
+ ],
+ export_header_lib_headers: [
+ "libaudio_aidl_conversion_common_util_cpp",
+ ],
+ defaults: [
+ "audio_aidl_conversion_common_default",
+ "latest_android_media_audio_common_types_cpp_export_shared",
+ ],
+ min_sdk_version: "29",
+}
+
+/**
+ * Only AIDL NDK backend conversion supported.
+ */
+cc_library {
+ name: "libaudio_aidl_conversion_common_ndk",
+ srcs: [
+ "AidlConversionCppNdk.cpp",
+ "AidlConversionNdk.cpp",
+ ],
+ header_libs: [
+ "libaudio_aidl_conversion_common_util_ndk",
+ ],
+ export_header_lib_headers: [
+ "libaudio_aidl_conversion_common_util_ndk",
+ ],
+ defaults: [
+ "audio_aidl_conversion_common_default",
+ "latest_android_hardware_audio_common_ndk_shared",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ "libbase",
+ ],
+ cflags: [
+ "-DBACKEND_NDK",
+ ],
+ min_sdk_version: "31", //AParcelableHolder has been introduced in 31
+}
diff --git a/media/audioaidlconversion/include/media/AidlConversionCppNdk.h b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
new file mode 100644
index 0000000..c25ddb1
--- /dev/null
+++ b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
@@ -0,0 +1,327 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <limits>
+#include <type_traits>
+#include <system/audio.h>
+
+/**
+ * Can handle conversion between AIDL (both CPP and NDK backends) and legacy types.
+ * The backend is selected by the BACKEND_NDK preprocessor define, set via cflags in Android.bp.
+ */
+#if defined(BACKEND_NDK)
+#define PREFIX(f) <aidl/f>
+#else
+#define PREFIX(f) <f>
+#endif
+
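+// For example, with BACKEND_NDK defined the first include below expands to
+//   #include <aidl/android/media/audio/common/AudioChannelLayout.h>
+// and without it to
+//   #include <android/media/audio/common/AudioChannelLayout.h>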
+#include PREFIX(android/media/audio/common/AudioChannelLayout.h)
+#include PREFIX(android/media/audio/common/AudioConfig.h)
+#include PREFIX(android/media/audio/common/AudioConfigBase.h)
+#include PREFIX(android/media/audio/common/AudioContentType.h)
+#include PREFIX(android/media/audio/common/AudioDeviceDescription.h)
+#include PREFIX(android/media/audio/common/AudioDualMonoMode.h)
+#include PREFIX(android/media/audio/common/AudioEncapsulationMetadataType.h)
+#include PREFIX(android/media/audio/common/AudioEncapsulationMode.h)
+#include PREFIX(android/media/audio/common/AudioEncapsulationType.h)
+#include PREFIX(android/media/audio/common/AudioFormatDescription.h)
+#include PREFIX(android/media/audio/common/AudioGain.h)
+#include PREFIX(android/media/audio/common/AudioGainConfig.h)
+#include PREFIX(android/media/audio/common/AudioGainMode.h)
+#include PREFIX(android/media/audio/common/AudioInputFlags.h)
+#include PREFIX(android/media/audio/common/AudioIoFlags.h)
+#include PREFIX(android/media/audio/common/AudioLatencyMode.h)
+#include PREFIX(android/media/audio/common/AudioMode.h)
+#include PREFIX(android/media/audio/common/AudioOffloadInfo.h)
+#include PREFIX(android/media/audio/common/AudioOutputFlags.h)
+#include PREFIX(android/media/audio/common/AudioPortExt.h)
+#include PREFIX(android/media/audio/common/AudioPortMixExt.h)
+#include PREFIX(android/media/audio/common/AudioPlaybackRate.h)
+#include PREFIX(android/media/audio/common/AudioProfile.h)
+#include PREFIX(android/media/audio/common/AudioSource.h)
+#include PREFIX(android/media/audio/common/AudioStandard.h)
+#include PREFIX(android/media/audio/common/AudioUsage.h)
+#include PREFIX(android/media/audio/common/AudioUuid.h)
+#include PREFIX(android/media/audio/common/ExtraAudioDescriptor.h)
+#include PREFIX(android/media/audio/common/Int.h)
+#undef PREFIX
+
+#include <media/AidlConversionUtil.h>
+#include <system/audio.h>
+#include <system/audio_effect.h>
+
+using ::android::String16;
+using ::android::String8;
+
+#if defined(BACKEND_NDK)
+namespace aidl {
+#endif
+
+namespace android {
+
+// maxSize is the size of the C-string buffer (including the 0-terminator), NOT the max length of
+// the string.
+::android::status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize);
+ConversionResult<std::string> legacy2aidl_string(const char* legacy, size_t maxSize);
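+//
+// Example (sketch; aidlName is a hypothetical std::string): round-tripping a name through a
+// fixed-size legacy buffer.
+//   char name[EFFECT_STRING_LEN_MAX];
+//   RETURN_IF_ERROR(aidl2legacy_string(aidlName, name, sizeof(name)));  // BAD_VALUE if it does not fit
+//   std::string back = VALUE_OR_RETURN(legacy2aidl_string(name, sizeof(name)));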
+
+ConversionResult<audio_module_handle_t> aidl2legacy_int32_t_audio_module_handle_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_module_handle_t_int32_t(audio_module_handle_t legacy);
+
+ConversionResult<audio_io_handle_t> aidl2legacy_int32_t_audio_io_handle_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_io_handle_t_int32_t(audio_io_handle_t legacy);
+
+ConversionResult<audio_port_handle_t> aidl2legacy_int32_t_audio_port_handle_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_port_handle_t_int32_t(audio_port_handle_t legacy);
+
+ConversionResult<audio_patch_handle_t> aidl2legacy_int32_t_audio_patch_handle_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_patch_handle_t_int32_t(audio_patch_handle_t legacy);
+
+ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy);
+
+ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
+
+ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
+
+ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
+
+ConversionResult<uid_t> aidl2legacy_int32_t_uid_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_uid_t_int32_t(uid_t legacy);
+
+ConversionResult<String8> aidl2legacy_string_view_String8(std::string_view aidl);
+ConversionResult<std::string> legacy2aidl_String8_string(const String8& legacy);
+
+ConversionResult<String16> aidl2legacy_string_view_String16(std::string_view aidl);
+ConversionResult<std::string> legacy2aidl_String16_string(const String16& legacy);
+
+ConversionResult<std::optional<String16>>
+aidl2legacy_optional_string_view_optional_String16(std::optional<std::string_view> aidl);
+ConversionResult<std::optional<std::string_view>>
+legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
+
+ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ const media::audio::common::AudioChannelLayout& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioChannelLayout>
+legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+
+ConversionResult<audio_config_t>
+aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfig>
+legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
+
+ConversionResult<audio_config_base_t>
+aidl2legacy_AudioConfigBase_audio_config_base_t(
+ const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase>
+legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
+
+ConversionResult<audio_input_flags_t>
+aidl2legacy_AudioInputFlags_audio_input_flags_t(media::audio::common::AudioInputFlags aidl);
+ConversionResult<media::audio::common::AudioInputFlags>
+legacy2aidl_audio_input_flags_t_AudioInputFlags(audio_input_flags_t legacy);
+
+ConversionResult<audio_output_flags_t>
+aidl2legacy_AudioOutputFlags_audio_output_flags_t(media::audio::common::AudioOutputFlags aidl);
+ConversionResult<media::audio::common::AudioOutputFlags>
+legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
+
+ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
+ int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
+ audio_input_flags_t legacy);
+
+ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
+ int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
+ audio_output_flags_t legacy);
+
+ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
+ const media::audio::common::AudioIoFlags& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
+ const audio_io_flags& legacy, bool isInput);
+
+ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
+
+ConversionResult<audio_content_type_t>
+aidl2legacy_AudioContentType_audio_content_type_t(
+ media::audio::common::AudioContentType aidl);
+ConversionResult<media::audio::common::AudioContentType>
+legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
+
+ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
+ const media::audio::common::AudioDeviceDescription& aidl);
+ConversionResult<media::audio::common::AudioDeviceDescription>
+legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
+
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
+ char* legacyAddress);
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
+ String8* legacyAddress);
+::android::status_t aidl2legacy_AudioDevice_audio_device(
+ const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
+ std::string* legacyAddress);
+
+ConversionResult<media::audio::common::AudioDevice> legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const char* legacyAddress);
+ConversionResult<media::audio::common::AudioDevice> legacy2aidl_audio_device_AudioDevice(
+ audio_devices_t legacyType, const String8& legacyAddress);
+
+ConversionResult<audio_extra_audio_descriptor>
+aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
+ const media::audio::common::ExtraAudioDescriptor& aidl);
+
+ConversionResult<media::audio::common::ExtraAudioDescriptor>
+legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
+ const audio_extra_audio_descriptor& legacy);
+
+ConversionResult<audio_encapsulation_metadata_type_t>
+aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
+ media::audio::common::AudioEncapsulationMetadataType aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
+legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
+ audio_encapsulation_metadata_type_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_AudioEncapsulationMetadataType_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
+
+ConversionResult<audio_encapsulation_mode_t>
+aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
+ media::audio::common::AudioEncapsulationMode aidl);
+ConversionResult<media::audio::common::AudioEncapsulationMode>
+legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_AudioEncapsulationMode_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy);
+
+ConversionResult<audio_encapsulation_type_t>
+aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
+ const media::audio::common::AudioEncapsulationType& aidl);
+ConversionResult<media::audio::common::AudioEncapsulationType>
+legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
+ const audio_encapsulation_type_t& legacy);
+
+ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
+ const media::audio::common::AudioFormatDescription& aidl);
+ConversionResult<media::audio::common::AudioFormatDescription>
+legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
+
+ConversionResult<audio_gain_mode_t>
+aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
+ConversionResult<media::audio::common::AudioGainMode>
+legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
+
+ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
+ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
+
+ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
+ const media::audio::common::AudioGainConfig& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGainConfig>
+legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
+
+ConversionResult<audio_gain>
+aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioGain>
+legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
+
+ConversionResult<audio_mode_t>
+aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
+ConversionResult<media::audio::common::AudioMode>
+legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
+
+ConversionResult<audio_offload_info_t>
+aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
+ const media::audio::common::AudioOffloadInfo& aidl);
+ConversionResult<media::audio::common::AudioOffloadInfo>
+legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
+
+ConversionResult<audio_profile> aidl2legacy_AudioProfile_audio_profile(
+ const media::audio::common::AudioProfile& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioProfile> legacy2aidl_audio_profile_AudioProfile(
+ const audio_profile& legacy, bool isInput);
+
+ConversionResult<audio_standard_t> aidl2legacy_AudioStandard_audio_standard_t(
+ media::audio::common::AudioStandard aidl);
+ConversionResult<media::audio::common::AudioStandard> legacy2aidl_audio_standard_t_AudioStandard(
+ audio_standard_t legacy);
+
+ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
+ media::audio::common::AudioSource aidl);
+ConversionResult<media::audio::common::AudioSource> legacy2aidl_audio_source_t_AudioSource(
+ audio_source_t legacy);
+
+ConversionResult<audio_usage_t> aidl2legacy_AudioUsage_audio_usage_t(
+ media::audio::common::AudioUsage aidl);
+ConversionResult<media::audio::common::AudioUsage> legacy2aidl_audio_usage_t_AudioUsage(
+ audio_usage_t legacy);
+
+ConversionResult<audio_uuid_t> aidl2legacy_AudioUuid_audio_uuid_t(
+ const media::audio::common::AudioUuid &aidl);
+ConversionResult<media::audio::common::AudioUuid> legacy2aidl_audio_uuid_t_AudioUuid(
+ const audio_uuid_t& legacy);
+
+ConversionResult<audio_dual_mono_mode_t>
+aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(media::audio::common::AudioDualMonoMode aidl);
+ConversionResult<media::audio::common::AudioDualMonoMode>
+legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(audio_dual_mono_mode_t legacy);
+
+ConversionResult<audio_timestretch_fallback_mode_t>
+aidl2legacy_TimestretchFallbackMode_audio_timestretch_fallback_mode_t(
+ media::audio::common::AudioPlaybackRate::TimestretchFallbackMode aidl);
+ConversionResult<media::audio::common::AudioPlaybackRate::TimestretchFallbackMode>
+legacy2aidl_audio_timestretch_fallback_mode_t_TimestretchFallbackMode(
+ audio_timestretch_fallback_mode_t legacy);
+
+ConversionResult<audio_timestretch_stretch_mode_t>
+aidl2legacy_TimestretchMode_audio_timestretch_stretch_mode_t(
+ media::audio::common::AudioPlaybackRate::TimestretchMode aidl);
+ConversionResult<media::audio::common::AudioPlaybackRate::TimestretchMode>
+legacy2aidl_audio_timestretch_stretch_mode_t_TimestretchMode(
+ audio_timestretch_stretch_mode_t legacy);
+
+ConversionResult<audio_playback_rate_t>
+aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(
+ const media::audio::common::AudioPlaybackRate& aidl);
+ConversionResult<media::audio::common::AudioPlaybackRate>
+legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
+
+ConversionResult<audio_latency_mode_t>
+aidl2legacy_AudioLatencyMode_audio_latency_mode_t(media::audio::common::AudioLatencyMode aidl);
+ConversionResult<media::audio::common::AudioLatencyMode>
+legacy2aidl_audio_latency_mode_t_AudioLatencyMode(audio_latency_mode_t legacy);
+
+} // namespace android
+
+#if defined(BACKEND_NDK)
+} // namespace aidl
+#endif
diff --git a/media/audioaidlconversion/include/media/AidlConversionNdk.h b/media/audioaidlconversion/include/media/AidlConversionNdk.h
new file mode 100644
index 0000000..a3176f6
--- /dev/null
+++ b/media/audioaidlconversion/include/media/AidlConversionNdk.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/binder_auto_utils.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+/**
+ * Can only handle conversion between AIDL (NDK backend) and legacy types.
+ */
+#include <hardware/audio_effect.h>
+#include <media/AidlConversionUtil.h>
+#include <system/audio_effect.h>
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+
+namespace aidl {
+namespace android {
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Type type);
+ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Insert insert);
+ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Volume volume);
+ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator hwAcceleratorMode);
+ConversionResult<uint32_t> aidl2legacy_Flags_uint32(
+ const ::aidl::android::hardware::audio::effect::Flags aidl);
+
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Type>
+legacy2aidl_uint32_Flags_Type(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Insert>
+legacy2aidl_uint32_Flags_Insert(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Volume>
+legacy2aidl_uint32_Flags_Volume(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator>
+legacy2aidl_uint32_Flags_HardwareAccelerator(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags> legacy2aidl_uint32_Flags(
+ uint32_t hal);
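+//
+// Example (sketch; type names abbreviated, other fields left at their defaults):
+//   Flags aidl;
+//   aidl.type = Flags::Type::INSERT;
+//   aidl.insert = Flags::Insert::LAST;
+//   aidl.volume = Flags::Volume::IND;
+//   uint32_t legacy = VALUE_OR_RETURN(aidl2legacy_Flags_uint32(aidl));
+//   // legacy == (EFFECT_FLAG_TYPE_INSERT | EFFECT_FLAG_INSERT_LAST | EFFECT_FLAG_VOLUME_IND)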
+
+ConversionResult<effect_descriptor_t> aidl2legacy_Descriptor_effect_descriptor(
+ const ::aidl::android::hardware::audio::effect::Descriptor& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Descriptor>
+legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& hal);
+
+ConversionResult<buffer_config_t> aidl2legacy_AudioConfigBase_buffer_config_t(
+ const media::audio::common::AudioConfigBase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioConfigBase> legacy2aidl_AudioConfigBase_buffer_config_t(
+ const buffer_config_t& legacy, bool isInput);
+
+} // namespace android
+} // namespace aidl
diff --git a/media/libaudioclient/include/media/AidlConversionUtil.h b/media/audioaidlconversion/include/media/AidlConversionUtil.h
similarity index 80%
rename from media/libaudioclient/include/media/AidlConversionUtil.h
rename to media/audioaidlconversion/include/media/AidlConversionUtil.h
index 8817c35..28c7522 100644
--- a/media/libaudioclient/include/media/AidlConversionUtil.h
+++ b/media/audioaidlconversion/include/media/AidlConversionUtil.h
@@ -20,15 +20,23 @@
#include <type_traits>
#include <utility>
-#include <binder/Enums.h>
+#include <android-base/expected.h>
#include <binder/Status.h>
#include <error/Result.h>
-namespace android {
+#if defined(BACKEND_NDK)
+#include <android/binder_enums.h>
+#include <android/binder_status.h>
+
+namespace aidl {
+#else
+#include <binder/Enums.h>
+#endif
template <typename T>
-using ConversionResult = error::Result<T>;
+using ConversionResult = ::android::error::Result<T>;
+namespace android {
/**
* A generic template to safely cast between integral types, respecting limits of the destination
* type.
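+ *
+ * For example, convertIntegral<uint16_t>(123) returns 123, while convertIntegral<uint16_t>(-1)
+ * fails with ::android::BAD_VALUE.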
@@ -39,15 +47,15 @@
// have the signed converted to unsigned and produce wrong results.
if (std::is_signed_v<From> && !std::is_signed_v<To>) {
if (from < 0 || from > std::numeric_limits<To>::max()) {
- return base::unexpected(BAD_VALUE);
+ return ::android::base::unexpected(::android::BAD_VALUE);
}
} else if (std::is_signed_v<To> && !std::is_signed_v<From>) {
if (from > std::numeric_limits<To>::max()) {
- return base::unexpected(BAD_VALUE);
+ return ::android::base::unexpected(::android::BAD_VALUE);
}
} else {
if (from < std::numeric_limits<To>::min() || from > std::numeric_limits<To>::max()) {
- return base::unexpected(BAD_VALUE);
+ return ::android::base::unexpected(::android::BAD_VALUE);
}
}
return static_cast<To>(from);
@@ -67,14 +75,14 @@
* A generic template that helps convert containers of convertible types, using iterators.
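+ *
+ * For example (sketch; assumes a std::vector<audio_usage_t> `in` and the converters declared in
+ * AidlConversionCppNdk.h):
+ *   std::vector<media::audio::common::AudioUsage> out;
+ *   RETURN_IF_ERROR(convertRange(in.begin(), in.end(), std::back_inserter(out),
+ *                                legacy2aidl_audio_usage_t_AudioUsage));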
*/
template<typename InputIterator, typename OutputIterator, typename Func>
-status_t convertRange(InputIterator start,
+::android::status_t convertRange(InputIterator start,
InputIterator end,
OutputIterator out,
const Func& itemConversion) {
for (InputIterator iter = start; iter != end; ++iter, ++out) {
*out = VALUE_OR_RETURN_STATUS(itemConversion(*iter));
}
- return OK;
+ return ::android::OK;
}
/**
@@ -82,7 +90,7 @@
* Uses a limit as maximum conversion items.
*/
template<typename InputIterator, typename OutputIterator, typename Func>
-status_t convertRangeWithLimit(InputIterator start,
+::android::status_t convertRangeWithLimit(InputIterator start,
InputIterator end,
OutputIterator out,
const Func& itemConversion,
@@ -94,7 +102,7 @@
for (InputIterator iter = start; (iter != last); ++iter, ++out) {
*out = VALUE_OR_RETURN_STATUS(itemConversion(*iter));
}
- return OK;
+ return ::android::OK;
}
/**
@@ -140,7 +148,7 @@
OutputContainer output;
auto ins = std::inserter(output, output.begin());
for (const auto& item1 : input1) {
- RETURN_IF_ERROR(iter2 != input2.end() ? OK : BAD_VALUE);
+ RETURN_IF_ERROR(iter2 != input2.end() ? ::android::OK : ::android::BAD_VALUE);
*ins = VALUE_OR_RETURN(itemConversion(item1, *iter2++));
}
return output;
@@ -248,7 +256,8 @@
////////////////////////////////////////////////////////////////////////////////////////////////////
// Utilities for working with AIDL unions.
// UNION_GET(obj, fieldname) returns a ConversionResult<T> containing either the strongly-typed
-// value of the respective field, or BAD_VALUE if the union is not set to the requested field.
+// value of the respective field, or ::android::BAD_VALUE if the union is not set to the requested
+// field.
// UNION_SET(obj, fieldname, value) sets the requested field to the given value.
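+//
+// For example (sketch), with an AudioPortExt union variable `ext` and a hypothetical `newDevice`:
+//   auto device = VALUE_OR_RETURN(UNION_GET(ext, device));  // ::android::BAD_VALUE if another field is set
+//   UNION_SET(ext, device, newDevice);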
template<typename T, typename T::Tag tag>
@@ -257,7 +266,7 @@
template<typename T, typename T::Tag tag>
ConversionResult<UnionFieldType<T, tag>> unionGetField(const T& u) {
if (u.getTag() != tag) {
- return base::unexpected(BAD_VALUE);
+ return ::android::base::unexpected(::android::BAD_VALUE);
}
return u.template get<tag>();
}
@@ -275,7 +284,11 @@
*/
template <typename T>
bool isValidEnum(T value) {
- constexpr android::enum_range<T> er{};
+#if defined(BACKEND_NDK)
+ constexpr ndk::enum_range<T> er{};
+#else
+ constexpr ::android::enum_range<T> er{};
+#endif
return std::find(er.begin(), er.end(), value) != er.end();
}
@@ -294,7 +307,7 @@
}
/**
- * Return the equivalent Android status_t from a binder exception code.
+ * Return the equivalent ::android::status_t from a binder exception code.
*
* Generally one should use statusTFromBinderStatus() instead.
*
@@ -304,33 +317,33 @@
* Note: for EX_TRANSACTION_FAILED and EX_SERVICE_SPECIFIC a more detailed error code
* can be found from transactionError() or serviceSpecificErrorCode().
*/
-static inline status_t statusTFromExceptionCode(int32_t exceptionCode) {
+static inline ::android::status_t statusTFromExceptionCode(int32_t exceptionCode) {
using namespace ::android::binder;
switch (exceptionCode) {
case Status::EX_NONE:
- return OK;
- case Status::EX_SECURITY: // Java SecurityException, rethrows locally in Java
- return PERMISSION_DENIED;
- case Status::EX_BAD_PARCELABLE: // Java BadParcelableException, rethrows in Java
- case Status::EX_ILLEGAL_ARGUMENT: // Java IllegalArgumentException, rethrows in Java
- case Status::EX_NULL_POINTER: // Java NullPointerException, rethrows in Java
- return BAD_VALUE;
- case Status::EX_ILLEGAL_STATE: // Java IllegalStateException, rethrows in Java
- case Status::EX_UNSUPPORTED_OPERATION: // Java UnsupportedOperationException, rethrows
- return INVALID_OPERATION;
+ return ::android::OK;
+ case Status::EX_SECURITY: // Java SecurityException, rethrows locally in Java
+ return ::android::PERMISSION_DENIED;
+ case Status::EX_BAD_PARCELABLE: // Java BadParcelableException, rethrows in Java
+ case Status::EX_ILLEGAL_ARGUMENT: // Java IllegalArgumentException, rethrows in Java
+ case Status::EX_NULL_POINTER: // Java NullPointerException, rethrows in Java
+ return ::android::BAD_VALUE;
+ case Status::EX_ILLEGAL_STATE: // Java IllegalStateException, rethrows in Java
+ case Status::EX_UNSUPPORTED_OPERATION: // Java UnsupportedOperationException, rethrows
+ return ::android::INVALID_OPERATION;
case Status::EX_HAS_REPLY_HEADER: // Native strictmode violation
- case Status::EX_PARCELABLE: // Java bootclass loader (not standard exception), rethrows
- case Status::EX_NETWORK_MAIN_THREAD: // Java NetworkOnMainThreadException, rethrows
+ case Status::EX_PARCELABLE: // Java bootclass loader (not standard exception), rethrows
+ case Status::EX_NETWORK_MAIN_THREAD: // Java NetworkOnMainThreadException, rethrows
case Status::EX_TRANSACTION_FAILED: // Native - see error code
- case Status::EX_SERVICE_SPECIFIC: // Java ServiceSpecificException,
- // rethrows in Java with integer error code
- return UNKNOWN_ERROR;
+ case Status::EX_SERVICE_SPECIFIC: // Java ServiceSpecificException,
+ // rethrows in Java with integer error code
+ return ::android::UNKNOWN_ERROR;
}
- return UNKNOWN_ERROR;
+ return ::android::UNKNOWN_ERROR;
}
/**
- * Return the equivalent Android status_t from a binder status.
+ * Return the equivalent ::android::status_t from a binder status.
*
* Used to handle errors from an AIDL method declaration
*
@@ -340,8 +353,8 @@
*
* return_type method(type0 param0, ...)
*/
-static inline status_t statusTFromBinderStatus(const ::android::binder::Status &status) {
- return status.isOk() ? OK // check OK,
+static inline ::android::status_t statusTFromBinderStatus(const ::android::binder::Status &status) {
+ return status.isOk() ? ::android::OK // check ::android::OK,
: status.serviceSpecificErrorCode() // service-side error, not standard Java exception
// (fromServiceSpecificError)
?: status.transactionError() // a native binder transaction error (fromStatusT)
@@ -356,23 +369,23 @@
* where Java callers expect an exception, not an integer return value.
*/
static inline ::android::binder::Status binderStatusFromStatusT(
- status_t status, const char *optionalMessage = nullptr) {
+ ::android::status_t status, const char *optionalMessage = nullptr) {
const char * const emptyIfNull = optionalMessage == nullptr ? "" : optionalMessage;
// From binder::Status instructions:
// Prefer a generic exception code when possible, then a service specific
- // code, and finally a status_t for low level failures or legacy support.
+ // code, and finally a ::android::status_t for low level failures or legacy support.
// Exception codes and service specific errors map to nicer exceptions for
// Java clients.
using namespace ::android::binder;
switch (status) {
- case OK:
+ case ::android::OK:
return Status::ok();
- case PERMISSION_DENIED: // throw SecurityException on Java side
+ case ::android::PERMISSION_DENIED: // throw SecurityException on Java side
return Status::fromExceptionCode(Status::EX_SECURITY, emptyIfNull);
- case BAD_VALUE: // throw IllegalArgumentException on Java side
+ case ::android::BAD_VALUE: // throw IllegalArgumentException on Java side
return Status::fromExceptionCode(Status::EX_ILLEGAL_ARGUMENT, emptyIfNull);
- case INVALID_OPERATION: // throw IllegalStateException on Java side
+ case ::android::INVALID_OPERATION: // throw IllegalStateException on Java side
return Status::fromExceptionCode(Status::EX_ILLEGAL_STATE, emptyIfNull);
}
@@ -387,3 +400,7 @@
} // namespace aidl_utils
} // namespace android
+
+#if defined(BACKEND_NDK)
+} // namespace aidl
+#endif
\ No newline at end of file
diff --git a/media/codec2/hidl/1.0/utils/types.cpp b/media/codec2/hidl/1.0/utils/types.cpp
index 35a3b53..319ba62 100644
--- a/media/codec2/hidl/1.0/utils/types.cpp
+++ b/media/codec2/hidl/1.0/utils/types.cpp
@@ -1613,6 +1613,7 @@
// assuming blob is const here
size_t size = blob.size();
size_t ix = 0;
+ size_t old_ix = 0;
const uint8_t *data = blob.data();
C2Param *p = nullptr;
@@ -1620,8 +1621,13 @@
p = C2ParamUtils::ParseFirst(data + ix, size - ix);
if (p) {
params->emplace_back(p);
+ old_ix = ix;
ix += p->size();
ix = align(ix, PARAMS_ALIGNMENT);
+ if (ix <= old_ix || ix > size) {
+ android_errorWriteLog(0x534e4554, "238083570");
+ break;
+ }
}
} while (p);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index a11d789..a008dc2 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1014,7 +1014,7 @@
C2StoreFlexiblePixelFormatDescriptorsInfo *pixelFormatInfo = nullptr;
int vendorSdkVersion = base::GetIntProperty(
"ro.vendor.build.version.sdk", android_get_device_api_level());
- if (vendorSdkVersion >= __ANDROID_API_S__ && mClient->query(
+ if (mClient->query(
{},
{C2StoreFlexiblePixelFormatDescriptorsInfo::PARAM_TYPE},
C2_MAY_BLOCK,
@@ -1898,6 +1898,7 @@
comp = state->comp;
}
status_t err = comp->stop();
+ mChannel->stopUseOutputSurface();
if (err != C2_OK) {
// TODO: convert err into status_t
mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
@@ -1991,6 +1992,7 @@
comp = state->comp;
}
comp->release();
+ mChannel->stopUseOutputSurface();
{
Mutexed<State>::Locked state(mState);
@@ -2138,7 +2140,8 @@
std::map<size_t, sp<MediaCodecBuffer>> clientInputBuffers;
status_t err = mChannel->prepareInitialInputBuffers(&clientInputBuffers);
- if (err != OK) {
+ // FIXME(b/237656746)
+ if (err != OK && err != NO_MEMORY) {
ALOGE("Resume request for Input Buffers failed");
mCallback->onError(err, ACTION_CODE_FATAL);
return;
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 3ca263f..4bf8dce 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1578,6 +1578,17 @@
mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
}
+void CCodecBufferChannel::stopUseOutputSurface() {
+ if (mOutputSurface.lock()->surface) {
+ C2BlockPool::local_id_t outputPoolId;
+ {
+ Mutexed<BlockPools>::Locked pools(mBlockPools);
+ outputPoolId = pools->outputPoolId;
+ }
+ if (mComponent) mComponent->stopUsingOutputSurface(outputPoolId);
+ }
+}
+
void CCodecBufferChannel::reset() {
stop();
if (mInputSurface != nullptr) {
@@ -1593,14 +1604,6 @@
Mutexed<Output>::Locked output(mOutput);
output->buffers.reset();
}
- if (mOutputSurface.lock()->surface) {
- C2BlockPool::local_id_t outputPoolId;
- {
- Mutexed<BlockPools>::Locked pools(mBlockPools);
- outputPoolId = pools->outputPoolId;
- }
- mComponent->stopUsingOutputSurface(outputPoolId);
- }
}
void CCodecBufferChannel::release() {
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index f29a225..61fb06f 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -149,6 +149,12 @@
std::map<size_t, sp<MediaCodecBuffer>> &&clientInputBuffers);
/**
+ * Stop using buffers of the current output surface so that other Codec
+ * instances can use the surface safely.
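+ *
+ * For example, CCodec calls this right after the component is stopped or released:
+ *   comp->stop();                     // or comp->release()
+ *   mChannel->stopUseOutputSurface();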
+ */
+ void stopUseOutputSurface();
+
+ /**
* Stop queueing buffers to the component. This object should never queue
* buffers after this call, until start() is called.
*/
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 5208be6..0253815 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -389,7 +389,7 @@
// read back always as int
float value;
if (v.get(&value)) {
- return (int32_t)value;
+ return (int32_t) (value + 0.5);
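+ // e.g. 29.97 reads back as 30 instead of truncating to 29 (half-up rounding; exact for non-negative values)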
}
return C2Value();
}));
diff --git a/media/libaaudio/fuzzer/Android.bp b/media/libaaudio/fuzzer/Android.bp
index f5d2939..3393930 100644
--- a/media/libaaudio/fuzzer/Android.bp
+++ b/media/libaaudio/fuzzer/Android.bp
@@ -39,6 +39,7 @@
"libaudiomanager",
"libaudiopolicy",
"libaudioclient_aidl_conversion",
+ "libaudio_aidl_conversion_common_cpp",
"libutils",
],
static_libs: [
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index b88d562..e5676a7 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -20,6 +20,7 @@
// "test_aaudio_attributes.cpp". That other file is more current.
// So these tests could be deleted.
+#include <memory>
#include <stdio.h>
#include <unistd.h>
@@ -40,7 +41,7 @@
int privacyMode = DONT_SET,
aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
@@ -109,16 +110,15 @@
if (direction == AAUDIO_DIRECTION_INPUT) {
EXPECT_EQ(kNumFrames,
- AAudioStream_read(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ AAudioStream_read(aaudioStream, buffer.get(), kNumFrames, kNanosPerSecond));
} else {
EXPECT_EQ(kNumFrames,
- AAudioStream_write(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ AAudioStream_write(aaudioStream, buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
- delete[] buffer;
}
static const aaudio_usage_t sUsages[] = {
diff --git a/media/libaaudio/tests/test_recovery.cpp b/media/libaaudio/tests/test_recovery.cpp
index 6e89f83..11331af 100644
--- a/media/libaaudio/tests/test_recovery.cpp
+++ b/media/libaaudio/tests/test_recovery.cpp
@@ -16,6 +16,7 @@
// Play silence and recover from dead servers or disconnected devices.
+#include <memory>
#include <stdio.h>
#include <aaudio/AAudio.h>
@@ -32,7 +33,6 @@
int32_t triesLeft = 3;
int32_t bufferCapacity;
int32_t framesPerBurst = 0;
- float *buffer = nullptr;
int32_t actualChannelCount = 0;
int32_t actualSampleRate = 0;
@@ -83,7 +83,7 @@
bufferCapacity, framesPerBurst);
int samplesPerBurst = framesPerBurst * actualChannelCount;
- buffer = new float[samplesPerBurst];
+ std::unique_ptr<float[]> buffer(new float[samplesPerBurst]);
result = AAudioStream_requestStart(aaudioStream);
if (result != AAUDIO_OK) {
@@ -98,7 +98,7 @@
int64_t printAt = actualSampleRate;
while (result == AAUDIO_OK && framesTotal < framesMax) {
int32_t framesWritten = AAudioStream_write(aaudioStream,
- buffer, framesPerBurst,
+ buffer.get(), framesPerBurst,
DEFAULT_TIMEOUT_NANOS);
if (framesWritten < 0) {
result = framesWritten;
@@ -134,6 +134,5 @@
AAudioStream_close(aaudioStream);
}
AAudioStreamBuilder_delete(aaudioBuilder);
- delete[] buffer;
printf(" result = %d = %s\n", result, AAudio_convertResultToText(result));
}
diff --git a/media/libaaudio/tests/test_session_id.cpp b/media/libaaudio/tests/test_session_id.cpp
index 3f7d4fc..5968b5d 100644
--- a/media/libaaudio/tests/test_session_id.cpp
+++ b/media/libaaudio/tests/test_session_id.cpp
@@ -16,6 +16,7 @@
// Test AAudio SessionId, which is used to associate Effects with a stream
+#include <memory>
#include <stdio.h>
#include <unistd.h>
@@ -29,7 +30,7 @@
// Test AAUDIO_SESSION_ID_NONE default
static void checkSessionIdNone(aaudio_performance_mode_t perfMode) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -51,12 +52,12 @@
ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream1));
- ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1, buffer, kNumFrames, kNanosPerSecond));
+ ASSERT_EQ(kNumFrames,
+ AAudioStream_write(aaudioStream1, buffer.get(), kNumFrames, kNanosPerSecond));
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
- delete[] buffer;
AAudioStreamBuilder_delete(aaudioBuilder);
}
@@ -72,7 +73,7 @@
static void checkSessionIdAllocate(aaudio_performance_mode_t perfMode,
aaudio_direction_t direction) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -106,10 +107,10 @@
if (direction == AAUDIO_DIRECTION_INPUT) {
ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream1,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
} else {
ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
@@ -135,10 +136,10 @@
if (otherDirection == AAUDIO_DIRECTION_INPUT) {
ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream2,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
} else {
ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream2,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream2));
@@ -147,7 +148,6 @@
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
- delete[] buffer;
AAudioStreamBuilder_delete(aaudioBuilder);
}
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 6586e01..f75e58d 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -42,9 +42,6 @@
using media::audio::common::AudioDeviceAddress;
using media::audio::common::AudioDeviceDescription;
using media::audio::common::AudioDeviceType;
-using media::audio::common::AudioEncapsulationMetadataType;
-using media::audio::common::AudioEncapsulationMode;
-using media::audio::common::AudioEncapsulationType;
using media::audio::common::AudioFormatDescription;
using media::audio::common::AudioFormatType;
using media::audio::common::AudioGain;
@@ -138,127 +135,6 @@
////////////////////////////////////////////////////////////////////////////////////////////////////
// Converters
-status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize) {
- if (aidl.size() > maxSize - 1) {
- return BAD_VALUE;
- }
- aidl.copy(dest, aidl.size());
- dest[aidl.size()] = '\0';
- return OK;
-}
-
-ConversionResult<std::string> legacy2aidl_string(const char* legacy, size_t maxSize) {
- if (legacy == nullptr) {
- return unexpected(BAD_VALUE);
- }
- if (strnlen(legacy, maxSize) == maxSize) {
- // No null-terminator.
- return unexpected(BAD_VALUE);
- }
- return std::string(legacy);
-}
-
-ConversionResult<audio_module_handle_t> aidl2legacy_int32_t_audio_module_handle_t(int32_t aidl) {
- return convertReinterpret<audio_module_handle_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_module_handle_t_int32_t(audio_module_handle_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_io_handle_t> aidl2legacy_int32_t_audio_io_handle_t(int32_t aidl) {
- return convertReinterpret<audio_io_handle_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_io_handle_t_int32_t(audio_io_handle_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_port_handle_t> aidl2legacy_int32_t_audio_port_handle_t(int32_t aidl) {
- return convertReinterpret<audio_port_handle_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_port_handle_t_int32_t(audio_port_handle_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_patch_handle_t> aidl2legacy_int32_t_audio_patch_handle_t(int32_t aidl) {
- return convertReinterpret<audio_patch_handle_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_patch_handle_t_int32_t(audio_patch_handle_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl) {
- return convertReinterpret<audio_unique_id_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl) {
- return convertReinterpret<audio_hw_sync_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl) {
- return convertReinterpret<pid_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<uid_t> aidl2legacy_int32_t_uid_t(int32_t aidl) {
- return convertReinterpret<uid_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_uid_t_int32_t(uid_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<String16> aidl2legacy_string_view_String16(std::string_view aidl) {
- return String16(aidl.data(), aidl.size());
-}
-
-ConversionResult<std::string> legacy2aidl_String16_string(const String16& legacy) {
- return std::string(String8(legacy).c_str());
-}
-
-// TODO b/182392769: create an optional -> optional util
-ConversionResult<std::optional<String16>>
-aidl2legacy_optional_string_view_optional_String16(std::optional<std::string_view> aidl) {
- if (!aidl.has_value()) {
- return std::nullopt;
- }
- ConversionResult<String16> conversion =
- VALUE_OR_RETURN(aidl2legacy_string_view_String16(aidl.value()));
- return conversion.value();
-}
-
-ConversionResult<std::optional<std::string_view>>
-legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy) {
- if (!legacy.has_value()) {
- return std::nullopt;
- }
- ConversionResult<std::string> conversion =
- VALUE_OR_RETURN(legacy2aidl_String16_string(legacy.value()));
- return conversion.value();
-}
-
-ConversionResult<String8> aidl2legacy_string_view_String8(std::string_view aidl) {
- return String8(aidl.data(), aidl.size());
-}
-
-ConversionResult<std::string> legacy2aidl_String8_string(const String8& legacy) {
- return std::string(legacy.c_str());
-}
-
ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl) {
switch (aidl) {
@@ -365,1471 +241,6 @@
return unexpected(BAD_VALUE);
}
-namespace {
-
-namespace detail {
-using AudioChannelBitPair = std::pair<audio_channel_mask_t, int>;
-using AudioChannelBitPairs = std::vector<AudioChannelBitPair>;
-using AudioChannelPair = std::pair<audio_channel_mask_t, AudioChannelLayout>;
-using AudioChannelPairs = std::vector<AudioChannelPair>;
-using AudioDevicePair = std::pair<audio_devices_t, AudioDeviceDescription>;
-using AudioDevicePairs = std::vector<AudioDevicePair>;
-using AudioFormatPair = std::pair<audio_format_t, AudioFormatDescription>;
-using AudioFormatPairs = std::vector<AudioFormatPair>;
-}
-
-const detail::AudioChannelBitPairs& getInAudioChannelBits() {
- static const detail::AudioChannelBitPairs pairs = {
- { AUDIO_CHANNEL_IN_LEFT, AudioChannelLayout::CHANNEL_FRONT_LEFT },
- { AUDIO_CHANNEL_IN_RIGHT, AudioChannelLayout::CHANNEL_FRONT_RIGHT },
- // AUDIO_CHANNEL_IN_FRONT is at the end
- { AUDIO_CHANNEL_IN_BACK, AudioChannelLayout::CHANNEL_BACK_CENTER },
- // AUDIO_CHANNEL_IN_*_PROCESSED not supported
- // AUDIO_CHANNEL_IN_PRESSURE not supported
- // AUDIO_CHANNEL_IN_*_AXIS not supported
- // AUDIO_CHANNEL_IN_VOICE_* not supported
- { AUDIO_CHANNEL_IN_BACK_LEFT, AudioChannelLayout::CHANNEL_BACK_LEFT },
- { AUDIO_CHANNEL_IN_BACK_RIGHT, AudioChannelLayout::CHANNEL_BACK_RIGHT },
- { AUDIO_CHANNEL_IN_CENTER, AudioChannelLayout::CHANNEL_FRONT_CENTER },
- { AUDIO_CHANNEL_IN_LOW_FREQUENCY, AudioChannelLayout::CHANNEL_LOW_FREQUENCY },
- { AUDIO_CHANNEL_IN_TOP_LEFT, AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT },
- { AUDIO_CHANNEL_IN_TOP_RIGHT, AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT },
- // When going from aidl to legacy, IN_CENTER is used
- { AUDIO_CHANNEL_IN_FRONT, AudioChannelLayout::CHANNEL_FRONT_CENTER }
- };
- return pairs;
-}
-
-const detail::AudioChannelPairs& getInAudioChannelPairs() {
- static const detail::AudioChannelPairs pairs = {
-#define DEFINE_INPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_##n, \
- AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
- AudioChannelLayout::LAYOUT_##n) \
- }
-
- DEFINE_INPUT_LAYOUT(MONO),
- DEFINE_INPUT_LAYOUT(STEREO),
- DEFINE_INPUT_LAYOUT(FRONT_BACK),
- // AUDIO_CHANNEL_IN_6 not supported
- DEFINE_INPUT_LAYOUT(2POINT0POINT2),
- DEFINE_INPUT_LAYOUT(2POINT1POINT2),
- DEFINE_INPUT_LAYOUT(3POINT0POINT2),
- DEFINE_INPUT_LAYOUT(3POINT1POINT2),
- DEFINE_INPUT_LAYOUT(5POINT1)
-#undef DEFINE_INPUT_LAYOUT
- };
- return pairs;
-}
-
-const detail::AudioChannelBitPairs& getOutAudioChannelBits() {
- static const detail::AudioChannelBitPairs pairs = {
-#define DEFINE_OUTPUT_BITS(n) \
- { AUDIO_CHANNEL_OUT_##n, AudioChannelLayout::CHANNEL_##n }
-
- DEFINE_OUTPUT_BITS(FRONT_LEFT),
- DEFINE_OUTPUT_BITS(FRONT_RIGHT),
- DEFINE_OUTPUT_BITS(FRONT_CENTER),
- DEFINE_OUTPUT_BITS(LOW_FREQUENCY),
- DEFINE_OUTPUT_BITS(BACK_LEFT),
- DEFINE_OUTPUT_BITS(BACK_RIGHT),
- DEFINE_OUTPUT_BITS(FRONT_LEFT_OF_CENTER),
- DEFINE_OUTPUT_BITS(FRONT_RIGHT_OF_CENTER),
- DEFINE_OUTPUT_BITS(BACK_CENTER),
- DEFINE_OUTPUT_BITS(SIDE_LEFT),
- DEFINE_OUTPUT_BITS(SIDE_RIGHT),
- DEFINE_OUTPUT_BITS(TOP_CENTER),
- DEFINE_OUTPUT_BITS(TOP_FRONT_LEFT),
- DEFINE_OUTPUT_BITS(TOP_FRONT_CENTER),
- DEFINE_OUTPUT_BITS(TOP_FRONT_RIGHT),
- DEFINE_OUTPUT_BITS(TOP_BACK_LEFT),
- DEFINE_OUTPUT_BITS(TOP_BACK_CENTER),
- DEFINE_OUTPUT_BITS(TOP_BACK_RIGHT),
- DEFINE_OUTPUT_BITS(TOP_SIDE_LEFT),
- DEFINE_OUTPUT_BITS(TOP_SIDE_RIGHT),
- DEFINE_OUTPUT_BITS(BOTTOM_FRONT_LEFT),
- DEFINE_OUTPUT_BITS(BOTTOM_FRONT_CENTER),
- DEFINE_OUTPUT_BITS(BOTTOM_FRONT_RIGHT),
- DEFINE_OUTPUT_BITS(LOW_FREQUENCY_2),
- DEFINE_OUTPUT_BITS(FRONT_WIDE_LEFT),
- DEFINE_OUTPUT_BITS(FRONT_WIDE_RIGHT),
-#undef DEFINE_OUTPUT_BITS
- { AUDIO_CHANNEL_OUT_HAPTIC_A, AudioChannelLayout::CHANNEL_HAPTIC_A },
- { AUDIO_CHANNEL_OUT_HAPTIC_B, AudioChannelLayout::CHANNEL_HAPTIC_B }
- };
- return pairs;
-}
-
-const detail::AudioChannelPairs& getOutAudioChannelPairs() {
- static const detail::AudioChannelPairs pairs = {
-#define DEFINE_OUTPUT_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_OUT_##n, \
- AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>( \
- AudioChannelLayout::LAYOUT_##n) \
- }
-
- DEFINE_OUTPUT_LAYOUT(MONO),
- DEFINE_OUTPUT_LAYOUT(STEREO),
- DEFINE_OUTPUT_LAYOUT(2POINT1),
- DEFINE_OUTPUT_LAYOUT(TRI),
- DEFINE_OUTPUT_LAYOUT(TRI_BACK),
- DEFINE_OUTPUT_LAYOUT(3POINT1),
- DEFINE_OUTPUT_LAYOUT(2POINT0POINT2),
- DEFINE_OUTPUT_LAYOUT(2POINT1POINT2),
- DEFINE_OUTPUT_LAYOUT(3POINT0POINT2),
- DEFINE_OUTPUT_LAYOUT(3POINT1POINT2),
- DEFINE_OUTPUT_LAYOUT(QUAD),
- DEFINE_OUTPUT_LAYOUT(QUAD_SIDE),
- DEFINE_OUTPUT_LAYOUT(SURROUND),
- DEFINE_OUTPUT_LAYOUT(PENTA),
- DEFINE_OUTPUT_LAYOUT(5POINT1),
- DEFINE_OUTPUT_LAYOUT(5POINT1_SIDE),
- DEFINE_OUTPUT_LAYOUT(5POINT1POINT2),
- DEFINE_OUTPUT_LAYOUT(5POINT1POINT4),
- DEFINE_OUTPUT_LAYOUT(6POINT1),
- DEFINE_OUTPUT_LAYOUT(7POINT1),
- DEFINE_OUTPUT_LAYOUT(7POINT1POINT2),
- DEFINE_OUTPUT_LAYOUT(7POINT1POINT4),
- DEFINE_OUTPUT_LAYOUT(13POINT_360RA),
- DEFINE_OUTPUT_LAYOUT(22POINT2),
- DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_A),
- DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_A),
- DEFINE_OUTPUT_LAYOUT(HAPTIC_AB),
- DEFINE_OUTPUT_LAYOUT(MONO_HAPTIC_AB),
- DEFINE_OUTPUT_LAYOUT(STEREO_HAPTIC_AB)
-#undef DEFINE_OUTPUT_LAYOUT
- };
- return pairs;
-}
-
-const detail::AudioChannelPairs& getVoiceAudioChannelPairs() {
- static const detail::AudioChannelPairs pairs = {
-#define DEFINE_VOICE_LAYOUT(n) \
- { \
- AUDIO_CHANNEL_IN_VOICE_##n, \
- AudioChannelLayout::make<AudioChannelLayout::Tag::voiceMask>( \
- AudioChannelLayout::VOICE_##n) \
- }
- DEFINE_VOICE_LAYOUT(UPLINK_MONO),
- DEFINE_VOICE_LAYOUT(DNLINK_MONO),
- DEFINE_VOICE_LAYOUT(CALL_MONO)
-#undef DEFINE_VOICE_LAYOUT
- };
- return pairs;
-}
-
-AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
- const std::string& connection = "") {
- AudioDeviceDescription result;
- result.type = type;
- result.connection = connection;
- return result;
-}
-
-void append_AudioDeviceDescription(detail::AudioDevicePairs& pairs,
- audio_devices_t inputType, audio_devices_t outputType,
- AudioDeviceType inType, AudioDeviceType outType,
- const std::string& connection = "") {
- pairs.push_back(std::make_pair(inputType, make_AudioDeviceDescription(inType, connection)));
- pairs.push_back(std::make_pair(outputType, make_AudioDeviceDescription(outType, connection)));
-}
-
-const detail::AudioDevicePairs& getAudioDevicePairs() {
- static const detail::AudioDevicePairs pairs = []() {
- detail::AudioDevicePairs pairs = {{
- {
- AUDIO_DEVICE_NONE, AudioDeviceDescription{}
- },
- {
- AUDIO_DEVICE_OUT_EARPIECE, make_AudioDeviceDescription(
- AudioDeviceType::OUT_SPEAKER_EARPIECE)
- },
- {
- AUDIO_DEVICE_OUT_SPEAKER, make_AudioDeviceDescription(
- AudioDeviceType::OUT_SPEAKER)
- },
- {
- AUDIO_DEVICE_OUT_WIRED_HEADPHONE, make_AudioDeviceDescription(
- AudioDeviceType::OUT_HEADPHONE,
- AudioDeviceDescription::CONNECTION_ANALOG())
- },
- {
- AUDIO_DEVICE_OUT_BLUETOOTH_SCO, make_AudioDeviceDescription(
- AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_BT_SCO())
- },
- {
- AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT, make_AudioDeviceDescription(
- AudioDeviceType::OUT_CARKIT,
- AudioDeviceDescription::CONNECTION_BT_SCO())
- },
- {
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES, make_AudioDeviceDescription(
- AudioDeviceType::OUT_HEADPHONE,
- AudioDeviceDescription::CONNECTION_BT_A2DP())
- },
- {
- AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, make_AudioDeviceDescription(
- AudioDeviceType::OUT_SPEAKER,
- AudioDeviceDescription::CONNECTION_BT_A2DP())
- },
- {
- AUDIO_DEVICE_OUT_TELEPHONY_TX, make_AudioDeviceDescription(
- AudioDeviceType::OUT_TELEPHONY_TX)
- },
- {
- AUDIO_DEVICE_OUT_AUX_LINE, make_AudioDeviceDescription(
- AudioDeviceType::OUT_LINE_AUX)
- },
- {
- AUDIO_DEVICE_OUT_SPEAKER_SAFE, make_AudioDeviceDescription(
- AudioDeviceType::OUT_SPEAKER_SAFE)
- },
- {
- AUDIO_DEVICE_OUT_HEARING_AID, make_AudioDeviceDescription(
- AudioDeviceType::OUT_HEARING_AID,
- AudioDeviceDescription::CONNECTION_WIRELESS())
- },
- {
- AUDIO_DEVICE_OUT_ECHO_CANCELLER, make_AudioDeviceDescription(
- AudioDeviceType::OUT_ECHO_CANCELLER)
- },
- {
- AUDIO_DEVICE_OUT_BLE_SPEAKER, make_AudioDeviceDescription(
- AudioDeviceType::OUT_SPEAKER,
- AudioDeviceDescription::CONNECTION_BT_LE())
- },
- {
- AUDIO_DEVICE_OUT_BLE_BROADCAST, make_AudioDeviceDescription(
- AudioDeviceType::OUT_BROADCAST,
- AudioDeviceDescription::CONNECTION_BT_LE())
- },
- // AUDIO_DEVICE_IN_AMBIENT and IN_COMMUNICATION are removed since they were deprecated.
- {
- AUDIO_DEVICE_IN_BUILTIN_MIC, make_AudioDeviceDescription(
- AudioDeviceType::IN_MICROPHONE)
- },
- {
- AUDIO_DEVICE_IN_BACK_MIC, make_AudioDeviceDescription(
- AudioDeviceType::IN_MICROPHONE_BACK)
- },
- {
- AUDIO_DEVICE_IN_TELEPHONY_RX, make_AudioDeviceDescription(
- AudioDeviceType::IN_TELEPHONY_RX)
- },
- {
- AUDIO_DEVICE_IN_TV_TUNER, make_AudioDeviceDescription(
- AudioDeviceType::IN_TV_TUNER)
- },
- {
- AUDIO_DEVICE_IN_LOOPBACK, make_AudioDeviceDescription(
- AudioDeviceType::IN_LOOPBACK)
- },
- {
- AUDIO_DEVICE_IN_BLUETOOTH_BLE, make_AudioDeviceDescription(
- AudioDeviceType::IN_DEVICE,
- AudioDeviceDescription::CONNECTION_BT_LE())
- },
- {
- AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
- AudioDeviceType::IN_ECHO_REFERENCE)
- }
- }};
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_DEFAULT, AUDIO_DEVICE_OUT_DEFAULT,
- AudioDeviceType::IN_DEFAULT, AudioDeviceType::OUT_DEFAULT);
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_WIRED_HEADSET, AUDIO_DEVICE_OUT_WIRED_HEADSET,
- AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_ANALOG());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET, AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET,
- AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_BT_SCO());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_HDMI, AUDIO_DEVICE_OUT_HDMI,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_HDMI());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
- AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
- AudioDeviceDescription::CONNECTION_ANALOG());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET,
- AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
- AudioDeviceDescription::CONNECTION_USB());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_USB_ACCESSORY, AUDIO_DEVICE_OUT_USB_ACCESSORY,
- AudioDeviceType::IN_ACCESSORY, AudioDeviceType::OUT_ACCESSORY,
- AudioDeviceDescription::CONNECTION_USB());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_USB_DEVICE, AUDIO_DEVICE_OUT_USB_DEVICE,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_USB());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_FM_TUNER, AUDIO_DEVICE_OUT_FM,
- AudioDeviceType::IN_FM_TUNER, AudioDeviceType::OUT_FM);
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_LINE, AUDIO_DEVICE_OUT_LINE,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_ANALOG());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_SPDIF, AUDIO_DEVICE_OUT_SPDIF,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_SPDIF());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_BT_A2DP());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_IP, AUDIO_DEVICE_OUT_IP,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_IP_V4());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_BUS, AUDIO_DEVICE_OUT_BUS,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_BUS());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_PROXY, AUDIO_DEVICE_OUT_PROXY,
- AudioDeviceType::IN_AFE_PROXY, AudioDeviceType::OUT_AFE_PROXY);
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_USB_HEADSET, AUDIO_DEVICE_OUT_USB_HEADSET,
- AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_USB());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_HDMI_ARC, AUDIO_DEVICE_OUT_HDMI_ARC,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_HDMI_ARC());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_HDMI_EARC, AUDIO_DEVICE_OUT_HDMI_EARC,
- AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
- AudioDeviceDescription::CONNECTION_HDMI_EARC());
- append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_BLE_HEADSET, AUDIO_DEVICE_OUT_BLE_HEADSET,
- AudioDeviceType::IN_HEADSET, AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_BT_LE());
- return pairs;
- }();
- return pairs;
-}
-
-AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
- AudioFormatDescription result;
- result.type = type;
- return result;
-}
-
-AudioFormatDescription make_AudioFormatDescription(PcmType pcm) {
- auto result = make_AudioFormatDescription(AudioFormatType::PCM);
- result.pcm = pcm;
- return result;
-}
-
-AudioFormatDescription make_AudioFormatDescription(const std::string& encoding) {
- AudioFormatDescription result;
- result.encoding = encoding;
- return result;
-}
-
-AudioFormatDescription make_AudioFormatDescription(PcmType transport,
- const std::string& encoding) {
- auto result = make_AudioFormatDescription(encoding);
- result.pcm = transport;
- return result;
-}
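These overloads cover the shapes of AudioFormatDescription used by the table that follows: plain PCM, a bitstream identified only by its MIME type, and a bitstream encapsulated over a PCM transport. A brief illustrative sketch (variable names are ours; all constants appear in the code below):

    // Plain 16-bit PCM samples.
    auto pcm = make_AudioFormatDescription(PcmType::INT_16_BIT);
    // A compressed bitstream, identified only by its MIME type.
    auto mp3 = make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG);
    // An IEC 61937 bitstream carried over a 16-bit PCM transport.
    auto iec = make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937);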
-
-const detail::AudioFormatPairs& getAudioFormatPairs() {
- static const detail::AudioFormatPairs pairs = {{
- {
- AUDIO_FORMAT_INVALID,
- make_AudioFormatDescription(AudioFormatType::SYS_RESERVED_INVALID)
- },
- {
- AUDIO_FORMAT_DEFAULT, AudioFormatDescription{}
- },
- {
- AUDIO_FORMAT_PCM_16_BIT, make_AudioFormatDescription(PcmType::INT_16_BIT)
- },
- {
- AUDIO_FORMAT_PCM_8_BIT, make_AudioFormatDescription(PcmType::UINT_8_BIT)
- },
- {
- AUDIO_FORMAT_PCM_32_BIT, make_AudioFormatDescription(PcmType::INT_32_BIT)
- },
- {
- AUDIO_FORMAT_PCM_8_24_BIT, make_AudioFormatDescription(PcmType::FIXED_Q_8_24)
- },
- {
- AUDIO_FORMAT_PCM_FLOAT, make_AudioFormatDescription(PcmType::FLOAT_32_BIT)
- },
- {
- AUDIO_FORMAT_PCM_24_BIT_PACKED, make_AudioFormatDescription(PcmType::INT_24_BIT)
- },
- {
- AUDIO_FORMAT_MP3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG)
- },
- {
- AUDIO_FORMAT_AMR_NB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_NB)
- },
- {
- AUDIO_FORMAT_AMR_WB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB)
- },
- {
- AUDIO_FORMAT_AAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MP4)
- },
- {
- AUDIO_FORMAT_AAC_MAIN, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_MAIN)
- },
- {
- AUDIO_FORMAT_AAC_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LC)
- },
- {
- AUDIO_FORMAT_AAC_SSR, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SSR)
- },
- {
- AUDIO_FORMAT_AAC_LTP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LTP)
- },
- {
- AUDIO_FORMAT_AAC_HE_V1, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V1)
- },
- {
- AUDIO_FORMAT_AAC_SCALABLE,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_SCALABLE)
- },
- {
- AUDIO_FORMAT_AAC_ERLC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ERLC)
- },
- {
- AUDIO_FORMAT_AAC_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LD)
- },
- {
- AUDIO_FORMAT_AAC_HE_V2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_HE_V2)
- },
- {
- AUDIO_FORMAT_AAC_ELD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ELD)
- },
- {
- AUDIO_FORMAT_AAC_XHE, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_XHE)
- },
- // AUDIO_FORMAT_HE_AAC_V1 and HE_AAC_V2 are removed since they were deprecated a long
- // time ago.
- {
- AUDIO_FORMAT_VORBIS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_VORBIS)
- },
- {
- AUDIO_FORMAT_OPUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_OPUS)
- },
- {
- AUDIO_FORMAT_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC3)
- },
- {
- AUDIO_FORMAT_E_AC3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3)
- },
- {
- AUDIO_FORMAT_E_AC3_JOC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EAC3_JOC)
- },
- {
- AUDIO_FORMAT_DTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS)
- },
- {
- AUDIO_FORMAT_DTS_HD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_HD)
- },
- // In the future, we would like to represent encapsulated bitstreams as
- // nested AudioFormatDescriptions. The legacy 'AUDIO_FORMAT_IEC61937' type doesn't
- // specify the format of the encapsulated bitstream.
- {
- AUDIO_FORMAT_IEC61937,
- make_AudioFormatDescription(PcmType::INT_16_BIT, MEDIA_MIMETYPE_AUDIO_IEC61937)
- },
- {
- AUDIO_FORMAT_DOLBY_TRUEHD,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_TRUEHD)
- },
- {
- AUDIO_FORMAT_EVRC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRC)
- },
- {
- AUDIO_FORMAT_EVRCB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCB)
- },
- {
- AUDIO_FORMAT_EVRCWB, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCWB)
- },
- {
- AUDIO_FORMAT_EVRCNW, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_EVRCNW)
- },
- {
- AUDIO_FORMAT_AAC_ADIF, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADIF)
- },
- {
- AUDIO_FORMAT_WMA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_WMA)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_WMA_PRO, make_AudioFormatDescription("audio/x-ms-wma.pro")
- },
- {
- AUDIO_FORMAT_AMR_WB_PLUS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AMR_WB_PLUS)
- },
- {
- AUDIO_FORMAT_MP2, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEG_LAYER_II)
- },
- {
- AUDIO_FORMAT_QCELP, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_QCELP)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_DSD, make_AudioFormatDescription("audio/vnd.sony.dsd")
- },
- {
- AUDIO_FORMAT_FLAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_FLAC)
- },
- {
- AUDIO_FORMAT_ALAC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_ALAC)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APE, make_AudioFormatDescription("audio/x-ape")
- },
- {
- AUDIO_FORMAT_AAC_ADTS, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_MAIN,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_MAIN)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LC)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_SSR,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SSR)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_LTP,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LTP)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_HE_V1,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V1)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_SCALABLE,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_SCALABLE)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_ERLC,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ERLC)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_LD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_LD)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_HE_V2,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_HE_V2)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_ELD,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_ELD)
- },
- {
- AUDIO_FORMAT_AAC_ADTS_XHE,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_ADTS_XHE)
- },
- {
- // Note: not in the IANA registry. "vnd.octel.sbc" is not BT SBC.
- AUDIO_FORMAT_SBC, make_AudioFormatDescription("audio/x-sbc")
- },
- {
- AUDIO_FORMAT_APTX, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_APTX)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APTX_HD, make_AudioFormatDescription("audio/vnd.qcom.aptx.hd")
- },
- {
- AUDIO_FORMAT_AC4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AC4)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_LDAC, make_AudioFormatDescription("audio/vnd.sony.ldac")
- },
- {
- AUDIO_FORMAT_MAT, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_1_0,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".1.0"))
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_2_0,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.0"))
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_MAT_2_1,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DOLBY_MAT + std::string(".2.1"))
- },
- {
- AUDIO_FORMAT_AAC_LATM, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC)
- },
- {
- AUDIO_FORMAT_AAC_LATM_LC, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_LC)
- },
- {
- AUDIO_FORMAT_AAC_LATM_HE_V1,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V1)
- },
- {
- AUDIO_FORMAT_AAC_LATM_HE_V2,
- make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_AAC_LATM_HE_V2)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_CELT, make_AudioFormatDescription("audio/x-celt")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APTX_ADAPTIVE, make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_LHDC, make_AudioFormatDescription("audio/vnd.savitech.lhdc")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_LHDC_LL, make_AudioFormatDescription("audio/vnd.savitech.lhdc.ll")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APTX_TWSP, make_AudioFormatDescription("audio/vnd.qcom.aptx.twsp")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_LC3, make_AudioFormatDescription("audio/x-lc3")
- },
- {
- AUDIO_FORMAT_MPEGH, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1)
- },
- {
- AUDIO_FORMAT_MPEGH_BL_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L3)
- },
- {
- AUDIO_FORMAT_MPEGH_BL_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_BL_L4)
- },
- {
- AUDIO_FORMAT_MPEGH_LC_L3, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L3)
- },
- {
- AUDIO_FORMAT_MPEGH_LC_L4, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_MPEGH_LC_L4)
- },
- {
- AUDIO_FORMAT_IEC60958,
- make_AudioFormatDescription(PcmType::INT_24_BIT, MEDIA_MIMETYPE_AUDIO_IEC60958)
- },
- {
- AUDIO_FORMAT_DTS_UHD, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DTS_UHD)
- },
- {
- AUDIO_FORMAT_DRA, make_AudioFormatDescription(MEDIA_MIMETYPE_AUDIO_DRA)
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APTX_ADAPTIVE_QLEA,
- make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r3")
- },
- {
- // Note: not in the IANA registry.
- AUDIO_FORMAT_APTX_ADAPTIVE_R4,
- make_AudioFormatDescription("audio/vnd.qcom.aptx.adaptive.r4")
- },
- }};
- return pairs;
-}
-
-template<typename S, typename T>
-std::unordered_map<S, T> make_DirectMap(const std::vector<std::pair<S, T>>& v) {
- std::unordered_map<S, T> result(v.begin(), v.end());
- LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
- return result;
-}
-
-template<typename S, typename T>
-std::unordered_map<S, T> make_DirectMap(
- const std::vector<std::pair<S, T>>& v1, const std::vector<std::pair<S, T>>& v2) {
- std::unordered_map<S, T> result(v1.begin(), v1.end());
- LOG_ALWAYS_FATAL_IF(result.size() != v1.size(), "Duplicate key elements detected in v1");
- result.insert(v2.begin(), v2.end());
- LOG_ALWAYS_FATAL_IF(result.size() != v1.size() + v2.size(),
- "Duplicate key elements detected in v1+v2");
- return result;
-}
-
-template<typename S, typename T>
-std::unordered_map<T, S> make_ReverseMap(const std::vector<std::pair<S, T>>& v) {
- std::unordered_map<T, S> result;
- std::transform(v.begin(), v.end(), std::inserter(result, result.begin()),
- [](const std::pair<S, T>& p) {
- return std::make_pair(p.second, p.first);
- });
- LOG_ALWAYS_FATAL_IF(result.size() != v.size(), "Duplicate key elements detected");
- return result;
-}
-
-} // namespace
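The map builders above back every table-driven conversion that follows; they abort via LOG_ALWAYS_FATAL_IF when a table accidentally maps two entries to the same key. A minimal sketch with made-up data:

    // Distinct keys make the direct map fine, but flipping the pairs for the reverse map
    // collides on the value "a", which trips the duplicate-key check.
    const std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "a"}};
    auto direct = make_DirectMap(v);    // ok: keys 1 and 2 are unique
    auto reverse = make_ReverseMap(v);  // aborts: duplicate key "a" after inversion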
-
-audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
- int aidlLayout, bool isInput) {
- auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
- const int aidlLayoutInitial = aidlLayout; // for error message
- audio_channel_mask_t legacy = AUDIO_CHANNEL_NONE;
- for (const auto& bitPair : bitMapping) {
- if ((aidlLayout & bitPair.second) == bitPair.second) {
- legacy = static_cast<audio_channel_mask_t>(legacy | bitPair.first);
- aidlLayout &= ~bitPair.second;
- if (aidlLayout == 0) {
- return legacy;
- }
- }
- }
- ALOGE("%s: aidl layout 0x%x contains bits 0x%x that have no match to legacy %s bits",
- __func__, aidlLayoutInitial, aidlLayout, isInput ? "input" : "output");
- return AUDIO_CHANNEL_NONE;
-}
-
-ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const AudioChannelLayout& aidl, bool isInput) {
- using ReverseMap = std::unordered_map<AudioChannelLayout, audio_channel_mask_t>;
- using Tag = AudioChannelLayout::Tag;
- static const ReverseMap mIn = make_ReverseMap(getInAudioChannelPairs());
- static const ReverseMap mOut = make_ReverseMap(getOutAudioChannelPairs());
- static const ReverseMap mVoice = make_ReverseMap(getVoiceAudioChannelPairs());
-
- auto convert = [](const AudioChannelLayout& aidl, const ReverseMap& m,
- const char* func, const char* type) -> ConversionResult<audio_channel_mask_t> {
- if (auto it = m.find(aidl); it != m.end()) {
- return it->second;
- } else {
- ALOGW("%s: no legacy %s audio_channel_mask_t found for %s", func, type,
- aidl.toString().c_str());
- return unexpected(BAD_VALUE);
- }
- };
-
- switch (aidl.getTag()) {
- case Tag::none:
- return AUDIO_CHANNEL_NONE;
- case Tag::invalid:
- return AUDIO_CHANNEL_INVALID;
- case Tag::indexMask:
- // Index masks do not have pre-defined values.
- if (const int bits = aidl.get<Tag::indexMask>();
- __builtin_popcount(bits) != 0 &&
- __builtin_popcount(bits) <= AUDIO_CHANNEL_COUNT_MAX) {
- return audio_channel_mask_from_representation_and_bits(
- AUDIO_CHANNEL_REPRESENTATION_INDEX, bits);
- } else {
- ALOGE("%s: invalid indexMask value 0x%x in %s",
- __func__, bits, aidl.toString().c_str());
- return unexpected(BAD_VALUE);
- }
- case Tag::layoutMask:
- // The fast path is to find a direct match for some known layout mask.
- if (const auto layoutMatch = convert(aidl, isInput ? mIn : mOut, __func__,
- isInput ? "input" : "output");
- layoutMatch.ok()) {
- return layoutMatch;
- }
- // If a match for a predefined layout wasn't found, make a custom one from bits.
- if (audio_channel_mask_t bitMask =
- aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
- aidl.get<Tag::layoutMask>(), isInput);
- bitMask != AUDIO_CHANNEL_NONE) {
- return bitMask;
- }
- return unexpected(BAD_VALUE);
- case Tag::voiceMask:
- return convert(aidl, mVoice, __func__, "voice");
- }
- ALOGE("%s: unexpected tag value %d", __func__, aidl.getTag());
- return unexpected(BAD_VALUE);
-}
-
-int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
- audio_channel_mask_t legacy, bool isInput) {
- auto& bitMapping = isInput ? getInAudioChannelBits() : getOutAudioChannelBits();
- const int legacyInitial = legacy; // for error message
- int aidlLayout = 0;
- for (const auto& bitPair : bitMapping) {
- if ((legacy & bitPair.first) == bitPair.first) {
- aidlLayout |= bitPair.second;
- legacy = static_cast<audio_channel_mask_t>(legacy & ~bitPair.first);
- if (legacy == 0) {
- return aidlLayout;
- }
- }
- }
- ALOGE("%s: legacy %s audio_channel_mask_t 0x%x contains unrecognized bits 0x%x",
- __func__, isInput ? "input" : "output", legacyInitial, legacy);
- return 0;
-}
-
-ConversionResult<AudioChannelLayout> legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
- audio_channel_mask_t legacy, bool isInput) {
- using DirectMap = std::unordered_map<audio_channel_mask_t, AudioChannelLayout>;
- using Tag = AudioChannelLayout::Tag;
- static const DirectMap mInAndVoice = make_DirectMap(
- getInAudioChannelPairs(), getVoiceAudioChannelPairs());
- static const DirectMap mOut = make_DirectMap(getOutAudioChannelPairs());
-
- auto convert = [](const audio_channel_mask_t legacy, const DirectMap& m,
- const char* func, const char* type) -> ConversionResult<AudioChannelLayout> {
- if (auto it = m.find(legacy); it != m.end()) {
- return it->second;
- } else {
- ALOGW("%s: no AudioChannelLayout found for legacy %s audio_channel_mask_t value 0x%x",
- func, type, legacy);
- return unexpected(BAD_VALUE);
- }
- };
-
- if (legacy == AUDIO_CHANNEL_NONE) {
- return AudioChannelLayout{};
- } else if (legacy == AUDIO_CHANNEL_INVALID) {
- return AudioChannelLayout::make<Tag::invalid>(0);
- }
-
- const audio_channel_representation_t repr = audio_channel_mask_get_representation(legacy);
- if (repr == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
- if (audio_channel_mask_is_valid(legacy)) {
- const int indexMask = VALUE_OR_RETURN(
- convertIntegral<int>(audio_channel_mask_get_bits(legacy)));
- return AudioChannelLayout::make<Tag::indexMask>(indexMask);
- } else {
- ALOGE("%s: legacy audio_channel_mask_t value 0x%x is invalid", __func__, legacy);
- return unexpected(BAD_VALUE);
- }
- } else if (repr == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
- // The fast path is to find a direct match for some known layout mask.
- if (const auto layoutMatch = convert(legacy, isInput ? mInAndVoice : mOut, __func__,
- isInput ? "input / voice" : "output");
- layoutMatch.ok()) {
- return layoutMatch;
- }
- // If a match for a predefined layout wasn't found, make a custom one from bits,
- // rejecting those with voice channel bits.
- if (!isInput ||
- (legacy & (AUDIO_CHANNEL_IN_VOICE_UPLINK | AUDIO_CHANNEL_IN_VOICE_DNLINK)) == 0) {
- if (int bitMaskLayout =
- legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
- legacy, isInput);
- bitMaskLayout != 0) {
- return AudioChannelLayout::make<Tag::layoutMask>(bitMaskLayout);
- }
- } else {
- ALOGE("%s: legacy audio_channel_mask_t value 0x%x contains voice bits",
- __func__, legacy);
- }
- return unexpected(BAD_VALUE);
- }
-
- ALOGE("%s: unknown representation %d in audio_channel_mask_t value 0x%x",
- __func__, repr, legacy);
- return unexpected(BAD_VALUE);
-}
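Position masks that do not match a predefined layout fall back to the per-bit tables in both directions; a short usage sketch (the mask value is chosen arbitrarily):

    // FRONT_LEFT | TOP_SIDE_RIGHT is not a predefined layout, so the bit-by-bit path is taken.
    const auto legacy = static_cast<audio_channel_mask_t>(
            AUDIO_CHANNEL_OUT_FRONT_LEFT | AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT);
    auto aidl = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, false /*isInput*/);
    // On success, aidl.value() holds Tag::layoutMask with CHANNEL_FRONT_LEFT | CHANNEL_TOP_SIDE_RIGHT,
    // and aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.value(), false /*isInput*/) is
    // expected to reproduce the original legacy mask.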
-
-ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
- const AudioDeviceDescription& aidl) {
- static const std::unordered_map<AudioDeviceDescription, audio_devices_t> m =
- make_ReverseMap(getAudioDevicePairs());
- if (auto it = m.find(aidl); it != m.end()) {
- return it->second;
- } else {
- ALOGE("%s: no legacy audio_devices_t found for %s", __func__, aidl.toString().c_str());
- return unexpected(BAD_VALUE);
- }
-}
-
-ConversionResult<AudioDeviceDescription> legacy2aidl_audio_devices_t_AudioDeviceDescription(
- audio_devices_t legacy) {
- static const std::unordered_map<audio_devices_t, AudioDeviceDescription> m =
- make_DirectMap(getAudioDevicePairs());
- if (auto it = m.find(legacy); it != m.end()) {
- return it->second;
- } else {
- ALOGE("%s: no AudioDeviceDescription found for legacy audio_devices_t value 0x%x",
- __func__, legacy);
- return unexpected(BAD_VALUE);
- }
-}
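Device conversion in both directions is a straight lookup into the pairs table built earlier; a short sketch:

    // AUDIO_DEVICE_OUT_WIRED_HEADPHONE is listed above as { OUT_HEADPHONE, CONNECTION_ANALOG() }.
    auto desc = legacy2aidl_audio_devices_t_AudioDeviceDescription(AUDIO_DEVICE_OUT_WIRED_HEADPHONE);
    // The reverse lookup returns the original constant; values missing from the table
    // produce unexpected(BAD_VALUE) instead.
    auto legacy = aidl2legacy_AudioDeviceDescription_audio_devices_t(desc.value());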
-
-status_t aidl2legacy_AudioDevice_audio_device(
- const AudioDevice& aidl,
- audio_devices_t* legacyType, char* legacyAddress) {
- *legacyType = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
- return aidl2legacy_string(
- aidl.address.get<AudioDeviceAddress::id>(),
- legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN);
-}
-
-status_t aidl2legacy_AudioDevice_audio_device(
- const AudioDevice& aidl,
- audio_devices_t* legacyType, String8* legacyAddress) {
- *legacyType = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
- *legacyAddress = VALUE_OR_RETURN_STATUS(aidl2legacy_string_view_String8(
- aidl.address.get<AudioDeviceAddress::id>()));
- return OK;
-}
-
-status_t aidl2legacy_AudioDevice_audio_device(
- const AudioDevice& aidl,
- audio_devices_t* legacyType, std::string* legacyAddress) {
- *legacyType = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioDeviceDescription_audio_devices_t(aidl.type));
- *legacyAddress = aidl.address.get<AudioDeviceAddress::id>();
- return OK;
-}
-
-ConversionResult<AudioDevice> legacy2aidl_audio_device_AudioDevice(
- audio_devices_t legacyType, const char* legacyAddress) {
- AudioDevice aidl;
- aidl.type = VALUE_OR_RETURN(
- legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
- const std::string aidl_id = VALUE_OR_RETURN(
- legacy2aidl_string(legacyAddress, AUDIO_DEVICE_MAX_ADDRESS_LEN));
- aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
- return aidl;
-}
-
-ConversionResult<AudioDevice>
-legacy2aidl_audio_device_AudioDevice(
- audio_devices_t legacyType, const String8& legacyAddress) {
- AudioDevice aidl;
- aidl.type = VALUE_OR_RETURN(
- legacy2aidl_audio_devices_t_AudioDeviceDescription(legacyType));
- const std::string aidl_id = VALUE_OR_RETURN(
- legacy2aidl_String8_string(legacyAddress));
- aidl.address = AudioDeviceAddress::make<AudioDeviceAddress::id>(aidl_id);
- return aidl;
-}
-
-ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const AudioFormatDescription& aidl) {
- static const std::unordered_map<AudioFormatDescription, audio_format_t> m =
- make_ReverseMap(getAudioFormatPairs());
- if (auto it = m.find(aidl); it != m.end()) {
- return it->second;
- } else {
- ALOGE("%s: no legacy audio_format_t found for %s", __func__, aidl.toString().c_str());
- return unexpected(BAD_VALUE);
- }
-}
-
-ConversionResult<AudioFormatDescription> legacy2aidl_audio_format_t_AudioFormatDescription(
- audio_format_t legacy) {
- static const std::unordered_map<audio_format_t, AudioFormatDescription> m =
- make_DirectMap(getAudioFormatPairs());
- if (auto it = m.find(legacy); it != m.end()) {
- return it->second;
- } else {
- ALOGE("%s: no AudioFormatDescription found for legacy audio_format_t value 0x%x",
- __func__, legacy);
- return unexpected(BAD_VALUE);
- }
-}
-
-ConversionResult<audio_gain_mode_t> aidl2legacy_AudioGainMode_audio_gain_mode_t(
- AudioGainMode aidl) {
- switch (aidl) {
- case AudioGainMode::JOINT:
- return AUDIO_GAIN_MODE_JOINT;
- case AudioGainMode::CHANNELS:
- return AUDIO_GAIN_MODE_CHANNELS;
- case AudioGainMode::RAMP:
- return AUDIO_GAIN_MODE_RAMP;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioGainMode> legacy2aidl_audio_gain_mode_t_AudioGainMode(
- audio_gain_mode_t legacy) {
- switch (legacy) {
- case AUDIO_GAIN_MODE_JOINT:
- return AudioGainMode::JOINT;
- case AUDIO_GAIN_MODE_CHANNELS:
- return AudioGainMode::CHANNELS;
- case AUDIO_GAIN_MODE_RAMP:
- return AudioGainMode::RAMP;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl) {
- return convertBitmask<audio_gain_mode_t, int32_t, audio_gain_mode_t, AudioGainMode>(
- aidl, aidl2legacy_AudioGainMode_audio_gain_mode_t,
- // AudioGainMode is index-based.
- indexToEnum_index<AudioGainMode>,
- // AUDIO_GAIN_MODE_* constants are mask-based.
- enumToMask_bitmask<audio_gain_mode_t, audio_gain_mode_t>);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy) {
- return convertBitmask<int32_t, audio_gain_mode_t, AudioGainMode, audio_gain_mode_t>(
- legacy, legacy2aidl_audio_gain_mode_t_AudioGainMode,
- // AUDIO_GAIN_MODE_* constants are mask-based.
- indexToEnum_bitmask<audio_gain_mode_t>,
- // AudioGainMode is index-based.
- enumToMask_index<int32_t, AudioGainMode>);
-}
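The two converters above bridge an index-based AIDL enum and mask-based legacy constants; a sketch of the expected round trip, assuming the usual AUDIO_GAIN_MODE_* bitmask values from the legacy headers:

    // The AIDL mask sets bit (1 << ordinal) for each active AudioGainMode.
    const int32_t aidlMask = (1 << static_cast<int>(AudioGainMode::JOINT))
            | (1 << static_cast<int>(AudioGainMode::RAMP));
    auto legacyMask = aidl2legacy_int32_t_audio_gain_mode_t_mask(aidlMask);
    // On success, legacyMask.value() == (AUDIO_GAIN_MODE_JOINT | AUDIO_GAIN_MODE_RAMP), and
    // legacy2aidl_audio_gain_mode_t_int32_t_mask(legacyMask.value()) restores aidlMask.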
-
-ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
- const AudioGainConfig& aidl, bool isInput) {
- audio_gain_config legacy;
- legacy.index = VALUE_OR_RETURN(convertIntegral<int>(aidl.index));
- legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
- const bool isJoint = bitmaskIsSet(aidl.mode, AudioGainMode::JOINT);
- size_t numValues = isJoint ? 1
- : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
- : audio_channel_count_from_out_mask(legacy.channel_mask);
- if (aidl.values.size() != numValues || aidl.values.size() > std::size(legacy.values)) {
- return unexpected(BAD_VALUE);
- }
- for (size_t i = 0; i < numValues; ++i) {
- legacy.values[i] = VALUE_OR_RETURN(convertIntegral<int>(aidl.values[i]));
- }
- legacy.ramp_duration_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.rampDurationMs));
- return legacy;
-}
-
-ConversionResult<AudioGainConfig> legacy2aidl_audio_gain_config_AudioGainConfig(
- const audio_gain_config& legacy, bool isInput) {
- AudioGainConfig aidl;
- aidl.index = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.index));
- aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
- const bool isJoint = (legacy.mode & AUDIO_GAIN_MODE_JOINT) != 0;
- size_t numValues = isJoint ? 1
- : isInput ? audio_channel_count_from_in_mask(legacy.channel_mask)
- : audio_channel_count_from_out_mask(legacy.channel_mask);
- aidl.values.resize(numValues);
- for (size_t i = 0; i < numValues; ++i) {
- aidl.values[i] = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.values[i]));
- }
- aidl.rampDurationMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.ramp_duration_ms));
- return aidl;
-}
-
-ConversionResult<audio_input_flags_t> aidl2legacy_AudioInputFlags_audio_input_flags_t(
- AudioInputFlags aidl) {
- switch (aidl) {
- case AudioInputFlags::FAST:
- return AUDIO_INPUT_FLAG_FAST;
- case AudioInputFlags::HW_HOTWORD:
- return AUDIO_INPUT_FLAG_HW_HOTWORD;
- case AudioInputFlags::RAW:
- return AUDIO_INPUT_FLAG_RAW;
- case AudioInputFlags::SYNC:
- return AUDIO_INPUT_FLAG_SYNC;
- case AudioInputFlags::MMAP_NOIRQ:
- return AUDIO_INPUT_FLAG_MMAP_NOIRQ;
- case AudioInputFlags::VOIP_TX:
- return AUDIO_INPUT_FLAG_VOIP_TX;
- case AudioInputFlags::HW_AV_SYNC:
- return AUDIO_INPUT_FLAG_HW_AV_SYNC;
- case AudioInputFlags::DIRECT:
- return AUDIO_INPUT_FLAG_DIRECT;
- case AudioInputFlags::ULTRASOUND:
- return AUDIO_INPUT_FLAG_ULTRASOUND;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioInputFlags> legacy2aidl_audio_input_flags_t_AudioInputFlags(
- audio_input_flags_t legacy) {
- switch (legacy) {
- case AUDIO_INPUT_FLAG_NONE:
- break; // should not get here; every value must be listed to satisfy -Werror,-Wswitch
- case AUDIO_INPUT_FLAG_FAST:
- return AudioInputFlags::FAST;
- case AUDIO_INPUT_FLAG_HW_HOTWORD:
- return AudioInputFlags::HW_HOTWORD;
- case AUDIO_INPUT_FLAG_RAW:
- return AudioInputFlags::RAW;
- case AUDIO_INPUT_FLAG_SYNC:
- return AudioInputFlags::SYNC;
- case AUDIO_INPUT_FLAG_MMAP_NOIRQ:
- return AudioInputFlags::MMAP_NOIRQ;
- case AUDIO_INPUT_FLAG_VOIP_TX:
- return AudioInputFlags::VOIP_TX;
- case AUDIO_INPUT_FLAG_HW_AV_SYNC:
- return AudioInputFlags::HW_AV_SYNC;
- case AUDIO_INPUT_FLAG_DIRECT:
- return AudioInputFlags::DIRECT;
- case AUDIO_INPUT_FLAG_ULTRASOUND:
- return AudioInputFlags::ULTRASOUND;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_output_flags_t> aidl2legacy_AudioOutputFlags_audio_output_flags_t(
- AudioOutputFlags aidl) {
- switch (aidl) {
- case AudioOutputFlags::DIRECT:
- return AUDIO_OUTPUT_FLAG_DIRECT;
- case AudioOutputFlags::PRIMARY:
- return AUDIO_OUTPUT_FLAG_PRIMARY;
- case AudioOutputFlags::FAST:
- return AUDIO_OUTPUT_FLAG_FAST;
- case AudioOutputFlags::DEEP_BUFFER:
- return AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
- case AudioOutputFlags::COMPRESS_OFFLOAD:
- return AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
- case AudioOutputFlags::NON_BLOCKING:
- return AUDIO_OUTPUT_FLAG_NON_BLOCKING;
- case AudioOutputFlags::HW_AV_SYNC:
- return AUDIO_OUTPUT_FLAG_HW_AV_SYNC;
- case AudioOutputFlags::TTS:
- return AUDIO_OUTPUT_FLAG_TTS;
- case AudioOutputFlags::RAW:
- return AUDIO_OUTPUT_FLAG_RAW;
- case AudioOutputFlags::SYNC:
- return AUDIO_OUTPUT_FLAG_SYNC;
- case AudioOutputFlags::IEC958_NONAUDIO:
- return AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
- case AudioOutputFlags::DIRECT_PCM:
- return AUDIO_OUTPUT_FLAG_DIRECT_PCM;
- case AudioOutputFlags::MMAP_NOIRQ:
- return AUDIO_OUTPUT_FLAG_MMAP_NOIRQ;
- case AudioOutputFlags::VOIP_RX:
- return AUDIO_OUTPUT_FLAG_VOIP_RX;
- case AudioOutputFlags::INCALL_MUSIC:
- return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
- case AudioOutputFlags::GAPLESS_OFFLOAD:
- return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
- case AudioOutputFlags::ULTRASOUND:
- return AUDIO_OUTPUT_FLAG_ULTRASOUND;
- case AudioOutputFlags::SPATIALIZER:
- return AUDIO_OUTPUT_FLAG_SPATIALIZER;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioOutputFlags> legacy2aidl_audio_output_flags_t_AudioOutputFlags(
- audio_output_flags_t legacy) {
- switch (legacy) {
- case AUDIO_OUTPUT_FLAG_NONE:
- break; // should not get here; every value must be listed to satisfy -Werror,-Wswitch
- case AUDIO_OUTPUT_FLAG_DIRECT:
- return AudioOutputFlags::DIRECT;
- case AUDIO_OUTPUT_FLAG_PRIMARY:
- return AudioOutputFlags::PRIMARY;
- case AUDIO_OUTPUT_FLAG_FAST:
- return AudioOutputFlags::FAST;
- case AUDIO_OUTPUT_FLAG_DEEP_BUFFER:
- return AudioOutputFlags::DEEP_BUFFER;
- case AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD:
- return AudioOutputFlags::COMPRESS_OFFLOAD;
- case AUDIO_OUTPUT_FLAG_NON_BLOCKING:
- return AudioOutputFlags::NON_BLOCKING;
- case AUDIO_OUTPUT_FLAG_HW_AV_SYNC:
- return AudioOutputFlags::HW_AV_SYNC;
- case AUDIO_OUTPUT_FLAG_TTS:
- return AudioOutputFlags::TTS;
- case AUDIO_OUTPUT_FLAG_RAW:
- return AudioOutputFlags::RAW;
- case AUDIO_OUTPUT_FLAG_SYNC:
- return AudioOutputFlags::SYNC;
- case AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO:
- return AudioOutputFlags::IEC958_NONAUDIO;
- case AUDIO_OUTPUT_FLAG_DIRECT_PCM:
- return AudioOutputFlags::DIRECT_PCM;
- case AUDIO_OUTPUT_FLAG_MMAP_NOIRQ:
- return AudioOutputFlags::MMAP_NOIRQ;
- case AUDIO_OUTPUT_FLAG_VOIP_RX:
- return AudioOutputFlags::VOIP_RX;
- case AUDIO_OUTPUT_FLAG_INCALL_MUSIC:
- return AudioOutputFlags::INCALL_MUSIC;
- case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
- return AudioOutputFlags::GAPLESS_OFFLOAD;
- case AUDIO_OUTPUT_FLAG_ULTRASOUND:
- return AudioOutputFlags::ULTRASOUND;
- case AUDIO_OUTPUT_FLAG_SPATIALIZER:
- return AudioOutputFlags::SPATIALIZER;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
- int32_t aidl) {
- using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
-
- LegacyMask converted = VALUE_OR_RETURN(
- (convertBitmask<LegacyMask, int32_t, audio_input_flags_t, AudioInputFlags>(
- aidl, aidl2legacy_AudioInputFlags_audio_input_flags_t,
- indexToEnum_index<AudioInputFlags>,
- enumToMask_bitmask<LegacyMask, audio_input_flags_t>)));
- return static_cast<audio_input_flags_t>(converted);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
- audio_input_flags_t legacy) {
- using LegacyMask = std::underlying_type_t<audio_input_flags_t>;
-
- LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
- return convertBitmask<int32_t, LegacyMask, AudioInputFlags, audio_input_flags_t>(
- legacyMask, legacy2aidl_audio_input_flags_t_AudioInputFlags,
- indexToEnum_bitmask<audio_input_flags_t>,
- enumToMask_index<int32_t, AudioInputFlags>);
-}
-
-ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
- int32_t aidl) {
- return convertBitmask<audio_output_flags_t,
- int32_t,
- audio_output_flags_t,
- AudioOutputFlags>(
- aidl, aidl2legacy_AudioOutputFlags_audio_output_flags_t,
- indexToEnum_index<AudioOutputFlags>,
- enumToMask_bitmask<audio_output_flags_t, audio_output_flags_t>);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
- audio_output_flags_t legacy) {
- using LegacyMask = std::underlying_type_t<audio_output_flags_t>;
-
- LegacyMask legacyMask = static_cast<LegacyMask>(legacy);
- return convertBitmask<int32_t, LegacyMask, AudioOutputFlags, audio_output_flags_t>(
- legacyMask, legacy2aidl_audio_output_flags_t_AudioOutputFlags,
- indexToEnum_bitmask<audio_output_flags_t>,
- enumToMask_index<int32_t, AudioOutputFlags>);
-}
-
-ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
- const AudioIoFlags& aidl, bool isInput) {
- audio_io_flags legacy;
- if (isInput) {
- legacy.input = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_input_flags_t_mask(
- VALUE_OR_RETURN(UNION_GET(aidl, input))));
- } else {
- legacy.output = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_output_flags_t_mask(
- VALUE_OR_RETURN(UNION_GET(aidl, output))));
- }
- return legacy;
-}
-
-ConversionResult<AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
- const audio_io_flags& legacy, bool isInput) {
- AudioIoFlags aidl;
- if (isInput) {
- UNION_SET(aidl, input,
- VALUE_OR_RETURN(legacy2aidl_audio_input_flags_t_int32_t_mask(legacy.input)));
- } else {
- UNION_SET(aidl, output,
- VALUE_OR_RETURN(legacy2aidl_audio_output_flags_t_int32_t_mask(legacy.output)));
- }
- return aidl;
-}
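AudioIoFlags is a union tagged by direction, so the caller's isInput argument selects which leg gets converted; a brief sketch (the flag chosen is illustrative):

    AudioIoFlags aidl;
    UNION_SET(aidl, output, 1 << static_cast<int>(AudioOutputFlags::FAST));
    // With isInput == false the output branch is taken; on success legacy.value().output
    // contains AUDIO_OUTPUT_FLAG_FAST.
    auto legacy = aidl2legacy_AudioIoFlags_audio_io_flags(aidl, false /*isInput*/);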
-
-ConversionResult<audio_port_config_device_ext>
-aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
- const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
- audio_port_config_device_ext legacy;
- legacy.hw_module = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
- RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
- aidl.device, &legacy.type, legacy.address));
- return legacy;
-}
-
-status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
- const audio_port_config_device_ext& legacy,
- AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
- aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl->device = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
- return OK;
-}
-
-ConversionResult<audio_stream_type_t> aidl2legacy_AudioStreamType_audio_stream_type_t(
- AudioStreamType aidl) {
- switch (aidl) {
- case AudioStreamType::INVALID:
- break; // return error
- case AudioStreamType::SYS_RESERVED_DEFAULT:
- return AUDIO_STREAM_DEFAULT;
- case AudioStreamType::VOICE_CALL:
- return AUDIO_STREAM_VOICE_CALL;
- case AudioStreamType::SYSTEM:
- return AUDIO_STREAM_SYSTEM;
- case AudioStreamType::RING:
- return AUDIO_STREAM_RING;
- case AudioStreamType::MUSIC:
- return AUDIO_STREAM_MUSIC;
- case AudioStreamType::ALARM:
- return AUDIO_STREAM_ALARM;
- case AudioStreamType::NOTIFICATION:
- return AUDIO_STREAM_NOTIFICATION;
- case AudioStreamType::BLUETOOTH_SCO:
- return AUDIO_STREAM_BLUETOOTH_SCO;
- case AudioStreamType::ENFORCED_AUDIBLE:
- return AUDIO_STREAM_ENFORCED_AUDIBLE;
- case AudioStreamType::DTMF:
- return AUDIO_STREAM_DTMF;
- case AudioStreamType::TTS:
- return AUDIO_STREAM_TTS;
- case AudioStreamType::ACCESSIBILITY:
- return AUDIO_STREAM_ACCESSIBILITY;
- case AudioStreamType::ASSISTANT:
- return AUDIO_STREAM_ASSISTANT;
- case AudioStreamType::SYS_RESERVED_REROUTING:
- return AUDIO_STREAM_REROUTING;
- case AudioStreamType::SYS_RESERVED_PATCH:
- return AUDIO_STREAM_PATCH;
- case AudioStreamType::CALL_ASSISTANT:
- return AUDIO_STREAM_CALL_ASSISTANT;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioStreamType> legacy2aidl_audio_stream_type_t_AudioStreamType(
- audio_stream_type_t legacy) {
- switch (legacy) {
- case AUDIO_STREAM_DEFAULT:
- return AudioStreamType::SYS_RESERVED_DEFAULT;
- case AUDIO_STREAM_VOICE_CALL:
- return AudioStreamType::VOICE_CALL;
- case AUDIO_STREAM_SYSTEM:
- return AudioStreamType::SYSTEM;
- case AUDIO_STREAM_RING:
- return AudioStreamType::RING;
- case AUDIO_STREAM_MUSIC:
- return AudioStreamType::MUSIC;
- case AUDIO_STREAM_ALARM:
- return AudioStreamType::ALARM;
- case AUDIO_STREAM_NOTIFICATION:
- return AudioStreamType::NOTIFICATION;
- case AUDIO_STREAM_BLUETOOTH_SCO:
- return AudioStreamType::BLUETOOTH_SCO;
- case AUDIO_STREAM_ENFORCED_AUDIBLE:
- return AudioStreamType::ENFORCED_AUDIBLE;
- case AUDIO_STREAM_DTMF:
- return AudioStreamType::DTMF;
- case AUDIO_STREAM_TTS:
- return AudioStreamType::TTS;
- case AUDIO_STREAM_ACCESSIBILITY:
- return AudioStreamType::ACCESSIBILITY;
- case AUDIO_STREAM_ASSISTANT:
- return AudioStreamType::ASSISTANT;
- case AUDIO_STREAM_REROUTING:
- return AudioStreamType::SYS_RESERVED_REROUTING;
- case AUDIO_STREAM_PATCH:
- return AudioStreamType::SYS_RESERVED_PATCH;
- case AUDIO_STREAM_CALL_ASSISTANT:
- return AudioStreamType::CALL_ASSISTANT;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
- AudioSource aidl) {
- switch (aidl) {
- case AudioSource::SYS_RESERVED_INVALID:
- return AUDIO_SOURCE_INVALID;
- case AudioSource::DEFAULT:
- return AUDIO_SOURCE_DEFAULT;
- case AudioSource::MIC:
- return AUDIO_SOURCE_MIC;
- case AudioSource::VOICE_UPLINK:
- return AUDIO_SOURCE_VOICE_UPLINK;
- case AudioSource::VOICE_DOWNLINK:
- return AUDIO_SOURCE_VOICE_DOWNLINK;
- case AudioSource::VOICE_CALL:
- return AUDIO_SOURCE_VOICE_CALL;
- case AudioSource::CAMCORDER:
- return AUDIO_SOURCE_CAMCORDER;
- case AudioSource::VOICE_RECOGNITION:
- return AUDIO_SOURCE_VOICE_RECOGNITION;
- case AudioSource::VOICE_COMMUNICATION:
- return AUDIO_SOURCE_VOICE_COMMUNICATION;
- case AudioSource::REMOTE_SUBMIX:
- return AUDIO_SOURCE_REMOTE_SUBMIX;
- case AudioSource::UNPROCESSED:
- return AUDIO_SOURCE_UNPROCESSED;
- case AudioSource::VOICE_PERFORMANCE:
- return AUDIO_SOURCE_VOICE_PERFORMANCE;
- case AudioSource::ULTRASOUND:
- return AUDIO_SOURCE_ULTRASOUND;
- case AudioSource::ECHO_REFERENCE:
- return AUDIO_SOURCE_ECHO_REFERENCE;
- case AudioSource::FM_TUNER:
- return AUDIO_SOURCE_FM_TUNER;
- case AudioSource::HOTWORD:
- return AUDIO_SOURCE_HOTWORD;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioSource> legacy2aidl_audio_source_t_AudioSource(
- audio_source_t legacy) {
- switch (legacy) {
- case AUDIO_SOURCE_INVALID:
- return AudioSource::SYS_RESERVED_INVALID;
- case AUDIO_SOURCE_DEFAULT:
- return AudioSource::DEFAULT;
- case AUDIO_SOURCE_MIC:
- return AudioSource::MIC;
- case AUDIO_SOURCE_VOICE_UPLINK:
- return AudioSource::VOICE_UPLINK;
- case AUDIO_SOURCE_VOICE_DOWNLINK:
- return AudioSource::VOICE_DOWNLINK;
- case AUDIO_SOURCE_VOICE_CALL:
- return AudioSource::VOICE_CALL;
- case AUDIO_SOURCE_CAMCORDER:
- return AudioSource::CAMCORDER;
- case AUDIO_SOURCE_VOICE_RECOGNITION:
- return AudioSource::VOICE_RECOGNITION;
- case AUDIO_SOURCE_VOICE_COMMUNICATION:
- return AudioSource::VOICE_COMMUNICATION;
- case AUDIO_SOURCE_REMOTE_SUBMIX:
- return AudioSource::REMOTE_SUBMIX;
- case AUDIO_SOURCE_UNPROCESSED:
- return AudioSource::UNPROCESSED;
- case AUDIO_SOURCE_VOICE_PERFORMANCE:
- return AudioSource::VOICE_PERFORMANCE;
- case AUDIO_SOURCE_ULTRASOUND:
- return AudioSource::ULTRASOUND;
- case AUDIO_SOURCE_ECHO_REFERENCE:
- return AudioSource::ECHO_REFERENCE;
- case AUDIO_SOURCE_FM_TUNER:
- return AudioSource::FM_TUNER;
- case AUDIO_SOURCE_HOTWORD:
- return AudioSource::HOTWORD;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl) {
- return convertReinterpret<audio_session_t>(aidl);
-}
-
-ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
// This type is unnamed in the original definition, thus we name it here.
using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
@@ -1915,6 +326,27 @@
return legacy2aidl_audio_session_t_int32_t(legacy.session);
}
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
+ audio_port_config_device_ext legacy;
+ legacy.hw_module = VALUE_OR_RETURN(
+ aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.type, legacy.address));
+ return legacy;
+}
+
+status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ const audio_port_config_device_ext& legacy,
+ AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+ aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->device = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ return OK;
+}
+
// This type is unnamed in the original definition, thus we name it here.
using audio_port_config_ext = decltype(audio_port_config::ext);
@@ -2159,148 +591,6 @@
return aidl;
}
-ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(AudioContentType aidl) {
- switch (aidl) {
- case AudioContentType::UNKNOWN:
- return AUDIO_CONTENT_TYPE_UNKNOWN;
- case AudioContentType::SPEECH:
- return AUDIO_CONTENT_TYPE_SPEECH;
- case AudioContentType::MUSIC:
- return AUDIO_CONTENT_TYPE_MUSIC;
- case AudioContentType::MOVIE:
- return AUDIO_CONTENT_TYPE_MOVIE;
- case AudioContentType::SONIFICATION:
- return AUDIO_CONTENT_TYPE_SONIFICATION;
- case AudioContentType::ULTRASOUND:
- return AUDIO_CONTENT_TYPE_ULTRASOUND;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioContentType>
-legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy) {
- switch (legacy) {
- case AUDIO_CONTENT_TYPE_UNKNOWN:
- return AudioContentType::UNKNOWN;
- case AUDIO_CONTENT_TYPE_SPEECH:
- return AudioContentType::SPEECH;
- case AUDIO_CONTENT_TYPE_MUSIC:
- return AudioContentType::MUSIC;
- case AUDIO_CONTENT_TYPE_MOVIE:
- return AudioContentType::MOVIE;
- case AUDIO_CONTENT_TYPE_SONIFICATION:
- return AudioContentType::SONIFICATION;
- case AUDIO_CONTENT_TYPE_ULTRASOUND:
- return AudioContentType::ULTRASOUND;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(AudioUsage aidl) {
- switch (aidl) {
- case AudioUsage::INVALID:
- break; // return error
- case AudioUsage::UNKNOWN:
- return AUDIO_USAGE_UNKNOWN;
- case AudioUsage::MEDIA:
- return AUDIO_USAGE_MEDIA;
- case AudioUsage::VOICE_COMMUNICATION:
- return AUDIO_USAGE_VOICE_COMMUNICATION;
- case AudioUsage::VOICE_COMMUNICATION_SIGNALLING:
- return AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
- case AudioUsage::ALARM:
- return AUDIO_USAGE_ALARM;
- case AudioUsage::NOTIFICATION:
- return AUDIO_USAGE_NOTIFICATION;
- case AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE:
- return AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
- case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST:
- return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST;
- case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT:
- return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT;
- case AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED:
- return AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED;
- case AudioUsage::NOTIFICATION_EVENT:
- return AUDIO_USAGE_NOTIFICATION_EVENT;
- case AudioUsage::ASSISTANCE_ACCESSIBILITY:
- return AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
- case AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE:
- return AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE;
- case AudioUsage::ASSISTANCE_SONIFICATION:
- return AUDIO_USAGE_ASSISTANCE_SONIFICATION;
- case AudioUsage::GAME:
- return AUDIO_USAGE_GAME;
- case AudioUsage::VIRTUAL_SOURCE:
- return AUDIO_USAGE_VIRTUAL_SOURCE;
- case AudioUsage::ASSISTANT:
- return AUDIO_USAGE_ASSISTANT;
- case AudioUsage::CALL_ASSISTANT:
- return AUDIO_USAGE_CALL_ASSISTANT;
- case AudioUsage::EMERGENCY:
- return AUDIO_USAGE_EMERGENCY;
- case AudioUsage::SAFETY:
- return AUDIO_USAGE_SAFETY;
- case AudioUsage::VEHICLE_STATUS:
- return AUDIO_USAGE_VEHICLE_STATUS;
- case AudioUsage::ANNOUNCEMENT:
- return AUDIO_USAGE_ANNOUNCEMENT;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioUsage>
-legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy) {
- switch (legacy) {
- case AUDIO_USAGE_UNKNOWN:
- return AudioUsage::UNKNOWN;
- case AUDIO_USAGE_MEDIA:
- return AudioUsage::MEDIA;
- case AUDIO_USAGE_VOICE_COMMUNICATION:
- return AudioUsage::VOICE_COMMUNICATION;
- case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
- return AudioUsage::VOICE_COMMUNICATION_SIGNALLING;
- case AUDIO_USAGE_ALARM:
- return AudioUsage::ALARM;
- case AUDIO_USAGE_NOTIFICATION:
- return AudioUsage::NOTIFICATION;
- case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
- return AudioUsage::NOTIFICATION_TELEPHONY_RINGTONE;
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
- return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_REQUEST;
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
- return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_INSTANT;
- case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
- return AudioUsage::SYS_RESERVED_NOTIFICATION_COMMUNICATION_DELAYED;
- case AUDIO_USAGE_NOTIFICATION_EVENT:
- return AudioUsage::NOTIFICATION_EVENT;
- case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
- return AudioUsage::ASSISTANCE_ACCESSIBILITY;
- case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
- return AudioUsage::ASSISTANCE_NAVIGATION_GUIDANCE;
- case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
- return AudioUsage::ASSISTANCE_SONIFICATION;
- case AUDIO_USAGE_GAME:
- return AudioUsage::GAME;
- case AUDIO_USAGE_VIRTUAL_SOURCE:
- return AudioUsage::VIRTUAL_SOURCE;
- case AUDIO_USAGE_ASSISTANT:
- return AudioUsage::ASSISTANT;
- case AUDIO_USAGE_CALL_ASSISTANT:
- return AudioUsage::CALL_ASSISTANT;
- case AUDIO_USAGE_EMERGENCY:
- return AudioUsage::EMERGENCY;
- case AUDIO_USAGE_SAFETY:
- return AudioUsage::SAFETY;
- case AUDIO_USAGE_VEHICLE_STATUS:
- return AudioUsage::VEHICLE_STATUS;
- case AUDIO_USAGE_ANNOUNCEMENT:
- return AudioUsage::ANNOUNCEMENT;
- }
- return unexpected(BAD_VALUE);
-}
-
ConversionResult<audio_flags_mask_t>
aidl2legacy_AudioFlag_audio_flags_mask_t(media::AudioFlag aidl) {
switch (aidl) {
@@ -2424,140 +714,6 @@
return aidl;
}
-ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(AudioEncapsulationMode aidl) {
- switch (aidl) {
- case AudioEncapsulationMode::INVALID:
- break; // return error
- case AudioEncapsulationMode::NONE:
- return AUDIO_ENCAPSULATION_MODE_NONE;
- case AudioEncapsulationMode::ELEMENTARY_STREAM:
- return AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM;
- case AudioEncapsulationMode::HANDLE:
- return AUDIO_ENCAPSULATION_MODE_HANDLE;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioEncapsulationMode>
-legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy) {
- switch (legacy) {
- case AUDIO_ENCAPSULATION_MODE_NONE:
- return AudioEncapsulationMode::NONE;
- case AUDIO_ENCAPSULATION_MODE_ELEMENTARY_STREAM:
- return AudioEncapsulationMode::ELEMENTARY_STREAM;
- case AUDIO_ENCAPSULATION_MODE_HANDLE:
- return AudioEncapsulationMode::HANDLE;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(const AudioOffloadInfo& aidl) {
- audio_offload_info_t legacy = AUDIO_INFO_INITIALIZER;
- audio_config_base_t base = VALUE_OR_RETURN(
- aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, false /*isInput*/));
- legacy.sample_rate = base.sample_rate;
- legacy.channel_mask = base.channel_mask;
- legacy.format = base.format;
- legacy.stream_type = VALUE_OR_RETURN(
- aidl2legacy_AudioStreamType_audio_stream_type_t(aidl.streamType));
- legacy.bit_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitRatePerSecond));
- legacy.duration_us = VALUE_OR_RETURN(convertIntegral<int64_t>(aidl.durationUs));
- legacy.has_video = aidl.hasVideo;
- legacy.is_streaming = aidl.isStreaming;
- legacy.bit_width = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.bitWidth));
- legacy.offload_buffer_size = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.offloadBufferSize));
- legacy.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
- legacy.encapsulation_mode = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(aidl.encapsulationMode));
- legacy.content_id = VALUE_OR_RETURN(convertReinterpret<int32_t>(aidl.contentId));
- legacy.sync_id = VALUE_OR_RETURN(convertReinterpret<int32_t>(aidl.syncId));
- return legacy;
-}
-
-ConversionResult<AudioOffloadInfo>
-legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy) {
- AudioOffloadInfo aidl;
- // Version 0.1 fields.
- if (legacy.size < offsetof(audio_offload_info_t, usage) + sizeof(audio_offload_info_t::usage)) {
- return unexpected(BAD_VALUE);
- }
- const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
- .channel_mask = legacy.channel_mask, .format = legacy.format };
- aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(
- base, false /*isInput*/));
- aidl.streamType = VALUE_OR_RETURN(
- legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream_type));
- aidl.bitRatePerSecond = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_rate));
- aidl.durationUs = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.duration_us));
- aidl.hasVideo = legacy.has_video;
- aidl.isStreaming = legacy.is_streaming;
- aidl.bitWidth = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.bit_width));
- aidl.offloadBufferSize = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.offload_buffer_size));
- aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.usage));
-
- // Version 0.2 fields.
- if (legacy.version >= AUDIO_OFFLOAD_INFO_VERSION_0_2) {
- if (legacy.size <
- offsetof(audio_offload_info_t, sync_id) + sizeof(audio_offload_info_t::sync_id)) {
- return unexpected(BAD_VALUE);
- }
- aidl.encapsulationMode = VALUE_OR_RETURN(
- legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(
- legacy.encapsulation_mode));
- aidl.contentId = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.content_id));
- aidl.syncId = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.sync_id));
- }
- return aidl;
-}
-
-ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
- const audio_config_base_t legacyBase = VALUE_OR_RETURN(
- aidl2legacy_AudioConfigBase_audio_config_base_t(aidl.base, isInput));
- audio_config_t legacy = AUDIO_CONFIG_INITIALIZER;
- legacy.sample_rate = legacyBase.sample_rate;
- legacy.channel_mask = legacyBase.channel_mask;
- legacy.format = legacyBase.format;
- legacy.offload_info = VALUE_OR_RETURN(
- aidl2legacy_AudioOffloadInfo_audio_offload_info_t(aidl.offloadInfo));
- legacy.frame_count = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.frameCount));
- return legacy;
-}
-
-ConversionResult<AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput) {
- const audio_config_base_t base = { .sample_rate = legacy.sample_rate,
- .channel_mask = legacy.channel_mask, .format = legacy.format };
- AudioConfig aidl;
- aidl.base = VALUE_OR_RETURN(legacy2aidl_audio_config_base_t_AudioConfigBase(base, isInput));
- aidl.offloadInfo = VALUE_OR_RETURN(
- legacy2aidl_audio_offload_info_t_AudioOffloadInfo(legacy.offload_info));
- aidl.frameCount = VALUE_OR_RETURN(convertIntegral<int64_t>(legacy.frame_count));
- return aidl;
-}
-
-ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(const AudioConfigBase& aidl, bool isInput) {
- audio_config_base_t legacy;
- legacy.sample_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.sampleRate));
- legacy.channel_mask = VALUE_OR_RETURN(
- aidl2legacy_AudioChannelLayout_audio_channel_mask_t(aidl.channelMask, isInput));
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
- return legacy;
-}
-
-ConversionResult<AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput) {
- AudioConfigBase aidl;
- aidl.sampleRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
- return aidl;
-}
-
ConversionResult<sp<IMemory>>
aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl) {
sp<IMemory> legacy;
@@ -2576,8 +732,8 @@
return aidl;
}
-ConversionResult<sp<IMemory>>
-aidl2legacy_NullableSharedFileRegion_IMemory(const std::optional<media::SharedFileRegion>& aidl) {
+ConversionResult<sp<IMemory>> aidl2legacy_NullableSharedFileRegion_IMemory(
+ const std::optional<media::SharedFileRegion>& aidl) {
sp<IMemory> legacy;
if (!convertNullableSharedFileRegionToIMemory(aidl, &legacy)) {
return unexpected(BAD_VALUE);
@@ -2612,31 +768,6 @@
return aidl;
}
-ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const AudioUuid& aidl) {
- audio_uuid_t legacy;
- legacy.timeLow = VALUE_OR_RETURN(convertReinterpret<uint32_t>(aidl.timeLow));
- legacy.timeMid = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeMid));
- legacy.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.timeHiAndVersion));
- legacy.clockSeq = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.clockSeq));
- if (aidl.node.size() != std::size(legacy.node)) {
- return unexpected(BAD_VALUE);
- }
- std::copy(aidl.node.begin(), aidl.node.end(), legacy.node);
- return legacy;
-}
-
-ConversionResult<AudioUuid>
-legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy) {
- AudioUuid aidl;
- aidl.timeLow = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy.timeLow));
- aidl.timeMid = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeMid));
- aidl.timeHiAndVersion = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.timeHiAndVersion));
- aidl.clockSeq = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.clockSeq));
- std::copy(legacy.node, legacy.node + std::size(legacy.node), std::back_inserter(aidl.node));
- return aidl;
-}
-
ConversionResult<effect_descriptor_t>
aidl2legacy_EffectDescriptor_effect_descriptor_t(const media::EffectDescriptor& aidl) {
effect_descriptor_t legacy;
@@ -2667,76 +798,34 @@
return aidl;
}
-ConversionResult<audio_encapsulation_metadata_type_t>
-aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
- AudioEncapsulationMetadataType aidl) {
- switch (aidl) {
- case AudioEncapsulationMetadataType::NONE:
- return AUDIO_ENCAPSULATION_METADATA_TYPE_NONE;
- case AudioEncapsulationMetadataType::FRAMEWORK_TUNER:
- return AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER;
- case AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR:
- return AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR;
- }
- return unexpected(BAD_VALUE);
+ConversionResult<audio_port_mix_ext>
+aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
+ audio_port_mix_ext legacy{};
+ legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
+ legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
+ return legacy;
}
-ConversionResult<AudioEncapsulationMetadataType>
-legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
- audio_encapsulation_metadata_type_t legacy) {
- switch (legacy) {
- case AUDIO_ENCAPSULATION_METADATA_TYPE_NONE:
- return AudioEncapsulationMetadataType::NONE;
- case AUDIO_ENCAPSULATION_METADATA_TYPE_FRAMEWORK_TUNER:
- return AudioEncapsulationMetadataType::FRAMEWORK_TUNER;
- case AUDIO_ENCAPSULATION_METADATA_TYPE_DVB_AD_DESCRIPTOR:
- return AudioEncapsulationMetadataType::DVB_AD_DESCRIPTOR;
- }
- return unexpected(BAD_VALUE);
+status_t
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
+ AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+ aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
+ aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ return OK;
}
-ConversionResult<uint32_t>
-aidl2legacy_AudioEncapsulationMode_mask(int32_t aidl) {
- return convertBitmask<uint32_t,
- int32_t,
- audio_encapsulation_mode_t,
- AudioEncapsulationMode>(
- aidl, aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t,
- indexToEnum_index<AudioEncapsulationMode>,
- enumToMask_index<uint32_t, audio_encapsulation_mode_t>);
+ConversionResult<audio_port_session_ext>
+aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
+ audio_port_session_ext legacy;
+ legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
+ return legacy;
}
ConversionResult<int32_t>
-legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy) {
- return convertBitmask<int32_t,
- uint32_t,
- AudioEncapsulationMode,
- audio_encapsulation_mode_t>(
- legacy, legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode,
- indexToEnum_index<audio_encapsulation_mode_t>,
- enumToMask_index<int32_t, AudioEncapsulationMode>);
-}
-
-ConversionResult<uint32_t>
-aidl2legacy_AudioEncapsulationMetadataType_mask(int32_t aidl) {
- return convertBitmask<uint32_t,
- int32_t,
- audio_encapsulation_metadata_type_t,
- AudioEncapsulationMetadataType>(
- aidl, aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t,
- indexToEnum_index<AudioEncapsulationMetadataType>,
- enumToMask_index<uint32_t, audio_encapsulation_metadata_type_t>);
-}
-
-ConversionResult<int32_t>
-legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy) {
- return convertBitmask<int32_t,
- uint32_t,
- AudioEncapsulationMetadataType,
- audio_encapsulation_metadata_type_t>(
- legacy, legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType,
- indexToEnum_index<audio_encapsulation_metadata_type_t>,
- enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
+legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
+ return legacy2aidl_audio_session_t_int32_t(legacy.session);
}
ConversionResult<audio_port_device_ext>
@@ -2769,36 +858,6 @@
return OK;
}
-ConversionResult<audio_port_mix_ext>
-aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
- const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
- audio_port_mix_ext legacy{};
- legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
- legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
- return legacy;
-}
-
-status_t
-legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
- AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
- aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
- return OK;
-}
-
-ConversionResult<audio_port_session_ext>
-aidl2legacy_int32_t_audio_port_session_ext(int32_t aidl) {
- audio_port_session_ext legacy;
- legacy.session = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_session_t(aidl));
- return legacy;
-}
-
-ConversionResult<int32_t>
-legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy) {
- return legacy2aidl_audio_session_t_int32_t(legacy.session);
-}
-
// This type is unnamed in the original definition, thus we name it here.
using audio_port_v7_ext = decltype(audio_port_v7::ext);
@@ -2870,93 +929,6 @@
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
- audio_profile legacy;
- legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
-
- if (aidl.sampleRates.size() > std::size(legacy.sample_rates)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(aidl.sampleRates.begin(), aidl.sampleRates.end(), legacy.sample_rates,
- convertIntegral<int32_t, unsigned int>));
- legacy.num_sample_rates = aidl.sampleRates.size();
-
- if (aidl.channelMasks.size() > std::size(legacy.channel_masks)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(aidl.channelMasks.begin(), aidl.channelMasks.end(), legacy.channel_masks,
- [isInput](const AudioChannelLayout& l) {
- return aidl2legacy_AudioChannelLayout_audio_channel_mask_t(l, isInput);
- }));
- legacy.num_channel_masks = aidl.channelMasks.size();
-
- legacy.encapsulation_type = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(aidl.encapsulationType));
- return legacy;
-}
-
-ConversionResult<AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput) {
- AudioProfile aidl;
- aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
-
- if (legacy.num_sample_rates > std::size(legacy.sample_rates)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(legacy.sample_rates, legacy.sample_rates + legacy.num_sample_rates,
- std::back_inserter(aidl.sampleRates),
- convertIntegral<unsigned int, int32_t>));
-
- if (legacy.num_channel_masks > std::size(legacy.channel_masks)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(legacy.channel_masks, legacy.channel_masks + legacy.num_channel_masks,
- std::back_inserter(aidl.channelMasks),
- [isInput](audio_channel_mask_t m) {
- return legacy2aidl_audio_channel_mask_t_AudioChannelLayout(m, isInput);
- }));
-
- aidl.encapsulationType = VALUE_OR_RETURN(
- legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
- legacy.encapsulation_type));
- return aidl;
-}
-
-ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const AudioGain& aidl, bool isInput) {
- audio_gain legacy;
- legacy.mode = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_gain_mode_t_mask(aidl.mode));
- legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- aidl.channelMask, isInput));
- legacy.min_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.minValue));
- legacy.max_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.maxValue));
- legacy.default_value = VALUE_OR_RETURN(convertIntegral<int>(aidl.defaultValue));
- legacy.step_value = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.stepValue));
- legacy.min_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.minRampMs));
- legacy.max_ramp_ms = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.maxRampMs));
- return legacy;
-}
-
-ConversionResult<AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput) {
- AudioGain aidl;
- aidl.mode = VALUE_OR_RETURN(legacy2aidl_audio_gain_mode_t_int32_t_mask(legacy.mode));
- aidl.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
- aidl.minValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_value));
- aidl.maxValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_value));
- aidl.defaultValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.default_value));
- aidl.stepValue = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.step_value));
- aidl.minRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.min_ramp_ms));
- aidl.maxRampMs = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.max_ramp_ms));
- return aidl;
-}
-
ConversionResult<audio_port_v7>
aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl) {
audio_port_v7 legacy;
@@ -3051,58 +1023,6 @@
return aidl;
}
-ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(AudioMode aidl) {
- switch (aidl) {
- case AudioMode::SYS_RESERVED_INVALID:
- return AUDIO_MODE_INVALID;
- case AudioMode::SYS_RESERVED_CURRENT:
- return AUDIO_MODE_CURRENT;
- case AudioMode::NORMAL:
- return AUDIO_MODE_NORMAL;
- case AudioMode::RINGTONE:
- return AUDIO_MODE_RINGTONE;
- case AudioMode::IN_CALL:
- return AUDIO_MODE_IN_CALL;
- case AudioMode::IN_COMMUNICATION:
- return AUDIO_MODE_IN_COMMUNICATION;
- case AudioMode::CALL_SCREEN:
- return AUDIO_MODE_CALL_SCREEN;
- case AudioMode::SYS_RESERVED_CALL_REDIRECT:
- return AUDIO_MODE_CALL_REDIRECT;
- case AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT:
- return AUDIO_MODE_COMMUNICATION_REDIRECT;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioMode>
-legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy) {
- switch (legacy) {
- case AUDIO_MODE_INVALID:
- return AudioMode::SYS_RESERVED_INVALID;
- case AUDIO_MODE_CURRENT:
- return AudioMode::SYS_RESERVED_CURRENT;
- case AUDIO_MODE_NORMAL:
- return AudioMode::NORMAL;
- case AUDIO_MODE_RINGTONE:
- return AudioMode::RINGTONE;
- case AUDIO_MODE_IN_CALL:
- return AudioMode::IN_CALL;
- case AUDIO_MODE_IN_COMMUNICATION:
- return AudioMode::IN_COMMUNICATION;
- case AUDIO_MODE_CALL_SCREEN:
- return AudioMode::CALL_SCREEN;
- case AUDIO_MODE_CALL_REDIRECT:
- return AudioMode::SYS_RESERVED_CALL_REDIRECT;
- case AUDIO_MODE_COMMUNICATION_REDIRECT:
- return AudioMode::SYS_RESERVED_COMMUNICATION_REDIRECT;
- case AUDIO_MODE_CNT:
- break;
- }
- return unexpected(BAD_VALUE);
-}
-
ConversionResult<audio_unique_id_use_t>
aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl) {
switch (aidl) {
@@ -3171,160 +1091,6 @@
return convertReinterpret<int32_t>(legacy);
}
-ConversionResult<audio_dual_mono_mode_t>
-aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(media::AudioDualMonoMode aidl) {
- switch (aidl) {
- case media::AudioDualMonoMode::OFF:
- return AUDIO_DUAL_MONO_MODE_OFF;
- case media::AudioDualMonoMode::LR:
- return AUDIO_DUAL_MONO_MODE_LR;
- case media::AudioDualMonoMode::LL:
- return AUDIO_DUAL_MONO_MODE_LL;
- case media::AudioDualMonoMode::RR:
- return AUDIO_DUAL_MONO_MODE_RR;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioDualMonoMode>
-legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(audio_dual_mono_mode_t legacy) {
- switch (legacy) {
- case AUDIO_DUAL_MONO_MODE_OFF:
- return media::AudioDualMonoMode::OFF;
- case AUDIO_DUAL_MONO_MODE_LR:
- return media::AudioDualMonoMode::LR;
- case AUDIO_DUAL_MONO_MODE_LL:
- return media::AudioDualMonoMode::LL;
- case AUDIO_DUAL_MONO_MODE_RR:
- return media::AudioDualMonoMode::RR;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_timestretch_fallback_mode_t>
-aidl2legacy_int32_t_audio_timestretch_fallback_mode_t(int32_t aidl) {
- return convertReinterpret<audio_timestretch_fallback_mode_t>(aidl);
-}
-
-ConversionResult<int32_t>
-legacy2aidl_audio_timestretch_fallback_mode_t_int32_t(audio_timestretch_fallback_mode_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_timestretch_stretch_mode_t>
-aidl2legacy_int32_t_audio_timestretch_stretch_mode_t(int32_t aidl) {
- return convertReinterpret<audio_timestretch_stretch_mode_t>(aidl);
-}
-
-ConversionResult<int32_t>
-legacy2aidl_audio_timestretch_stretch_mode_t_int32_t(audio_timestretch_stretch_mode_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
-ConversionResult<audio_playback_rate_t>
-aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(const media::AudioPlaybackRate& aidl) {
- audio_playback_rate_t legacy;
- legacy.mSpeed = aidl.speed;
- legacy.mPitch = aidl.pitch;
- legacy.mFallbackMode = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_timestretch_fallback_mode_t(aidl.fallbackMode));
- legacy.mStretchMode = VALUE_OR_RETURN(
- aidl2legacy_int32_t_audio_timestretch_stretch_mode_t(aidl.stretchMode));
- return legacy;
-}
-
-ConversionResult<media::AudioPlaybackRate>
-legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy) {
- media::AudioPlaybackRate aidl;
- aidl.speed = legacy.mSpeed;
- aidl.pitch = legacy.mPitch;
- aidl.fallbackMode = VALUE_OR_RETURN(
- legacy2aidl_audio_timestretch_fallback_mode_t_int32_t(legacy.mFallbackMode));
- aidl.stretchMode = VALUE_OR_RETURN(
- legacy2aidl_audio_timestretch_stretch_mode_t_int32_t(legacy.mStretchMode));
- return aidl;
-}
-
-ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(AudioStandard aidl) {
- switch (aidl) {
- case AudioStandard::NONE:
- return AUDIO_STANDARD_NONE;
- case AudioStandard::EDID:
- return AUDIO_STANDARD_EDID;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioStandard>
-legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy) {
- switch (legacy) {
- case AUDIO_STANDARD_NONE:
- return AudioStandard::NONE;
- case AUDIO_STANDARD_EDID:
- return AudioStandard::EDID;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<audio_extra_audio_descriptor>
-aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
- const ExtraAudioDescriptor& aidl) {
- audio_extra_audio_descriptor legacy;
- legacy.standard = VALUE_OR_RETURN(aidl2legacy_AudioStandard_audio_standard_t(aidl.standard));
- if (aidl.audioDescriptor.size() > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
- return unexpected(BAD_VALUE);
- }
- legacy.descriptor_length = aidl.audioDescriptor.size();
- std::copy(aidl.audioDescriptor.begin(), aidl.audioDescriptor.end(),
- std::begin(legacy.descriptor));
- legacy.encapsulation_type =
- VALUE_OR_RETURN(aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
- aidl.encapsulationType));
- return legacy;
-}
-
-ConversionResult<ExtraAudioDescriptor>
-legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
- const audio_extra_audio_descriptor& legacy) {
- ExtraAudioDescriptor aidl;
- aidl.standard = VALUE_OR_RETURN(legacy2aidl_audio_standard_t_AudioStandard(legacy.standard));
- if (legacy.descriptor_length > EXTRA_AUDIO_DESCRIPTOR_SIZE) {
- return unexpected(BAD_VALUE);
- }
- aidl.audioDescriptor.resize(legacy.descriptor_length);
- std::copy(legacy.descriptor, legacy.descriptor + legacy.descriptor_length,
- aidl.audioDescriptor.begin());
- aidl.encapsulationType =
- VALUE_OR_RETURN(legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
- legacy.encapsulation_type));
- return aidl;
-}
-
-ConversionResult<audio_encapsulation_type_t>
-aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
- const AudioEncapsulationType& aidl) {
- switch (aidl) {
- case AudioEncapsulationType::NONE:
- return AUDIO_ENCAPSULATION_TYPE_NONE;
- case AudioEncapsulationType::IEC61937:
- return AUDIO_ENCAPSULATION_TYPE_IEC61937;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<AudioEncapsulationType>
-legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
- const audio_encapsulation_type_t & legacy) {
- switch (legacy) {
- case AUDIO_ENCAPSULATION_TYPE_NONE:
- return AudioEncapsulationType::NONE;
- case AUDIO_ENCAPSULATION_TYPE_IEC61937:
- return AudioEncapsulationType::IEC61937;
- }
- return unexpected(BAD_VALUE);
-}
-
ConversionResult<TrackSecondaryOutputInfoPair>
aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair(
const media::TrackSecondaryOutputInfo& aidl) {
@@ -3397,25 +1163,4 @@
indexToEnum_bitmask<audio_direct_mode_t>,
enumToMask_index<int32_t, media::AudioDirectMode>);
}
-
-ConversionResult<audio_latency_mode_t>
-aidl2legacy_LatencyMode_audio_latency_mode_t(media::LatencyMode aidl) {
- switch (aidl) {
- case media::LatencyMode::FREE:
- return AUDIO_LATENCY_MODE_FREE;
- case media::LatencyMode::LOW:
- return AUDIO_LATENCY_MODE_LOW;
- }
- return unexpected(BAD_VALUE);
-}
-ConversionResult<media::LatencyMode>
-legacy2aidl_audio_latency_mode_t_LatencyMode(audio_latency_mode_t legacy) {
- switch (legacy) {
- case AUDIO_LATENCY_MODE_FREE:
- return media::LatencyMode::FREE;
- case AUDIO_LATENCY_MODE_LOW:
- return media::LatencyMode::LOW;
- }
- return unexpected(BAD_VALUE);
-}
} // namespace android
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index b6ddf56..bcaaa6e 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -128,6 +128,7 @@
"audiopolicy-types-aidl-cpp",
"av-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
+ "libaudio_aidl_conversion_common_cpp",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
"libaudioutils",
@@ -193,88 +194,29 @@
},
}
-// This is intended for clients needing to include AidlConversionUtil.h, without dragging in a lot of extra
-// dependencies.
-cc_library_headers {
- name: "libaudioclient_aidl_conversion_util",
- host_supported: true,
- vendor_available: true,
- double_loadable: true,
- min_sdk_version: "29",
- export_include_dirs: [
- "include",
- ],
- header_libs: [
- "libbase_headers",
- "liberror_headers",
- ],
- export_header_lib_headers: [
- "libbase_headers",
- "liberror_headers",
- ],
- apex_available: [
- "//apex_available:platform",
- "com.android.btservices",
- "com.android.media",
- "com.android.media.swcodec",
- ],
- target: {
- darwin: {
- enabled: false,
- },
- },
-}
-
cc_library {
name: "libaudioclient_aidl_conversion",
srcs: ["AidlConversion.cpp"],
- export_include_dirs: ["include"],
- host_supported: true,
- vendor_available: true,
- double_loadable: true,
- min_sdk_version: "29",
- header_libs: [
- "libaudioclient_aidl_conversion_util",
- "libaudio_system_headers",
- ],
- export_header_lib_headers: [
- "libaudioclient_aidl_conversion_util",
- ],
- defaults: [
- "latest_android_media_audio_common_types_cpp_export_shared",
- ],
shared_libs: [
"audioclient-types-aidl-cpp",
- "libbase",
- "libbinder",
- "liblog",
- "libshmemcompat",
- "libstagefright_foundation",
- "libutils",
- "shared-file-region-aidl-cpp",
- "framework-permission-aidl-cpp",
+ ],
+ static_libs: [
+ "libaudio_aidl_conversion_common_cpp",
+ ],
+ export_include_dirs: ["include"],
+ header_libs: [
+ "libaudio_aidl_conversion_common_util_cpp",
+ ],
+ export_header_lib_headers: [
+ "libaudio_aidl_conversion_common_util_cpp",
],
export_shared_lib_headers: [
"audioclient-types-aidl-cpp",
- "libbase",
- "shared-file-region-aidl-cpp",
],
- cflags: [
- "-Wall",
- "-Werror",
- "-Wno-error=deprecated-declarations",
+ defaults: [
+ "audio_aidl_conversion_common_default",
+ "latest_android_media_audio_common_types_cpp_export_shared",
],
- sanitize: {
- misc_undefined: [
- "unsigned-integer-overflow",
- "signed-integer-overflow",
- ],
- },
- target: {
- darwin: {
- enabled: false,
- },
- },
}
// AIDL interface between libaudioclient and framework.jar
@@ -282,6 +224,7 @@
name: "libaudioclient_aidl",
srcs: [
"aidl/android/media/IPlayer.aidl",
+ "aidl/android/media/AudioHalVersion.aidl",
],
path: "aidl",
}
@@ -339,13 +282,12 @@
"aidl/android/media/AudioAttributesInternal.aidl",
"aidl/android/media/AudioClient.aidl",
"aidl/android/media/AudioDirectMode.aidl",
- "aidl/android/media/AudioDualMonoMode.aidl",
"aidl/android/media/AudioFlag.aidl",
"aidl/android/media/AudioGainSys.aidl",
+ "aidl/android/media/AudioHalVersion.aidl",
"aidl/android/media/AudioIoConfigEvent.aidl",
"aidl/android/media/AudioIoDescriptor.aidl",
"aidl/android/media/AudioPatch.aidl",
- "aidl/android/media/AudioPlaybackRate.aidl",
"aidl/android/media/AudioPort.aidl",
"aidl/android/media/AudioPortSys.aidl",
"aidl/android/media/AudioPortConfig.aidl",
@@ -360,7 +302,6 @@
"aidl/android/media/AudioUniqueIdUse.aidl",
"aidl/android/media/AudioVibratorInfo.aidl",
"aidl/android/media/EffectDescriptor.aidl",
- "aidl/android/media/LatencyMode.aidl",
"aidl/android/media/TrackSecondaryOutputInfo.aidl",
],
imports: [
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 641c4d9..2870c4c 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -568,7 +568,6 @@
if (cb != nullptr) {
cb->onError(mStatus);
}
- mIEffect.clear();
}
// -------------------------------------------------------------------------
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 141e1e3..0d16f47 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -671,12 +671,12 @@
}
Status AudioSystem::AudioFlingerClient::onSupportedLatencyModesChanged(
- int output, const std::vector<media::LatencyMode>& latencyModes) {
+ int output, const std::vector<media::audio::common::AudioLatencyMode>& latencyModes) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_io_handle_t(output));
std::vector<audio_latency_mode_t> modesLegacy = VALUE_OR_RETURN_BINDER_STATUS(
convertContainer<std::vector<audio_latency_mode_t>>(
- latencyModes, aidl2legacy_LatencyMode_audio_latency_mode_t));
+ latencyModes, aidl2legacy_AudioLatencyMode_audio_latency_mode_t));
std::vector<sp<SupportedLatencyModesCallback>> callbacks;
{
@@ -2426,6 +2426,32 @@
return af->getSupportedLatencyModes(output, modes);
}
+status_t AudioSystem::setBluetoothVariableLatencyEnabled(bool enabled) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->setBluetoothVariableLatencyEnabled(enabled);
+}
+
+status_t AudioSystem::isBluetoothVariableLatencyEnabled(
+ bool *enabled) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->isBluetoothVariableLatencyEnabled(enabled);
+}
+
+status_t AudioSystem::supportsBluetoothVariableLatency(
+ bool *support) {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ return PERMISSION_DENIED;
+ }
+ return af->supportsBluetoothVariableLatency(support);
+}
+
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
public IBinder::DeathRecipient {
public:
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 96fc544..ff4b071 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -1248,7 +1248,7 @@
status_t AudioTrack::getDualMonoMode(audio_dual_mono_mode_t* mode) const
{
AutoMutex lock(mLock);
- media::AudioDualMonoMode mediaMode;
+ media::audio::common::AudioDualMonoMode mediaMode;
const status_t status = statusTFromBinderStatus(mAudioTrack->getDualMonoMode(&mediaMode));
if (status == NO_ERROR) {
*mode = VALUE_OR_RETURN_STATUS(
@@ -1363,7 +1363,7 @@
{
AutoMutex lock(mLock);
if (isOffloadedOrDirect_l()) {
- media::AudioPlaybackRate playbackRateTemp;
+ media::audio::common::AudioPlaybackRate playbackRateTemp;
const status_t status = statusTFromBinderStatus(
mAudioTrack->getPlaybackRateParameters(&playbackRateTemp));
if (status == NO_ERROR) { // update local version if changed.
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 6ad97d1..b3c9f07 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -22,10 +22,10 @@
#include <stdint.h>
#include <sys/types.h>
-
+#include "IAudioFlinger.h"
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
-#include "IAudioFlinger.h"
+#include <system/thread_defs.h>
namespace android {
@@ -813,8 +813,8 @@
status_t AudioFlingerClientAdapter::setRequestedLatencyMode(
audio_io_handle_t output, audio_latency_mode_t mode) {
int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
- media::LatencyMode modeAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_latency_mode_t_LatencyMode(mode));
+ media::audio::common::AudioLatencyMode modeAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode));
return statusTFromBinderStatus(mDelegate->setRequestedLatencyMode(outputAidl, modeAidl));
}
@@ -825,14 +825,40 @@
}
int32_t outputAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
- std::vector<media::LatencyMode> modesAidl;
+ std::vector<media::audio::common::AudioLatencyMode> modesAidl;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
mDelegate->getSupportedLatencyModes(outputAidl, &modesAidl)));
*modes = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<audio_latency_mode_t>>(modesAidl,
- aidl2legacy_LatencyMode_audio_latency_mode_t));
+ aidl2legacy_AudioLatencyMode_audio_latency_mode_t));
+
+ return NO_ERROR;
+}
+
+status_t AudioFlingerClientAdapter::setBluetoothVariableLatencyEnabled(bool enabled) {
+ return statusTFromBinderStatus(mDelegate->setBluetoothVariableLatencyEnabled(enabled));
+}
+
+status_t AudioFlingerClientAdapter::isBluetoothVariableLatencyEnabled(bool* enabled) {
+ if (enabled == nullptr) {
+ return BAD_VALUE;
+ }
+
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mDelegate->isBluetoothVariableLatencyEnabled(enabled)));
+
+ return NO_ERROR;
+}
+
+status_t AudioFlingerClientAdapter::supportsBluetoothVariableLatency(bool* support) {
+ if (support == nullptr) {
+ return BAD_VALUE;
+ }
+
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mDelegate->supportsBluetoothVariableLatency(support)));
return NO_ERROR;
}
@@ -840,7 +866,9 @@
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
AudioFlingerServerAdapter::AudioFlingerServerAdapter(
- const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {}
+ const sp<AudioFlingerServerAdapter::Delegate>& delegate) : mDelegate(delegate) {
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
+}
status_t AudioFlingerServerAdapter::onTransact(uint32_t code,
const Parcel& data,
@@ -1332,17 +1360,17 @@
}
Status AudioFlingerServerAdapter::setRequestedLatencyMode(
- int32_t output, media::LatencyMode modeAidl) {
+ int32_t output, media::audio::common::AudioLatencyMode modeAidl) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
audio_latency_mode_t modeLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_LatencyMode_audio_latency_mode_t(modeAidl));
+ aidl2legacy_AudioLatencyMode_audio_latency_mode_t(modeAidl));
return Status::fromStatusT(mDelegate->setRequestedLatencyMode(
outputLegacy, modeLegacy));
}
Status AudioFlingerServerAdapter::getSupportedLatencyModes(
- int output, std::vector<media::LatencyMode>* _aidl_return) {
+ int output, std::vector<media::audio::common::AudioLatencyMode>* _aidl_return) {
audio_io_handle_t outputLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(output));
std::vector<audio_latency_mode_t> modesLegacy;
@@ -1350,9 +1378,21 @@
RETURN_BINDER_IF_ERROR(mDelegate->getSupportedLatencyModes(outputLegacy, &modesLegacy));
*_aidl_return = VALUE_OR_RETURN_BINDER(
- convertContainer<std::vector<media::LatencyMode>>(
- modesLegacy, legacy2aidl_audio_latency_mode_t_LatencyMode));
+ convertContainer<std::vector<media::audio::common::AudioLatencyMode>>(
+ modesLegacy, legacy2aidl_audio_latency_mode_t_AudioLatencyMode));
return Status::ok();
}
+Status AudioFlingerServerAdapter::setBluetoothVariableLatencyEnabled(bool enabled) {
+ return Status::fromStatusT(mDelegate->setBluetoothVariableLatencyEnabled(enabled));
+}
+
+Status AudioFlingerServerAdapter::isBluetoothVariableLatencyEnabled(bool *enabled) {
+ return Status::fromStatusT(mDelegate->isBluetoothVariableLatencyEnabled(enabled));
+}
+
+Status AudioFlingerServerAdapter::supportsBluetoothVariableLatency(bool *support) {
+ return Status::fromStatusT(mDelegate->supportsBluetoothVariableLatency(support));
+}
+
} // namespace android
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
index 51080ef..10f9d9b 100644
--- a/media/libaudioclient/TEST_MAPPING
+++ b/media/libaudioclient/TEST_MAPPING
@@ -32,5 +32,10 @@
{
"name": "audiosystem_tests"
}
+ ],
+ "postsubmit": [
+ {
+ "name": "audioeffect_analysis"
+ }
]
}
diff --git a/media/libaudioclient/aidl/android/media/AudioDualMonoMode.aidl b/media/libaudioclient/aidl/android/media/AudioDualMonoMode.aidl
deleted file mode 100644
index f6220c2..0000000
--- a/media/libaudioclient/aidl/android/media/AudioDualMonoMode.aidl
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package android.media;
-
-// TODO(b/175167149): Reconcile AudioDualMonoMode with framework-media-sources
-
-@Backing(type="int")
-enum AudioDualMonoMode {
- OFF = 0,
- LR = 1,
- LL = 2,
- RR = 3,
-}
diff --git a/media/libaudioclient/aidl/android/media/AudioHalVersion.aidl b/media/libaudioclient/aidl/android/media/AudioHalVersion.aidl
new file mode 100644
index 0000000..49048040
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/AudioHalVersion.aidl
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The audio HAL version definition.
+ *
+ * {@hide}
+ */
+parcelable AudioHalVersion {
+
+ @Backing(type="int")
+ enum Type {
+ /**
+ * Indicates that the audio HAL is implemented with HIDL (HAL interface definition language).
+ * @see <a href="https://source.android.com/docs/core/architecture/hidl/">HIDL</a>
+ */
+ HIDL = 0,
+
+ /**
+ * Indicates that the audio HAL is implemented with AIDL (Android Interface Definition Language).
+ * @see <a href="https://source.android.com/docs/core/architecture/aidl/">AIDL</a>
+ */
+ AIDL
+ }
+
+ Type type = Type.HIDL;
+
+ /**
+ * Major version number.
+ */
+ int major;
+
+ /**
+ * Minor version number.
+ */
+ int minor;
+}
diff --git a/media/libaudioclient/aidl/android/media/AudioPlaybackRate.aidl b/media/libaudioclient/aidl/android/media/AudioPlaybackRate.aidl
deleted file mode 100644
index e29d398..0000000
--- a/media/libaudioclient/aidl/android/media/AudioPlaybackRate.aidl
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The AudioPlaybackRate.
- *
- * See https://developer.android.com/reference/android/media/PlaybackParams.
- * TODO(b/175166815): Reconcile with framework-media-sources PlaybackParams.aidl.
- * As this is used for native wire serialization, no need to define
- * audio_timestretch_stretch_mode_t and audio_timestretch_fallback_mode_t enums
- * until we attempt to unify with PlaybackParams.
- *
- * {@hide}
- */
-parcelable AudioPlaybackRate {
- /** Speed of audio playback, >= 0.f, 1.f nominal (system limits are further restrictive) */
- float speed;
- /** Pitch of audio, >= 0.f, 1.f nominal (system limits are further restrictive) */
- float pitch;
- /** Interpreted as audio_timestretch_stretch_mode_t */
- int stretchMode;
- /** Interpreted as audio_timestretch_fallback_mode_t */
- int fallbackMode;
-}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerClient.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerClient.aidl
index a2bb024..f055cc4 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerClient.aidl
@@ -18,7 +18,7 @@
import android.media.AudioIoConfigEvent;
import android.media.AudioIoDescriptor;
-import android.media.LatencyMode;
+import android.media.audio.common.AudioLatencyMode;
/**
* A callback interface for AudioFlinger.
@@ -31,7 +31,8 @@
/**
* Called when the latency modes supported on a given output stream change.
* output is the I/O handle of the output stream for which the change is signalled.
- * latencyModes is the new list of supported latency modes (See LatencyMode.aidl).
+ * latencyModes is the new list of supported latency modes (See AudioLatencyMode.aidl).
*/
- oneway void onSupportedLatencyModesChanged(int output, in LatencyMode[] latencyModes);
+ oneway void onSupportedLatencyModesChanged(
+ int output, in AudioLatencyMode[] latencyModes);
}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index 9b8a843..1111160 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -35,12 +35,12 @@
import android.media.IAudioFlingerClient;
import android.media.IAudioRecord;
import android.media.IAudioTrack;
-import android.media.LatencyMode;
import android.media.MicrophoneInfoData;
import android.media.RenderPosition;
import android.media.TrackSecondaryOutputInfo;
import android.media.audio.common.AudioChannelLayout;
import android.media.audio.common.AudioFormatDescription;
+import android.media.audio.common.AudioLatencyMode;
import android.media.audio.common.AudioMMapPolicyInfo;
import android.media.audio.common.AudioMMapPolicyType;
import android.media.audio.common.AudioMode;
@@ -230,21 +230,41 @@
void setDeviceConnectedState(in AudioPort devicePort, boolean connected);
/**
- * Requests a given latency mode (See LatencyMode.aidl) on an output stream.
+ * Requests a given latency mode (See AudioLatencyMode.aidl) on an output stream.
* This can be used when some use case on a given mixer/stream can only be enabled
* if a specific latency mode is selected on the audio path below the HAL.
* For instance spatial audio with head tracking.
* output is the I/O handle of the output stream for which the request is made.
* latencyMode is the requested latency mode.
*/
- void setRequestedLatencyMode(int output, LatencyMode latencyMode);
+ void setRequestedLatencyMode(int output, AudioLatencyMode latencyMode);
/**
* Queries the list of latency modes (See LatencyMode.aidl) supported by an output stream.
* output is the I/O handle of the output stream to which the query applies.
* returns the list of supported latency modes.
*/
- LatencyMode[] getSupportedLatencyModes(int output);
+ AudioLatencyMode[] getSupportedLatencyModes(int output);
+
+ /**
+ * Queries whether the implementation supports controlling the latency modes
+ * over the Bluetooth A2DP or LE Audio links. If it does, the
+ * setRequestedLatencyMode() and getSupportedLatencyModes() APIs can also be used
+ * for streams routed to Bluetooth and not just for the spatializer output.
+ */
+ boolean supportsBluetoothVariableLatency();
+
+ /**
+ * Enables or disables the variable Bluetooth latency control mechanism in the
+ * audio framework and the audio HAL. This does not apply to the latency mode control
+ * on the spatializer output as this is a built-in feature.
+ */
+ void setBluetoothVariableLatencyEnabled(boolean enabled);
+
+ /**
+ * Indicates whether the variable Bluetooth latency control mechanism is enabled or disabled.
+ */
+ boolean isBluetoothVariableLatencyEnabled();
// When adding a new method, please review and update
// IAudioFlinger.h AudioFlingerServerAdapter::Delegate::TransactionCode
diff --git a/media/libaudioclient/aidl/android/media/IAudioTrack.aidl b/media/libaudioclient/aidl/android/media/IAudioTrack.aidl
index ac58925..c3a2dbe 100644
--- a/media/libaudioclient/aidl/android/media/IAudioTrack.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioTrack.aidl
@@ -16,13 +16,13 @@
package android.media;
-import android.media.AudioDualMonoMode;
-import android.media.AudioPlaybackRate;
import android.media.AudioTimestampInternal;
import android.media.SharedFileRegion;
import android.media.VolumeShaperConfiguration;
import android.media.VolumeShaperOperation;
import android.media.VolumeShaperState;
+import android.media.audio.common.AudioDualMonoMode;
+import android.media.audio.common.AudioPlaybackRate;
/**
* Unless otherwise noted, methods returning int expect it to be interpreted as a status_t.
diff --git a/media/libaudioclient/aidl/android/media/LatencyMode.aidl b/media/libaudioclient/aidl/android/media/LatencyMode.aidl
deleted file mode 100644
index 0b2a72b..0000000
--- a/media/libaudioclient/aidl/android/media/LatencyMode.aidl
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package android.media;
-
-/**
- * The latency mode currently used by the spatializer mixer.
- * {@hide}
- */
-@Backing(type="byte")
-enum LatencyMode {
- /** No specific constraint on the latency */
- FREE = 0,
- /** A relatively low latency compatible with head tracking operation (e.g less than 100ms) */
- LOW = 1,
-}
diff --git a/media/libaudioclient/fuzzer/Android.bp b/media/libaudioclient/fuzzer/Android.bp
index dd3e3ba..b1feb60 100644
--- a/media/libaudioclient/fuzzer/Android.bp
+++ b/media/libaudioclient/fuzzer/Android.bp
@@ -56,6 +56,7 @@
"av-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "libaudio_aidl_conversion_common_cpp",
"libaudioflinger",
"libaudiofoundation",
"libaudiomanager",
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index 1e66164..b0f84a4 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -24,46 +24,20 @@
#include <android/media/AudioAttributesInternal.h>
#include <android/media/AudioClient.h>
#include <android/media/AudioDirectMode.h>
-#include <android/media/AudioDualMonoMode.h>
#include <android/media/AudioFlag.h>
#include <android/media/AudioIoConfigEvent.h>
#include <android/media/AudioIoDescriptor.h>
-#include <android/media/AudioPlaybackRate.h>
#include <android/media/AudioPort.h>
#include <android/media/AudioPortConfig.h>
#include <android/media/AudioPortDeviceExtSys.h>
#include <android/media/AudioTimestampInternal.h>
#include <android/media/AudioUniqueIdUse.h>
#include <android/media/EffectDescriptor.h>
-#include <android/media/LatencyMode.h>
#include <android/media/TrackSecondaryOutputInfo.h>
-#include <android/media/audio/common/AudioChannelLayout.h>
-#include <android/media/audio/common/AudioConfig.h>
-#include <android/media/audio/common/AudioConfigBase.h>
-#include <android/media/audio/common/AudioContentType.h>
-#include <android/media/audio/common/AudioDeviceDescription.h>
-#include <android/media/audio/common/AudioEncapsulationMetadataType.h>
-#include <android/media/audio/common/AudioEncapsulationMode.h>
-#include <android/media/audio/common/AudioEncapsulationType.h>
-#include <android/media/audio/common/AudioFormatDescription.h>
-#include <android/media/audio/common/AudioGain.h>
-#include <android/media/audio/common/AudioGainConfig.h>
-#include <android/media/audio/common/AudioGainMode.h>
-#include <android/media/audio/common/AudioInputFlags.h>
-#include <android/media/audio/common/AudioMode.h>
-#include <android/media/audio/common/AudioOffloadInfo.h>
-#include <android/media/audio/common/AudioOutputFlags.h>
-#include <android/media/audio/common/AudioPortExt.h>
-#include <android/media/audio/common/AudioPortMixExt.h>
-#include <android/media/audio/common/AudioProfile.h>
-#include <android/media/audio/common/AudioSource.h>
-#include <android/media/audio/common/AudioStandard.h>
-#include <android/media/audio/common/AudioUsage.h>
-#include <android/media/audio/common/AudioUuid.h>
-#include <android/media/audio/common/ExtraAudioDescriptor.h>
#include <android/media/SharedFileRegion.h>
#include <binder/IMemory.h>
+#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionUtil.h>
#include <media/AudioClient.h>
#include <media/AudioCommonTypes.h>
@@ -73,49 +47,6 @@
namespace android {
-// maxSize is the size of the C-string buffer (including the 0-terminator), NOT the max length of
-// the string.
-status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize);
-ConversionResult<std::string> legacy2aidl_string(const char* legacy, size_t maxSize);
-
-ConversionResult<audio_module_handle_t> aidl2legacy_int32_t_audio_module_handle_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_module_handle_t_int32_t(audio_module_handle_t legacy);
-
-ConversionResult<audio_io_handle_t> aidl2legacy_int32_t_audio_io_handle_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_io_handle_t_int32_t(audio_io_handle_t legacy);
-
-ConversionResult<audio_port_handle_t> aidl2legacy_int32_t_audio_port_handle_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_port_handle_t_int32_t(audio_port_handle_t legacy);
-
-ConversionResult<audio_patch_handle_t> aidl2legacy_int32_t_audio_patch_handle_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_patch_handle_t_int32_t(audio_patch_handle_t legacy);
-
-ConversionResult<audio_unique_id_t> aidl2legacy_int32_t_audio_unique_id_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_unique_id_t_int32_t(audio_unique_id_t legacy);
-
-ConversionResult<audio_hw_sync_t> aidl2legacy_int32_t_audio_hw_sync_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_hw_sync_t_int32_t(audio_hw_sync_t legacy);
-
-ConversionResult<unsigned int> aidl2legacy_int32_t_config_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_config_mask_int32_t(unsigned int legacy);
-
-ConversionResult<pid_t> aidl2legacy_int32_t_pid_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_pid_t_int32_t(pid_t legacy);
-
-ConversionResult<uid_t> aidl2legacy_int32_t_uid_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_uid_t_int32_t(uid_t legacy);
-
-ConversionResult<String8> aidl2legacy_string_view_String8(std::string_view aidl);
-ConversionResult<std::string> legacy2aidl_String8_string(const String8& legacy);
-
-ConversionResult<String16> aidl2legacy_string_view_String16(std::string_view aidl);
-ConversionResult<std::string> legacy2aidl_String16_string(const String16& legacy);
-
-ConversionResult<std::optional<String16>>
-aidl2legacy_optional_string_view_optional_String16(std::optional<std::string_view> aidl);
-ConversionResult<std::optional<std::string_view>>
-legacy2aidl_optional_String16_optional_string(std::optional<String16> legacy);
-
ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
media::AudioIoConfigEvent aidl);
ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
@@ -131,75 +62,6 @@
ConversionResult<media::AudioPortType> legacy2aidl_audio_port_type_t_AudioPortType(
audio_port_type_t legacy);
-ConversionResult<audio_channel_mask_t> aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- const media::audio::common::AudioChannelLayout& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioChannelLayout>
-legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
-
-ConversionResult<audio_devices_t> aidl2legacy_AudioDeviceDescription_audio_devices_t(
- const media::audio::common::AudioDeviceDescription& aidl);
-ConversionResult<media::audio::common::AudioDeviceDescription>
-legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
-
-status_t aidl2legacy_AudioDevice_audio_device(
- const media::audio::common::AudioDevice& aidl,
- audio_devices_t* legacyType, char* legacyAddress);
-status_t aidl2legacy_AudioDevice_audio_device(
- const media::audio::common::AudioDevice& aidl,
- audio_devices_t* legacyType, String8* legacyAddress);
-status_t aidl2legacy_AudioDevice_audio_device(
- const media::audio::common::AudioDevice& aidl,
- audio_devices_t* legacyType, std::string* legacyAddress);
-ConversionResult<media::audio::common::AudioDevice>
-legacy2aidl_audio_device_AudioDevice(
- audio_devices_t legacyType, const char* legacyAddress);
-ConversionResult<media::audio::common::AudioDevice>
-legacy2aidl_audio_device_AudioDevice(
- audio_devices_t legacyType, const String8& legacyAddress);
-
-ConversionResult<audio_format_t> aidl2legacy_AudioFormatDescription_audio_format_t(
- const media::audio::common::AudioFormatDescription& aidl);
-ConversionResult<media::audio::common::AudioFormatDescription>
-legacy2aidl_audio_format_t_AudioFormatDescription(audio_format_t legacy);
-
-ConversionResult<audio_gain_mode_t>
-aidl2legacy_AudioGainMode_audio_gain_mode_t(media::audio::common::AudioGainMode aidl);
-ConversionResult<media::audio::common::AudioGainMode>
-legacy2aidl_audio_gain_mode_t_AudioGainMode(audio_gain_mode_t legacy);
-
-ConversionResult<audio_gain_mode_t> aidl2legacy_int32_t_audio_gain_mode_t_mask(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_gain_mode_t_int32_t_mask(audio_gain_mode_t legacy);
-
-ConversionResult<audio_gain_config> aidl2legacy_AudioGainConfig_audio_gain_config(
- const media::audio::common::AudioGainConfig& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioGainConfig>
-legacy2aidl_audio_gain_config_AudioGainConfig(const audio_gain_config& legacy, bool isInput);
-
-ConversionResult<audio_input_flags_t>
-aidl2legacy_AudioInputFlags_audio_input_flags_t(media::audio::common::AudioInputFlags aidl);
-ConversionResult<media::audio::common::AudioInputFlags>
-legacy2aidl_audio_input_flags_t_AudioInputFlags(audio_input_flags_t legacy);
-
-ConversionResult<audio_output_flags_t>
-aidl2legacy_AudioOutputFlags_audio_output_flags_t(media::audio::common::AudioOutputFlags aidl);
-ConversionResult<media::audio::common::AudioOutputFlags>
-legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
-
-ConversionResult<audio_input_flags_t> aidl2legacy_int32_t_audio_input_flags_t_mask(
- int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_input_flags_t_int32_t_mask(
- audio_input_flags_t legacy);
-
-ConversionResult<audio_output_flags_t> aidl2legacy_int32_t_audio_output_flags_t_mask(
- int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_output_flags_t_int32_t_mask(
- audio_output_flags_t legacy);
-
-ConversionResult<audio_io_flags> aidl2legacy_AudioIoFlags_audio_io_flags(
- const media::audio::common::AudioIoFlags& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioIoFlags> legacy2aidl_audio_io_flags_AudioIoFlags(
- const audio_io_flags& legacy, bool isInput);
-
ConversionResult<audio_port_config_device_ext>
aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
const media::audio::common::AudioPortDeviceExt& aidl,
@@ -214,15 +76,6 @@
ConversionResult<media::audio::common::AudioStreamType>
legacy2aidl_audio_stream_type_t_AudioStreamType(audio_stream_type_t legacy);
-ConversionResult<audio_source_t> aidl2legacy_AudioSource_audio_source_t(
- media::audio::common::AudioSource aidl);
-ConversionResult<media::audio::common::AudioSource>
- legacy2aidl_audio_source_t_AudioSource(
- audio_source_t legacy);
-
-ConversionResult<audio_session_t> aidl2legacy_int32_t_audio_session_t(int32_t aidl);
-ConversionResult<int32_t> legacy2aidl_audio_session_t_int32_t(audio_session_t legacy);
-
ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
const media::audio::common::AudioPortMixExt& aidl, media::AudioPortRole role,
const media::AudioPortMixExtSys& aidlMixExt);
@@ -255,17 +108,6 @@
ConversionResult<media::AudioClient> legacy2aidl_AudioClient_AudioClient(
const AudioClient& legacy);
-ConversionResult<audio_content_type_t>
-aidl2legacy_AudioContentType_audio_content_type_t(
- media::audio::common::AudioContentType aidl);
-ConversionResult<media::audio::common::AudioContentType>
-legacy2aidl_audio_content_type_t_AudioContentType(audio_content_type_t legacy);
-
-ConversionResult<audio_usage_t>
-aidl2legacy_AudioUsage_audio_usage_t(media::audio::common::AudioUsage aidl);
-ConversionResult<media::audio::common::AudioUsage>
-legacy2aidl_audio_usage_t_AudioUsage(audio_usage_t legacy);
-
ConversionResult<audio_flags_mask_t>
aidl2legacy_AudioFlag_audio_flags_mask_t(media::AudioFlag aidl);
ConversionResult<media::AudioFlag>
@@ -281,36 +123,13 @@
ConversionResult<media::AudioAttributesInternal>
legacy2aidl_audio_attributes_t_AudioAttributesInternal(const audio_attributes_t& legacy);
-ConversionResult<audio_encapsulation_mode_t>
-aidl2legacy_AudioEncapsulationMode_audio_encapsulation_mode_t(
- media::audio::common::AudioEncapsulationMode aidl);
-ConversionResult<media::audio::common::AudioEncapsulationMode>
-legacy2aidl_audio_encapsulation_mode_t_AudioEncapsulationMode(audio_encapsulation_mode_t legacy);
-
-ConversionResult<audio_offload_info_t>
-aidl2legacy_AudioOffloadInfo_audio_offload_info_t(
- const media::audio::common::AudioOffloadInfo& aidl);
-ConversionResult<media::audio::common::AudioOffloadInfo>
-legacy2aidl_audio_offload_info_t_AudioOffloadInfo(const audio_offload_info_t& legacy);
-
-ConversionResult<audio_config_t>
-aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioConfig>
-legacy2aidl_audio_config_t_AudioConfig(const audio_config_t& legacy, bool isInput);
-
-ConversionResult<audio_config_base_t>
-aidl2legacy_AudioConfigBase_audio_config_base_t(
- const media::audio::common::AudioConfigBase& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioConfigBase>
-legacy2aidl_audio_config_base_t_AudioConfigBase(const audio_config_base_t& legacy, bool isInput);
-
ConversionResult<sp<IMemory>>
aidl2legacy_SharedFileRegion_IMemory(const media::SharedFileRegion& aidl);
ConversionResult<media::SharedFileRegion>
legacy2aidl_IMemory_SharedFileRegion(const sp<IMemory>& legacy);
-ConversionResult<sp<IMemory>>
-aidl2legacy_NullableSharedFileRegion_IMemory(const std::optional<media::SharedFileRegion>& aidl);
+ConversionResult<sp<::android::IMemory>> aidl2legacy_NullableSharedFileRegion_IMemory(
+ const std::optional<media::SharedFileRegion>& aidl);
ConversionResult<std::optional<media::SharedFileRegion>>
legacy2aidl_NullableIMemory_SharedFileRegion(const sp<IMemory>& legacy);
@@ -319,32 +138,10 @@
ConversionResult<media::AudioTimestampInternal>
legacy2aidl_AudioTimestamp_AudioTimestampInternal(const AudioTimestamp& legacy);
-ConversionResult<audio_uuid_t>
-aidl2legacy_AudioUuid_audio_uuid_t(const media::audio::common::AudioUuid& aidl);
-ConversionResult<media::audio::common::AudioUuid>
-legacy2aidl_audio_uuid_t_AudioUuid(const audio_uuid_t& legacy);
-
ConversionResult<effect_descriptor_t>
aidl2legacy_EffectDescriptor_effect_descriptor_t(const media::EffectDescriptor& aidl);
-ConversionResult<media::EffectDescriptor>
-legacy2aidl_effect_descriptor_t_EffectDescriptor(const effect_descriptor_t& legacy);
-
-ConversionResult<audio_encapsulation_metadata_type_t>
-aidl2legacy_AudioEncapsulationMetadataType_audio_encapsulation_metadata_type_t(
- media::audio::common::AudioEncapsulationMetadataType aidl);
-ConversionResult<media::audio::common::AudioEncapsulationMetadataType>
-legacy2aidl_audio_encapsulation_metadata_type_t_AudioEncapsulationMetadataType(
- audio_encapsulation_metadata_type_t legacy);
-
-ConversionResult<uint32_t>
-aidl2legacy_AudioEncapsulationMode_mask(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_AudioEncapsulationMode_mask(uint32_t legacy);
-
-ConversionResult<uint32_t>
-aidl2legacy_AudioEncapsulationMetadataType_mask(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_AudioEncapsulationMetadataType_mask(uint32_t legacy);
+ConversionResult<media::EffectDescriptor> legacy2aidl_effect_descriptor_t_EffectDescriptor(
+ const effect_descriptor_t& legacy);
ConversionResult<audio_port_device_ext>
aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
@@ -369,27 +166,11 @@
ConversionResult<int32_t>
legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
-ConversionResult<audio_profile>
-aidl2legacy_AudioProfile_audio_profile(
- const media::audio::common::AudioProfile& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioProfile>
-legacy2aidl_audio_profile_AudioProfile(const audio_profile& legacy, bool isInput);
-
-ConversionResult<audio_gain>
-aidl2legacy_AudioGain_audio_gain(const media::audio::common::AudioGain& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioGain>
-legacy2aidl_audio_gain_AudioGain(const audio_gain& legacy, bool isInput);
-
ConversionResult<audio_port_v7>
aidl2legacy_AudioPort_audio_port_v7(const media::AudioPort& aidl);
ConversionResult<media::AudioPort>
legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
-ConversionResult<audio_mode_t>
-aidl2legacy_AudioMode_audio_mode_t(media::audio::common::AudioMode aidl);
-ConversionResult<media::audio::common::AudioMode>
-legacy2aidl_audio_mode_t_AudioMode(audio_mode_t legacy);
-
ConversionResult<audio_unique_id_use_t>
aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl);
ConversionResult<media::AudioUniqueIdUse>
@@ -400,45 +181,6 @@
ConversionResult<int32_t>
legacy2aidl_volume_group_t_int32_t(volume_group_t legacy);
-ConversionResult<audio_dual_mono_mode_t>
-aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(media::AudioDualMonoMode aidl);
-ConversionResult<media::AudioDualMonoMode>
-legacy2aidl_audio_dual_mono_mode_t_AudioDualMonoMode(audio_dual_mono_mode_t legacy);
-
-ConversionResult<audio_timestretch_fallback_mode_t>
-aidl2legacy_int32_t_audio_timestretch_fallback_mode_t(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_audio_timestretch_fallback_mode_t_int32_t(audio_timestretch_fallback_mode_t legacy);
-
-ConversionResult<audio_timestretch_stretch_mode_t>
-aidl2legacy_int32_t_audio_timestretch_stretch_mode_t(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_audio_timestretch_stretch_mode_t_int32_t(audio_timestretch_stretch_mode_t legacy);
-
-ConversionResult<audio_playback_rate_t>
-aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(const media::AudioPlaybackRate& aidl);
-ConversionResult<media::AudioPlaybackRate>
-legacy2aidl_audio_playback_rate_t_AudioPlaybackRate(const audio_playback_rate_t& legacy);
-
-ConversionResult<audio_standard_t>
-aidl2legacy_AudioStandard_audio_standard_t(media::audio::common::AudioStandard aidl);
-ConversionResult<media::audio::common::AudioStandard>
-legacy2aidl_audio_standard_t_AudioStandard(audio_standard_t legacy);
-
-ConversionResult<audio_extra_audio_descriptor>
-aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor(
- const media::audio::common::ExtraAudioDescriptor& aidl);
-ConversionResult<media::audio::common::ExtraAudioDescriptor>
-legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor(
- const audio_extra_audio_descriptor& legacy);
-
-ConversionResult<audio_encapsulation_type_t>
-aidl2legacy_AudioEncapsulationType_audio_encapsulation_type_t(
- const media::audio::common::AudioEncapsulationType& aidl);
-ConversionResult<media::audio::common::AudioEncapsulationType>
-legacy2aidl_audio_encapsulation_type_t_AudioEncapsulationType(
- const audio_encapsulation_type_t & legacy);
-
using TrackSecondaryOutputInfoPair = std::pair<audio_port_handle_t, std::vector<audio_io_handle_t>>;
ConversionResult<TrackSecondaryOutputInfoPair>
aidl2legacy_TrackSecondaryOutputInfo_TrackSecondaryOutputInfoPair(
@@ -455,9 +197,4 @@
ConversionResult<audio_direct_mode_t> aidl2legacy_int32_t_audio_direct_mode_t_mask(int32_t aidl);
ConversionResult<int32_t> legacy2aidl_audio_direct_mode_t_int32_t_mask(audio_direct_mode_t legacy);
-ConversionResult<audio_latency_mode_t>
-aidl2legacy_LatencyMode_audio_latency_mode_t(media::LatencyMode aidl);
-ConversionResult<media::LatencyMode>
-legacy2aidl_audio_latency_mode_t_LatencyMode(audio_latency_mode_t legacy);
-
} // namespace android
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index d42d962..cab476e 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -139,6 +139,11 @@
== MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER;
}
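+// Note (added for clarity): this tests only the LOOP_BACK bit, so it also matches
+// MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER mixes, assuming that flag includes the LOOP_BACK bit.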
+static inline bool is_mix_loopback(uint32_t routeFlags) {
+ return (routeFlags & MIX_ROUTE_FLAG_LOOP_BACK)
+ == MIX_ROUTE_FLAG_LOOP_BACK;
+}
+
}; // namespace android
#endif // ANDROID_AUDIO_POLICY_H
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 9411f46..3b7cc39 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -575,6 +575,12 @@
static status_t getSupportedLatencyModes(audio_io_handle_t output,
std::vector<audio_latency_mode_t>* modes);
+ static status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
+ static status_t isBluetoothVariableLatencyEnabled(bool *enabled);
+
+ static status_t supportsBluetoothVariableLatency(bool *support);
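+
+ // Illustrative usage (sketch, not part of this change): a caller would typically check
+ //   bool supported = false;
+ //   if (AudioSystem::supportsBluetoothVariableLatency(&supported) == OK && supported) {
+ //       AudioSystem::setBluetoothVariableLatencyEnabled(true);
+ //   }
+ // before reading the current state back with isBluetoothVariableLatencyEnabled(&enabled).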
+
// A listener for capture state changes.
class CaptureStateListener : public virtual RefBase {
public:
@@ -705,7 +711,8 @@
const media::AudioIoDescriptor& ioDesc) override;
binder::Status onSupportedLatencyModesChanged(
- int output, const std::vector<media::LatencyMode>& latencyModes) override;
+ int output,
+ const std::vector<media::audio::common::AudioLatencyMode>& latencyModes) override;
status_t addAudioDeviceCallback(const wp<AudioDeviceCallback>& callback,
audio_io_handle_t audioIo,
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index c891ae6..36ee96b 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -367,6 +367,11 @@
virtual status_t getSupportedLatencyModes(audio_io_handle_t output,
std::vector<audio_latency_mode_t>* modes) = 0;
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled) = 0;
+
+ virtual status_t isBluetoothVariableLatencyEnabled(bool* enabled) = 0;
+
+ virtual status_t supportsBluetoothVariableLatency(bool* support) = 0;
};
/**
@@ -473,6 +478,9 @@
audio_latency_mode_t mode) override;
status_t getSupportedLatencyModes(
audio_io_handle_t output, std::vector<audio_latency_mode_t>* modes) override;
+ status_t setBluetoothVariableLatencyEnabled(bool enabled) override;
+ status_t isBluetoothVariableLatencyEnabled(bool* enabled) override;
+ status_t supportsBluetoothVariableLatency(bool* support) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -564,6 +572,12 @@
SET_DEVICE_CONNECTED_STATE = media::BnAudioFlingerService::TRANSACTION_setDeviceConnectedState,
SET_REQUESTED_LATENCY_MODE = media::BnAudioFlingerService::TRANSACTION_setRequestedLatencyMode,
GET_SUPPORTED_LATENCY_MODES = media::BnAudioFlingerService::TRANSACTION_getSupportedLatencyModes,
+ SET_BLUETOOTH_VARIABLE_LATENCY_ENABLED =
+ media::BnAudioFlingerService::TRANSACTION_setBluetoothVariableLatencyEnabled,
+ IS_BLUETOOTH_VARIABLE_LATENCY_ENABLED =
+ media::BnAudioFlingerService::TRANSACTION_isBluetoothVariableLatencyEnabled,
+ SUPPORTS_BLUETOOTH_VARIABLE_LATENCY =
+ media::BnAudioFlingerService::TRANSACTION_supportsBluetoothVariableLatency,
};
protected:
@@ -685,9 +699,13 @@
Status getAAudioMixerBurstCount(int32_t* _aidl_return) override;
Status getAAudioHardwareBurstMinUsec(int32_t* _aidl_return) override;
Status setDeviceConnectedState(const media::AudioPort& port, bool connected) override;
- Status setRequestedLatencyMode(int output, media::LatencyMode mode) override;
+ Status setRequestedLatencyMode(
+ int output, media::audio::common::AudioLatencyMode mode) override;
Status getSupportedLatencyModes(int output,
- std::vector<media::LatencyMode>* _aidl_return) override;
+ std::vector<media::audio::common::AudioLatencyMode>* _aidl_return) override;
+ Status setBluetoothVariableLatencyEnabled(bool enabled) override;
+ Status isBluetoothVariableLatencyEnabled(bool* enabled) override;
+ Status supportsBluetoothVariableLatency(bool* support) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
};
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 1b09173..dcb6c25 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -36,8 +36,9 @@
"libutils",
],
static_libs: [
- "audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "libaudio_aidl_conversion_common_cpp",
+ "audioclient-types-aidl-cpp",
"libstagefright_foundation",
],
}
@@ -108,6 +109,8 @@
shared_libs: [
"capture_state_listener-aidl-cpp",
"framework-permission-aidl-cpp",
+ "libaudioclient_aidl_conversion",
+ "libaudio_aidl_conversion_common_cpp",
"libbase",
"libbinder",
"libcgrouprc",
@@ -137,7 +140,6 @@
"av-types-aidl-cpp",
"effect-aidl-cpp",
"libaudioclient",
- "libaudioclient_aidl_conversion",
"libaudiofoundation",
"libaudiomanager",
"libaudiopolicy",
@@ -176,6 +178,22 @@
}
cc_test {
+ name: "audioeffect_analysis",
+ defaults: ["libaudioclient_gtests_defaults"],
+ // flag needed for pffft.hpp
+ cflags: [
+ "-Wno-error=unused-parameter",
+ ],
+ srcs: [
+ "audioeffect_analyser.cpp",
+ "audio_test_utils.cpp",
+ ],
+ static_libs: [
+ "libpffft",
+ ],
+}
+
+cc_test {
name: "audiorouting_tests",
defaults: ["libaudioclient_gtests_defaults"],
srcs: [
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
index 850eb34..1e26ff6 100644
--- a/media/libaudioclient/tests/audio_test_utils.cpp
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -17,11 +17,15 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTestUtils"
+#include <android-base/file.h>
#include <system/audio_config.h>
#include <utils/Log.h>
#include "audio_test_utils.h"
+#define WAIT_PERIOD_MS 10 // from AudioTrack.cpp
+#define MAX_WAIT_TIME_MS 5000
+
template <class T>
constexpr void (*xmlDeleter)(T* t);
template <>
@@ -37,12 +41,6 @@
return std::unique_ptr<T, decltype(deleter)>{t, deleter};
}
-// Generates a random string.
-void CreateRandomFile(int& fd) {
- std::string filename = "/data/local/tmp/record-XXXXXX";
- fd = mkstemp(filename.data());
-}
-
void OnAudioDeviceUpdateNotifier::onAudioDeviceUpdate(audio_io_handle_t audioIo,
audio_port_handle_t deviceId) {
std::unique_lock<std::mutex> lock{mMutex};
@@ -52,11 +50,14 @@
mCondition.notify_all();
}
-status_t OnAudioDeviceUpdateNotifier::waitForAudioDeviceCb() {
+status_t OnAudioDeviceUpdateNotifier::waitForAudioDeviceCb(audio_port_handle_t expDeviceId) {
std::unique_lock<std::mutex> lock{mMutex};
- if (mAudioIo == AUDIO_IO_HANDLE_NONE) {
+ if (mAudioIo == AUDIO_IO_HANDLE_NONE ||
+ (expDeviceId != AUDIO_PORT_HANDLE_NONE && expDeviceId != mDeviceId)) {
mCondition.wait_for(lock, std::chrono::milliseconds(500));
- if (mAudioIo == AUDIO_IO_HANDLE_NONE) return TIMED_OUT;
+ if (mAudioIo == AUDIO_IO_HANDLE_NONE ||
+ (expDeviceId != AUDIO_PORT_HANDLE_NONE && expDeviceId != mDeviceId))
+ return TIMED_OUT;
}
return OK;
}
@@ -167,15 +168,16 @@
}
status_t AudioPlayback::fillBuffer() {
- if (PLAY_STARTED != mState && PLAY_STOPPED != mState) return INVALID_OPERATION;
- int retry = 25;
+ if (PLAY_STARTED != mState) return INVALID_OPERATION;
+ const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
+ int counter = 0;
uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mMemory->unsecurePointer()));
size_t nonContig = 0;
size_t bytesAvailable = mMemCapacity - mBytesUsedSoFar;
while (bytesAvailable > 0) {
AudioTrack::Buffer trackBuffer;
trackBuffer.frameCount = mTrack->frameCount() * 2;
- status_t status = mTrack->obtainBuffer(&trackBuffer, retry, &nonContig);
+ status_t status = mTrack->obtainBuffer(&trackBuffer, 1, &nonContig);
if (OK == status) {
size_t bytesToCopy = std::min(bytesAvailable, trackBuffer.size());
if (bytesToCopy > 0) {
@@ -184,14 +186,11 @@
mTrack->releaseBuffer(&trackBuffer);
mBytesUsedSoFar += bytesToCopy;
bytesAvailable = mMemCapacity - mBytesUsedSoFar;
- if (bytesAvailable == 0) {
- stop();
- }
+ counter = 0;
} else if (WOULD_BLOCK == status) {
- if (mStopPlaying)
- return OK;
- else
- return TIMED_OUT;
+ // if no buffer has been received for MAX_WAIT_TIME_MS, something has gone wrong
+ if (counter == maxTries) return TIMED_OUT;
+ counter++;
}
}
return OK;
@@ -199,14 +198,15 @@
status_t AudioPlayback::waitForConsumption(bool testSeek) {
if (PLAY_STARTED != mState) return INVALID_OPERATION;
- // in static buffer mode, lets not play clips with duration > 30 sec
- int retry = 300;
- // Total number of frames in the input file.
+
+ const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
+ int counter = 0;
size_t totalFrameCount = mMemCapacity / mTrack->frameSize();
- while (!mStopPlaying && retry > 0) {
- // Get the total numbers of frames played.
+ while (!mStopPlaying && counter < maxTries) {
uint32_t currPosition;
mTrack->getPosition(&currPosition);
+ if (currPosition >= totalFrameCount) counter++;
+
if (testSeek && (currPosition > totalFrameCount * 0.6)) {
testSeek = false;
if (!mTrack->hasStarted()) return BAD_VALUE;
@@ -227,10 +227,9 @@
if (bufferPosition != setPosition) return BAD_VALUE;
mTrack->start();
}
- std::this_thread::sleep_for(std::chrono::milliseconds(100));
- retry--;
+ std::this_thread::sleep_for(std::chrono::milliseconds(WAIT_PERIOD_MS));
}
- if (!mStopPlaying) return TIMED_OUT;
+ if (!mStopPlaying && counter == maxTries) return TIMED_OUT;
return OK;
}
@@ -246,7 +245,7 @@
void AudioPlayback::stop() {
std::unique_lock<std::mutex> lock{mMutex};
mStopPlaying = true;
- if (mState != PLAY_STOPPED) {
+ if (mState != PLAY_STOPPED && mState != PLAY_NO_INIT) {
int32_t msec = 0;
(void)mTrack->pendingDuration(&msec);
mTrack->stopAndJoinCallbacks();
@@ -274,7 +273,7 @@
}
// no more frames to read
- if (mNumFramesReceived > mNumFramesToRecord || mStopRecording) {
+ if (mNumFramesReceived >= mNumFramesToRecord || mStopRecording) {
mStopRecording = true;
return 0;
}
@@ -369,14 +368,16 @@
AudioCapture::AudioCapture(audio_source_t inputSource, uint32_t sampleRate, audio_format_t format,
audio_channel_mask_t channelMask, audio_input_flags_t flags,
- audio_session_t sessionId, AudioRecord::transfer_type transferType)
+ audio_session_t sessionId, AudioRecord::transfer_type transferType,
+ const audio_attributes_t* attributes)
: mInputSource(inputSource),
mSampleRate(sampleRate),
mFormat(format),
mChannelMask(channelMask),
mFlags(flags),
mSessionId(sessionId),
- mTransferType(transferType) {
+ mTransferType(transferType),
+ mAttributes(attributes) {
mFrameCount = 0;
mNotificationFrames = 0;
mNumFramesToRecord = 0;
@@ -389,9 +390,6 @@
mReceivedCbMarkerCount = 0;
mState = REC_NO_INIT;
mStopRecording = false;
-#if RECORD_TO_FILE
- CreateRandomFile(mOutFileFd);
-#endif
}
AudioCapture::~AudioCapture() {
@@ -431,19 +429,20 @@
if (mSampleRate == 48000) { // test all available constructors
mRecord = new AudioRecord(mInputSource, mSampleRate, mFormat, mChannelMask,
attributionSource, mFrameCount, nullptr /* callback */,
- mNotificationFrames, mSessionId, mTransferType, mFlags);
+ mNotificationFrames, mSessionId, mTransferType, mFlags,
+ mAttributes);
} else {
mRecord = new AudioRecord(attributionSource);
status = mRecord->set(mInputSource, mSampleRate, mFormat, mChannelMask, mFrameCount,
nullptr /* callback */, 0 /* notificationFrames */,
false /* canCallJava */, mSessionId, mTransferType, mFlags,
- attributionSource.uid, attributionSource.pid);
+ attributionSource.uid, attributionSource.pid, mAttributes);
}
if (NO_ERROR != status) return status;
} else if (mTransferType == AudioRecord::TRANSFER_CALLBACK) {
mRecord = new AudioRecord(mInputSource, mSampleRate, mFormat, mChannelMask,
attributionSource, mFrameCount, this, mNotificationFrames,
- mSessionId, mTransferType, mFlags);
+ mSessionId, mTransferType, mFlags, mAttributes);
} else {
ALOGE("Test application is not handling transfer type %s",
AudioRecord::convertTransferToText(mTransferType));
@@ -460,6 +459,26 @@
return status;
}
+status_t AudioCapture::setRecordDuration(float durationInSec) {
+ if (REC_READY != mState) {
+ return INVALID_OPERATION;
+ }
+ uint32_t sampleRate = mSampleRate == 0 ? mRecord->getSampleRate() : mSampleRate;
+ mNumFramesToRecord = (sampleRate * durationInSec);
+ return OK;
+}
+
+status_t AudioCapture::enableRecordDump() {
+ if (mOutFileFd != -1) {
+ return INVALID_OPERATION;
+ }
+ TemporaryFile tf("/data/local/tmp");
+ tf.DoNotRemove();
+ mOutFileFd = tf.release();
+ mFileName = std::string{tf.path};
+ return OK;
+}
+
sp<AudioRecord> AudioCapture::getAudioRecordHandle() {
return (REC_NO_INIT == mState) ? nullptr : mRecord;
}
@@ -481,7 +500,7 @@
status_t AudioCapture::stop() {
status_t status = OK;
mStopRecording = true;
- if (mState != REC_STOPPED) {
+ if (mState != REC_STOPPED && mState != REC_NO_INIT) {
if (mInputSource != AUDIO_SOURCE_DEFAULT) {
bool state = false;
status = AudioSystem::isSourceActive(mInputSource, &state);
@@ -495,81 +514,66 @@
}
status_t AudioCapture::obtainBuffer(RawBuffer& buffer) {
- if (REC_STARTED != mState && REC_STOPPED != mState) return INVALID_OPERATION;
- int retry = 25;
- AudioRecord::Buffer recordBuffer;
- recordBuffer.frameCount = mNotificationFrames;
+ if (REC_STARTED != mState) return INVALID_OPERATION;
+ const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
+ int counter = 0;
size_t nonContig = 0;
- status_t status = mRecord->obtainBuffer(&recordBuffer, retry, &nonContig);
- if (OK == status) {
- const int64_t timestampUs =
- ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
- mRecord->getSampleRate();
- RawBuffer buff{-1, timestampUs, static_cast<int32_t>(recordBuffer.size())};
- memcpy(buff.mData.get(), recordBuffer.data(), recordBuffer.size());
- buffer = std::move(buff);
- mNumFramesReceived += recordBuffer.size() / mRecord->frameSize();
- mRecord->releaseBuffer(&recordBuffer);
- if (mNumFramesReceived > mNumFramesToRecord) {
- stop();
+ while (mNumFramesReceived < mNumFramesToRecord) {
+ AudioRecord::Buffer recordBuffer;
+ recordBuffer.frameCount = mNotificationFrames;
+ status_t status = mRecord->obtainBuffer(&recordBuffer, 1, &nonContig);
+ if (OK == status) {
+ const int64_t timestampUs =
+ ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+ mRecord->getSampleRate();
+ RawBuffer buff{-1, timestampUs, static_cast<int32_t>(recordBuffer.size())};
+ memcpy(buff.mData.get(), recordBuffer.data(), recordBuffer.size());
+ buffer = std::move(buff);
+ mNumFramesReceived += recordBuffer.size() / mRecord->frameSize();
+ mRecord->releaseBuffer(&recordBuffer);
+ counter = 0;
+ } else if (WOULD_BLOCK == status) {
+ // if no buffer has been received for MAX_WAIT_TIME_MS, something has gone wrong
+ if (counter == maxTries) return TIMED_OUT;
+ counter++;
}
- } else if (status == WOULD_BLOCK) {
- if (mStopRecording)
- return WOULD_BLOCK;
- else
- return TIMED_OUT;
}
return OK;
}
status_t AudioCapture::obtainBufferCb(RawBuffer& buffer) {
if (REC_STARTED != mState) return INVALID_OPERATION;
- int retry = 10;
+ const int maxTries = MAX_WAIT_TIME_MS / WAIT_PERIOD_MS;
+ int counter = 0;
std::unique_lock<std::mutex> lock{mMutex};
- while (mBuffersReceived.empty() && !mStopRecording && retry > 0) {
- mCondition.wait_for(lock, std::chrono::milliseconds(100));
- retry--;
+ while (mBuffersReceived.empty() && !mStopRecording && counter < maxTries) {
+ mCondition.wait_for(lock, std::chrono::milliseconds(WAIT_PERIOD_MS));
+ counter++;
}
if (!mBuffersReceived.empty()) {
auto it = mBuffersReceived.begin();
buffer = std::move(*it);
mBuffersReceived.erase(it);
} else {
- if (retry == 0) return TIMED_OUT;
- if (mStopRecording)
- return WOULD_BLOCK;
- else
- return UNKNOWN_ERROR;
+ if (!mStopRecording && counter == maxTries) return TIMED_OUT;
}
return OK;
}
status_t AudioCapture::audioProcess() {
RawBuffer buffer;
- while (true) {
- status_t status;
+ status_t status = OK;
+ while (mNumFramesReceived < mNumFramesToRecord && status == OK) {
if (mTransferType == AudioRecord::TRANSFER_CALLBACK)
status = obtainBufferCb(buffer);
else
status = obtainBuffer(buffer);
- switch (status) {
- case OK:
- if (mOutFileFd > 0) {
- const char* ptr =
- static_cast<const char*>(static_cast<void*>(buffer.mData.get()));
- write(mOutFileFd, ptr, buffer.mCapacity);
- }
- break;
- case WOULD_BLOCK:
- return OK;
- case TIMED_OUT: // "recorder application timed out from receiving buffers"
- case NO_INIT: // "recorder not initialized"
- case INVALID_OPERATION: // "recorder not started"
- case UNKNOWN_ERROR: // "Unknown error"
- default:
- return status;
+ if (OK == status && mOutFileFd > 0) {
+ const char* ptr = static_cast<const char*>(static_cast<void*>(buffer.mData.get()));
+ write(mOutFileFd, ptr, buffer.mCapacity);
}
}
+ return OK;
}
status_t listAudioPorts(std::vector<audio_port_v7>& portsVec) {
@@ -613,13 +617,15 @@
}
status_t getPortByAttributes(audio_port_role_t role, audio_port_type_t type,
- audio_devices_t deviceType, audio_port_v7& port) {
+ audio_devices_t deviceType, const std::string& address,
+ audio_port_v7& port) {
std::vector<struct audio_port_v7> ports;
status_t status = listAudioPorts(ports);
if (status != OK) return status;
for (auto i = 0; i < ports.size(); i++) {
if (ports[i].role == role && ports[i].type == type &&
- ports[i].ext.device.type == deviceType) {
+ ports[i].ext.device.type == deviceType &&
+ !strncmp(ports[i].ext.device.address, address.c_str(), AUDIO_DEVICE_MAX_ADDRESS_LEN)) {
port = ports[i];
return OK;
}
diff --git a/media/libaudioclient/tests/audio_test_utils.h b/media/libaudioclient/tests/audio_test_utils.h
index f35b65d..90c30c2 100644
--- a/media/libaudioclient/tests/audio_test_utils.h
+++ b/media/libaudioclient/tests/audio_test_utils.h
@@ -34,8 +34,6 @@
#include <media/AudioRecord.h>
#include <media/AudioTrack.h>
-#define RECORD_TO_FILE 0
-
using namespace android;
struct MixPort {
@@ -53,11 +51,11 @@
status_t parse_audio_policy_configuration_xml(std::vector<std::string>& attachedDevices,
std::vector<MixPort>& mixPorts,
std::vector<Route>& routes);
-void CreateRandomFile(int& fd);
status_t listAudioPorts(std::vector<audio_port_v7>& portsVec);
status_t listAudioPatches(std::vector<struct audio_patch>& patchesVec);
status_t getPortByAttributes(audio_port_role_t role, audio_port_type_t type,
- audio_devices_t deviceType, audio_port_v7& port);
+ audio_devices_t deviceType, const std::string& address,
+ audio_port_v7& port);
status_t getPatchForOutputMix(audio_io_handle_t audioIo, audio_patch& patch);
status_t getPatchForInputMix(audio_io_handle_t audioIo, audio_patch& patch);
bool patchContainsOutputDevice(audio_port_handle_t deviceId, audio_patch patch);
@@ -76,7 +74,7 @@
std::condition_variable mCondition;
void onAudioDeviceUpdate(audio_io_handle_t audioIo, audio_port_handle_t deviceId);
- status_t waitForAudioDeviceCb();
+ status_t waitForAudioDeviceCb(audio_port_handle_t expDeviceId = AUDIO_PORT_HANDLE_NONE);
};
// Simple AudioPlayback class.
@@ -148,7 +146,8 @@
audio_channel_mask_t channelMask,
audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- AudioRecord::transfer_type transferType = AudioRecord::TRANSFER_CALLBACK);
+ AudioRecord::transfer_type transferType = AudioRecord::TRANSFER_CALLBACK,
+ const audio_attributes_t* attributes = nullptr);
~AudioCapture();
size_t onMoreData(const AudioRecord::Buffer& buffer) override;
void onOverrun() override;
@@ -156,6 +155,9 @@
void onNewPos(uint32_t newPos) override;
void onNewIAudioRecord() override;
status_t create();
+ status_t setRecordDuration(float durationInSec);
+ status_t enableRecordDump();
+ std::string getRecordDumpFileName() const { return mFileName; }
sp<AudioRecord> getAudioRecordHandle();
status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
audio_session_t triggerSession = AUDIO_SESSION_NONE);
@@ -190,11 +192,13 @@
const audio_input_flags_t mFlags;
const audio_session_t mSessionId;
const AudioRecord::transfer_type mTransferType;
+ const audio_attributes_t* mAttributes;
size_t mMaxBytesPerCallback = 2048;
sp<AudioRecord> mRecord;
State mState;
bool mStopRecording;
+ std::string mFileName;
int mOutFileFd = -1;
std::mutex mMutex;
diff --git a/media/libaudioclient/tests/audioeffect_analyser.cpp b/media/libaudioclient/tests/audioeffect_analyser.cpp
new file mode 100644
index 0000000..94accae
--- /dev/null
+++ b/media/libaudioclient/tests/audioeffect_analyser.cpp
@@ -0,0 +1,419 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "AudioEffectAnalyser"
+
+#include <android-base/file.h>
+#include <android-base/stringprintf.h>
+#include <gtest/gtest.h>
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_bassboost.h>
+#include <system/audio_effects/effect_equalizer.h>
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <tuple>
+#include <vector>
+
+#include "audio_test_utils.h"
+#include "pffft.hpp"
+
+#define CHECK_OK(expr, msg) \
+ mStatus = (expr); \
+ if (OK != mStatus) { \
+ mMsg = (msg); \
+ return; \
+ }
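+// Note (added for clarity): CHECK_OK expands in place, so it relies on the enclosing object
+// exposing mStatus and mMsg members and on the surrounding method returning void; it records
+// the first failing status and message and returns early.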
+
+using namespace android;
+
+constexpr float kDefAmplitude = 0.60f;
+
+constexpr float kPlayBackDurationSec = 1.5;
+constexpr float kCaptureDurationSec = 1.0;
+constexpr float kPrimeDurationInSec = 0.5;
+
+// chosen so that the largest EQ band center frequency can be sampled safely
+constexpr uint32_t kSamplingFrequency = 48000;
+
+// avoids any format conversion before the FFT
+constexpr audio_format_t kFormat = AUDIO_FORMAT_PCM_FLOAT;
+
+// playback and capture are done with the channel mask configured to mono.
+// effect analysis should not depend on the mask, and mono keeps the analysis simple.
+
+constexpr int kNPointFFT = 16384;
+constexpr float kBinWidth = (float)kSamplingFrequency / kNPointFFT;
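+// with the values above, kBinWidth = 48000 / 16384 ≈ 2.93 Hz per FFT bin (illustrative arithmetic)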
+
+const char* gPackageName = "AudioEffectAnalyser";
+
+static_assert(kPrimeDurationInSec + 2 * kNPointFFT / kSamplingFrequency < kCaptureDurationSec,
+ "capture at least, prime, pad, nPointFft size of samples");
+static_assert(kPrimeDurationInSec + 2 * kNPointFFT / kSamplingFrequency < kPlayBackDurationSec,
+ "playback needs to be active during capture");
+
+struct CaptureEnv {
+ // input args
+ uint32_t mSampleRate{kSamplingFrequency};
+ audio_format_t mFormat{kFormat};
+ audio_channel_mask_t mChannelMask{AUDIO_CHANNEL_IN_MONO};
+ float mCaptureDuration{kCaptureDurationSec};
+ // output val
+ status_t mStatus{OK};
+ std::string mMsg;
+ std::string mDumpFileName;
+
+ ~CaptureEnv();
+ void capture();
+};
+
+CaptureEnv::~CaptureEnv() {
+ if (!mDumpFileName.empty()) {
+ std::ifstream f(mDumpFileName);
+ if (f.good()) {
+ f.close();
+ remove(mDumpFileName.c_str());
+ }
+ }
+}
+
+void CaptureEnv::capture() {
+ audio_port_v7 port;
+ CHECK_OK(getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", port),
+ "Could not find port")
+ const auto capture =
+ sp<AudioCapture>::make(AUDIO_SOURCE_REMOTE_SUBMIX, mSampleRate, mFormat, mChannelMask);
+ CHECK_OK(capture->create(), "record creation failed")
+ CHECK_OK(capture->setRecordDuration(mCaptureDuration), "set record duration failed")
+ CHECK_OK(capture->enableRecordDump(), "enable record dump failed")
+ auto cbCapture = sp<OnAudioDeviceUpdateNotifier>::make();
+ CHECK_OK(capture->getAudioRecordHandle()->addAudioDeviceCallback(cbCapture),
+ "addAudioDeviceCallback failed")
+ CHECK_OK(capture->start(), "start recording failed")
+ CHECK_OK(capture->audioProcess(), "recording process failed")
+ CHECK_OK(cbCapture->waitForAudioDeviceCb(), "audio device callback notification timed out");
+ if (port.id != capture->getAudioRecordHandle()->getRoutedDeviceId()) {
+ CHECK_OK(BAD_VALUE, "Capture NOT routed on expected port")
+ }
+ CHECK_OK(getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "0", port),
+ "Could not find port")
+ CHECK_OK(capture->stop(), "record stop failed")
+ mDumpFileName = capture->getRecordDumpFileName();
+}
+
+struct PlaybackEnv {
+ // input args
+ uint32_t mSampleRate{kSamplingFrequency};
+ audio_format_t mFormat{kFormat};
+ audio_channel_mask_t mChannelMask{AUDIO_CHANNEL_OUT_MONO};
+ audio_session_t mSessionId{AUDIO_SESSION_NONE};
+ std::string mRes;
+ // output val
+ status_t mStatus{OK};
+ std::string mMsg;
+
+ void play();
+};
+
+void PlaybackEnv::play() {
+ const auto ap =
+ sp<AudioPlayback>::make(mSampleRate, mFormat, mChannelMask, AUDIO_OUTPUT_FLAG_NONE,
+ mSessionId, AudioTrack::TRANSFER_OBTAIN);
+ CHECK_OK(ap->loadResource(mRes.c_str()), "Unable to open Resource")
+ const auto cbPlayback = sp<OnAudioDeviceUpdateNotifier>::make();
+ CHECK_OK(ap->create(), "track creation failed")
+ ap->getAudioTrackHandle()->setVolume(1.0f);
+ CHECK_OK(ap->getAudioTrackHandle()->addAudioDeviceCallback(cbPlayback),
+ "addAudioDeviceCallback failed")
+ CHECK_OK(ap->start(), "audio track start failed")
+ CHECK_OK(cbPlayback->waitForAudioDeviceCb(), "audio device callback notification timed out")
+ CHECK_OK(ap->onProcess(), "playback process failed")
+ ap->stop();
+}
+
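+// Sums sine tones at the requested frequencies; each tone is scaled by amplitude / N so the
+// combined signal stays within +/- amplitude (descriptive note added for clarity).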
+void generateMultiTone(const std::vector<int>& toneFrequencies, float samplingFrequency,
+ float duration, float amplitude, float* buffer, int numSamples) {
+ int totalFrameCount = (samplingFrequency * duration);
+ int limit = std::min(totalFrameCount, numSamples);
+
+ for (auto i = 0; i < limit; i++) {
+ buffer[i] = 0;
+ for (auto j = 0; j < toneFrequencies.size(); j++) {
+ buffer[i] += sin(2 * M_PI * toneFrequencies[j] * i / samplingFrequency);
+ }
+ buffer[i] *= (amplitude / toneFrequencies.size());
+ }
+}
+
+sp<AudioEffect> createEffect(const effect_uuid_t* type,
+ audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX) {
+ std::string packageName{gPackageName};
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageName;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> effect = sp<AudioEffect>::make(attributionSource);
+ effect->set(type, nullptr, 0, nullptr, sessionId, AUDIO_IO_HANDLE_NONE, {}, false, false);
+ return effect;
+}
+
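+// Plays the given tone resource on the requested session while capturing the mix via the remote
+// submix path, runs an FFT over the steady-state portion of the capture, and reports per-tone
+// gain in dB relative to the FFT magnitude of the generated input (summary added for clarity).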
+void computeFilterGainsAtTones(float captureDuration, int nPointFft, std::vector<int>& binOffsets,
+ float* inputMag, float* gaindB, const char* res,
+ audio_session_t sessionId) {
+ int totalFrameCount = captureDuration * kSamplingFrequency;
+ auto output = pffft::AlignedVector<float>(totalFrameCount);
+ auto fftOutput = pffft::AlignedVector<float>(nPointFft);
+ PlaybackEnv argsP;
+ argsP.mRes = std::string{res};
+ argsP.mSessionId = sessionId;
+ CaptureEnv argsR;
+ argsR.mCaptureDuration = captureDuration;
+ std::thread playbackThread(&PlaybackEnv::play, &argsP);
+ std::thread captureThread(&CaptureEnv::capture, &argsR);
+ captureThread.join();
+ playbackThread.join();
+ ASSERT_EQ(OK, argsR.mStatus) << argsR.mMsg;
+ ASSERT_EQ(OK, argsP.mStatus) << argsP.mMsg;
+ ASSERT_FALSE(argsR.mDumpFileName.empty()) << "recorded data not written to file";
+ std::ifstream fin(argsR.mDumpFileName, std::ios::in | std::ios::binary);
+ fin.read((char*)output.data(), totalFrameCount * sizeof(output[0]));
+ fin.close();
+ PFFFT_Setup* handle = pffft_new_setup(nPointFft, PFFFT_REAL);
+ // Ignore the first few samples: do not analyse until the audio track has been re-routed to the
+ // remote submix source and the effect filter response has reached steady state (priming /
+ // pruning samples).
+ int rerouteOffset = kPrimeDurationInSec * kSamplingFrequency;
+ pffft_transform_ordered(handle, output.data() + rerouteOffset, fftOutput.data(), nullptr,
+ PFFFT_FORWARD);
+ pffft_destroy_setup(handle);
+ for (auto i = 0; i < binOffsets.size(); i++) {
+ auto k = binOffsets[i];
+ auto outputMag = sqrt((fftOutput[k * 2] * fftOutput[k * 2]) +
+ (fftOutput[k * 2 + 1] * fftOutput[k * 2 + 1]));
+ gaindB[i] = 20 * log10(outputMag / inputMag[i]);
+ }
+}
+
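+// Maps a frequency to the nearest FFT bin and returns {bin index, bin-centered frequency}.
+// Example (illustrative): with kBinWidth ≈ 2.93 Hz, 100 Hz maps to bin 34, centered at ~100 Hz.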
+std::tuple<int, int> roundToFreqCenteredToFftBin(float binWidth, float freq) {
+ int bin_index = std::round(freq / binWidth);
+ int cfreq = std::round(bin_index * binWidth);
+ return std::make_tuple(bin_index, cfreq);
+}
+
+TEST(AudioEffectTest, CheckEqualizerEffect) {
+ audio_session_t sessionId =
+ (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ sp<AudioEffect> equalizer = createEffect(SL_IID_EQUALIZER, sessionId);
+ ASSERT_EQ(OK, equalizer->initCheck());
+ ASSERT_EQ(NO_ERROR, equalizer->setEnabled(true));
+ if ((equalizer->descriptor().flags & EFFECT_FLAG_HW_ACC_MASK) != 0) {
+ GTEST_SKIP() << "effect processed output inaccessible, skipping test";
+ }
+#define MAX_PARAMS 64
+ uint32_t buf32[sizeof(effect_param_t) / sizeof(uint32_t) + MAX_PARAMS];
+ effect_param_t* eqParam = (effect_param_t*)(&buf32);
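+ // Note (added for clarity): effect_param_t stores psize bytes of parameter data followed by
+ // vsize bytes of value data in data[], with the parameter area padded to a 32-bit boundary;
+ // hence the value is read at ((int32_t*)eqParam->data + 1) when psize == sizeof(uint32_t).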
+
+ // get num of presets
+ eqParam->psize = sizeof(uint32_t);
+ eqParam->vsize = sizeof(uint16_t);
+ *(int32_t*)eqParam->data = EQ_PARAM_GET_NUM_OF_PRESETS;
+ EXPECT_EQ(0, equalizer->getParameter(eqParam));
+ EXPECT_EQ(0, eqParam->status);
+ int numPresets = *((uint16_t*)((int32_t*)eqParam->data + 1));
+
+ // get num of bands
+ eqParam->psize = sizeof(uint32_t);
+ eqParam->vsize = sizeof(uint16_t);
+ *(int32_t*)eqParam->data = EQ_PARAM_NUM_BANDS;
+ EXPECT_EQ(0, equalizer->getParameter(eqParam));
+ EXPECT_EQ(0, eqParam->status);
+ int numBands = *((uint16_t*)((int32_t*)eqParam->data + 1));
+
+ const int totalFrameCount = kSamplingFrequency * kPlayBackDurationSec;
+
+ // get band center frequencies
+ std::vector<int> centerFrequencies;
+ std::vector<int> binOffsets;
+ for (auto i = 0; i < numBands; i++) {
+ eqParam->psize = sizeof(uint32_t) * 2;
+ eqParam->vsize = sizeof(uint32_t);
+ *(int32_t*)eqParam->data = EQ_PARAM_CENTER_FREQ;
+ *((uint16_t*)((int32_t*)eqParam->data + 1)) = i;
+ EXPECT_EQ(0, equalizer->getParameter(eqParam));
+ EXPECT_EQ(0, eqParam->status);
+ float cfreq = *((int32_t*)eqParam->data + 2) / 1000; // value is in millihertz
+ // pick frequency close to bin center frequency
+ auto [bin_index, bin_freq] = roundToFreqCenteredToFftBin(kBinWidth, cfreq);
+ centerFrequencies.push_back(bin_freq);
+ binOffsets.push_back(bin_index);
+ }
+
+ // input for effect module
+ auto input = pffft::AlignedVector<float>(totalFrameCount);
+ generateMultiTone(centerFrequencies, kSamplingFrequency, kPlayBackDurationSec, kDefAmplitude,
+ input.data(), totalFrameCount);
+ auto fftInput = pffft::AlignedVector<float>(kNPointFFT);
+ PFFFT_Setup* handle = pffft_new_setup(kNPointFFT, PFFFT_REAL);
+ pffft_transform_ordered(handle, input.data(), fftInput.data(), nullptr, PFFFT_FORWARD);
+ pffft_destroy_setup(handle);
+ float inputMag[numBands];
+ for (auto i = 0; i < numBands; i++) {
+ auto k = binOffsets[i];
+ inputMag[i] = sqrt((fftInput[k * 2] * fftInput[k * 2]) +
+ (fftInput[k * 2 + 1] * fftInput[k * 2 + 1]));
+ }
+ TemporaryFile tf("/data/local/tmp");
+ close(tf.release());
+ std::ofstream fout(tf.path, std::ios::out | std::ios::binary);
+ fout.write((char*)input.data(), input.size() * sizeof(input[0]));
+ fout.close();
+
+ float expGaindB[numBands], actGaindB[numBands];
+
+ std::string msg = "";
+ int numPresetsOk = 0;
+ for (auto preset = 0; preset < numPresets; preset++) {
+ // set preset
+ eqParam->psize = sizeof(uint32_t);
+ eqParam->vsize = sizeof(uint32_t);
+ *(int32_t*)eqParam->data = EQ_PARAM_CUR_PRESET;
+ *((uint16_t*)((int32_t*)eqParam->data + 1)) = preset;
+ EXPECT_EQ(0, equalizer->setParameter(eqParam));
+ EXPECT_EQ(0, eqParam->status);
+ // get preset gains
+ eqParam->psize = sizeof(uint32_t);
+ eqParam->vsize = (numBands + 1) * sizeof(uint32_t);
+ *(int32_t*)eqParam->data = EQ_PARAM_PROPERTIES;
+ EXPECT_EQ(0, equalizer->getParameter(eqParam));
+ EXPECT_EQ(0, eqParam->status);
+ t_equalizer_settings* settings =
+ reinterpret_cast<t_equalizer_settings*>((int32_t*)eqParam->data + 1);
+ EXPECT_EQ(preset, settings->curPreset);
+ EXPECT_EQ(numBands, settings->numBands);
+ for (auto i = 0; i < numBands; i++) {
+ expGaindB[i] = ((int16_t)settings->bandLevels[i]) / 100.0f; // band levels are in millibels
+ }
+ memset(actGaindB, 0, sizeof(actGaindB));
+ ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(kCaptureDurationSec, kNPointFFT,
+ binOffsets, inputMag, actGaindB, tf.path,
+ sessionId));
+ bool isOk = true;
+ for (auto i = 0; i < numBands - 1; i++) {
+ auto diffA = expGaindB[i] - expGaindB[i + 1];
+ auto diffB = actGaindB[i] - actGaindB[i + 1];
+ if (diffA == 0 && fabs(diffA - diffB) > 1.0f) {
+ msg += (android::base::StringPrintf(
+ "For eq preset : %d, between bands %d and %d, expected relative gain is : "
+ "%f, got relative gain is : %f, error : %f \n",
+ preset, i, i + 1, diffA, diffB, diffA - diffB));
+ isOk = false;
+ } else if (diffA * diffB < 0) {
+ msg += (android::base::StringPrintf(
+ "For eq preset : %d, between bands %d and %d, expected relative gain and "
+ "seen relative gain are of opposite signs \n. Expected relative gain is : "
+ "%f, seen relative gain is : %f \n",
+ preset, i, i + 1, diffA, diffB));
+ isOk = false;
+ }
+ }
+ if (isOk) numPresetsOk++;
+ }
+ EXPECT_EQ(numPresetsOk, numPresets) << msg;
+}
+
+TEST(AudioEffectTest, CheckBassBoostEffect) {
+ audio_session_t sessionId =
+ (audio_session_t)AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ sp<AudioEffect> bassboost = createEffect(SL_IID_BASSBOOST, sessionId);
+ ASSERT_EQ(OK, bassboost->initCheck());
+ ASSERT_EQ(NO_ERROR, bassboost->setEnabled(true));
+ if ((bassboost->descriptor().flags & EFFECT_FLAG_HW_ACC_MASK) != 0) {
+ GTEST_SKIP() << "effect processed output inaccessible, skipping test";
+ }
+ int32_t buf32[sizeof(effect_param_t) / sizeof(int32_t) + MAX_PARAMS];
+ effect_param_t* bbParam = (effect_param_t*)(&buf32);
+
+ bbParam->psize = sizeof(int32_t);
+ bbParam->vsize = sizeof(int32_t);
+ *(int32_t*)bbParam->data = BASSBOOST_PARAM_STRENGTH_SUPPORTED;
+ EXPECT_EQ(0, bassboost->getParameter(bbParam));
+ EXPECT_EQ(0, bbParam->status);
+ bool strengthSupported = *((int32_t*)bbParam->data + 1);
+
+ const int totalFrameCount = kSamplingFrequency * kPlayBackDurationSec;
+
+ // select a bass frequency and a speech-range tone (for computing relative gain)
+ std::vector<int> testFrequencies{100, 1200};
+ std::vector<int> binOffsets;
+ for (auto i = 0; i < testFrequencies.size(); i++) {
+ // pick frequency close to bin center frequency
+ auto [bin_index, bin_freq] = roundToFreqCenteredToFftBin(kBinWidth, testFrequencies[i]);
+ testFrequencies[i] = bin_freq;
+ binOffsets.push_back(bin_index);
+ }
+
+ // input for effect module
+ auto input = pffft::AlignedVector<float>(totalFrameCount);
+ generateMultiTone(testFrequencies, kSamplingFrequency, kPlayBackDurationSec, kDefAmplitude,
+ input.data(), totalFrameCount);
+ auto fftInput = pffft::AlignedVector<float>(kNPointFFT);
+ PFFFT_Setup* handle = pffft_new_setup(kNPointFFT, PFFFT_REAL);
+ pffft_transform_ordered(handle, input.data(), fftInput.data(), nullptr, PFFFT_FORWARD);
+ pffft_destroy_setup(handle);
+ float inputMag[testFrequencies.size()];
+ for (auto i = 0; i < testFrequencies.size(); i++) {
+ auto k = binOffsets[i];
+ inputMag[i] = sqrt((fftInput[k * 2] * fftInput[k * 2]) +
+ (fftInput[k * 2 + 1] * fftInput[k * 2 + 1]));
+ }
+ TemporaryFile tf("/data/local/tmp");
+ close(tf.release());
+ std::ofstream fout(tf.path, std::ios::out | std::ios::binary);
+ fout.write((char*)input.data(), input.size() * sizeof(input[0]));
+ fout.close();
+
+ float gainWithOutFilter[testFrequencies.size()];
+ memset(gainWithOutFilter, 0, sizeof(gainWithOutFilter));
+ ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(kCaptureDurationSec, kNPointFFT, binOffsets,
+ inputMag, gainWithOutFilter, tf.path,
+ AUDIO_SESSION_OUTPUT_MIX));
+ float diffA = gainWithOutFilter[0] - gainWithOutFilter[1];
+ float prevGain = -100.f;
+ for (auto strength = 150; strength < 1000; strength += strengthSupported ? 150 : 1000) {
+ // configure filter strength
+ if (strengthSupported) {
+ bbParam->psize = sizeof(int32_t);
+ bbParam->vsize = sizeof(int16_t);
+ *(int32_t*)bbParam->data = BASSBOOST_PARAM_STRENGTH;
+ *((int16_t*)((int32_t*)bbParam->data + 1)) = strength;
+ EXPECT_EQ(0, bassboost->setParameter(bbParam));
+ EXPECT_EQ(0, bbParam->status);
+ }
+ float gainWithFilter[testFrequencies.size()];
+ memset(gainWithFilter, 0, sizeof(gainWithFilter));
+ ASSERT_NO_FATAL_FAILURE(computeFilterGainsAtTones(kCaptureDurationSec, kNPointFFT,
+ binOffsets, inputMag, gainWithFilter,
+ tf.path, sessionId));
+ float diffB = gainWithFilter[0] - gainWithFilter[1];
+ EXPECT_GT(diffB, diffA) << "bassboost effect not seen";
+ EXPECT_GE(diffB, prevGain) << "increasing boost strength caused a drop in gain";
+ prevGain = diffB;
+ }
+}
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
index 4bd81c8..2c5fcd7 100644
--- a/media/libaudioclient/tests/audiorouting_tests.cpp
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -46,10 +46,10 @@
attributes.usage = AUDIO_USAGE_MEDIA;
attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
attributes.flags = flags[i];
- sp<AudioPlayback> ap = sp<AudioPlayback>::make(
- 0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
- AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_OBTAIN,
- &attributes);
+ sp<AudioPlayback> ap = sp<AudioPlayback>::make(0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE,
+ AudioTrack::TRANSFER_OBTAIN, &attributes);
ASSERT_NE(nullptr, ap);
ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
<< "Unable to open Resource";
@@ -77,46 +77,186 @@
}
}
-TEST(AudioTrackTest, TestRemoteSubmix) {
- std::vector<std::string> attachedDevices;
- std::vector<MixPort> mixPorts;
- std::vector<Route> routes;
- EXPECT_EQ(OK, parse_audio_policy_configuration_xml(attachedDevices, mixPorts, routes));
- bool hasFlag = false;
- for (int j = 0; j < attachedDevices.size() && !hasFlag; j++) {
- if (attachedDevices[j].find("Remote Submix") != -1) hasFlag = true;
+TEST(AudioTrackTest, DefaultRoutingTest) {
+ audio_port_v7 port;
+ if (OK != getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", port)) {
+ GTEST_SKIP() << "remote submix in device not connected";
}
- if (!hasFlag) GTEST_SKIP() << " Device does not have Remote Submix port.";
- sp<AudioCapture> capture = new AudioCapture(AUDIO_SOURCE_REMOTE_SUBMIX, 48000,
- AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO);
- ASSERT_NE(nullptr, capture);
- ASSERT_EQ(OK, capture->create()) << "record creation failed";
+ // create record instance
+ sp<AudioCapture> capture = sp<AudioCapture>::make(
+ AUDIO_SOURCE_REMOTE_SUBMIX, 48000, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO);
+ ASSERT_NE(nullptr, capture);
+ EXPECT_EQ(OK, capture->create()) << "record creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cbCapture = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, capture->getAudioRecordHandle()->addAudioDeviceCallback(cbCapture));
+
+ // create playback instance
sp<AudioPlayback> playback = sp<AudioPlayback>::make(
48000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE);
ASSERT_NE(nullptr, playback);
ASSERT_EQ(OK, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
<< "Unable to open Resource";
- ASSERT_EQ(OK, playback->create()) << "track creation failed";
+ EXPECT_EQ(OK, playback->create()) << "track creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cbPlayback = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, playback->getAudioTrackHandle()->addAudioDeviceCallback(cbPlayback));
- audio_port_v7 port;
- status_t status = getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
- AUDIO_DEVICE_IN_REMOTE_SUBMIX, port);
- EXPECT_EQ(OK, status) << "Could not find port";
-
+ // capture should be routed to submix in port
EXPECT_EQ(OK, capture->start()) << "start recording failed";
+ EXPECT_EQ(OK, cbCapture->waitForAudioDeviceCb());
EXPECT_EQ(port.id, capture->getAudioRecordHandle()->getRoutedDeviceId())
<< "Capture NOT routed on expected port";
- status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
- AUDIO_DEVICE_OUT_REMOTE_SUBMIX, port);
+ // capture start should create submix out port
+ status_t status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "0", port);
EXPECT_EQ(OK, status) << "Could not find port";
+ // playback should be routed to submix out as long as capture is active
EXPECT_EQ(OK, playback->start()) << "audio track start failed";
- EXPECT_EQ(OK, playback->onProcess());
- ASSERT_EQ(port.id, playback->getAudioTrackHandle()->getRoutedDeviceId())
+ EXPECT_EQ(OK, cbPlayback->waitForAudioDeviceCb());
+ EXPECT_EQ(port.id, playback->getAudioTrackHandle()->getRoutedDeviceId())
<< "Playback NOT routed on expected port";
+
capture->stop();
playback->stop();
}
+
+class AudioRoutingTest : public ::testing::Test {
+ public:
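+ // SetUp registers a dynamic policy mix: players with usage AUDIO_USAGE_MEDIA are looped back
+ // to a remote submix device at address "mix_1" (summary added for clarity).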
+ void SetUp() override {
+ audio_port_v7 port;
+ if (OK != getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", port)) {
+ GTEST_SKIP() << "remote submix in device not connected";
+ }
+ uint32_t mixType = MIX_TYPE_PLAYERS;
+ uint32_t mixFlag = MIX_ROUTE_FLAG_LOOP_BACK;
+ audio_devices_t deviceType = AUDIO_DEVICE_OUT_REMOTE_SUBMIX;
+ AudioMixMatchCriterion criterion(AUDIO_USAGE_MEDIA, AUDIO_SOURCE_DEFAULT,
+ RULE_MATCH_ATTRIBUTE_USAGE);
+ std::vector<AudioMixMatchCriterion> criteria{criterion};
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ config.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ config.format = AUDIO_FORMAT_PCM_16_BIT;
+ config.sample_rate = 48000;
+ AudioMix mix(criteria, mixType, config, mixFlag, String8{mAddress.c_str()}, 0);
+ mix.mDeviceType = deviceType;
+ mMixes.push(mix);
+ if (OK == AudioSystem::registerPolicyMixes(mMixes, true)) {
+ mPolicyMixRegistered = true;
+ }
+ ASSERT_TRUE(mPolicyMixRegistered) << "register policy mix failed";
+ }
+
+ void TearDown() override {
+ if (mPolicyMixRegistered) {
+ EXPECT_EQ(OK, AudioSystem::registerPolicyMixes(mMixes, false));
+ }
+ }
+
+ bool mPolicyMixRegistered{false};
+ std::string mAddress{"mix_1"};
+ Vector<AudioMix> mMixes;
+};
+
+TEST_F(AudioRoutingTest, ConcurrentDynamicRoutingTest) {
+ audio_port_v7 port, port_mix;
+ // expect legacy submix in port to be connected
+ status_t status = getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, "0", port);
+ EXPECT_EQ(OK, status) << "Could not find port";
+
+ // as policy mix is registered, expect submix in port with mAddress to be connected
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, mAddress, port_mix);
+ EXPECT_EQ(OK, status) << "Could not find port";
+
+ // create playback instance
+ sp<AudioPlayback> playback = sp<AudioPlayback>::make(
+ 48000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_OBTAIN);
+ ASSERT_NE(nullptr, playback);
+ ASSERT_EQ(OK, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, playback->create()) << "track creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cbPlayback = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, playback->getAudioTrackHandle()->addAudioDeviceCallback(cbPlayback));
+
+ // create capture instances on different ports
+ sp<AudioCapture> captureA = sp<AudioCapture>::make(
+ AUDIO_SOURCE_REMOTE_SUBMIX, 48000, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO);
+ ASSERT_NE(nullptr, captureA);
+ EXPECT_EQ(OK, captureA->create()) << "record creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cbCaptureA = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, captureA->getAudioRecordHandle()->addAudioDeviceCallback(cbCaptureA));
+
+ audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+ attr.source = AUDIO_SOURCE_REMOTE_SUBMIX;
+ sprintf(attr.tags, "addr=%s", mAddress.c_str());
+ sp<AudioCapture> captureB = sp<AudioCapture>::make(
+ AUDIO_SOURCE_REMOTE_SUBMIX, 48000, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO,
+ AUDIO_INPUT_FLAG_NONE, AUDIO_SESSION_ALLOCATE, AudioRecord::TRANSFER_CALLBACK, &attr);
+ ASSERT_NE(nullptr, captureB);
+ EXPECT_EQ(OK, captureB->create()) << "record creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cbCaptureB = sp<OnAudioDeviceUpdateNotifier>::make();
+ EXPECT_EQ(OK, captureB->getAudioRecordHandle()->addAudioDeviceCallback(cbCaptureB));
+
+ // launch
+ EXPECT_EQ(OK, captureA->start()) << "start recording failed";
+ EXPECT_EQ(OK, cbCaptureA->waitForAudioDeviceCb());
+ EXPECT_EQ(port.id, captureA->getAudioRecordHandle()->getRoutedDeviceId())
+ << "Capture NOT routed on expected port";
+
+ EXPECT_EQ(OK, captureB->start()) << "start recording failed";
+ EXPECT_EQ(OK, cbCaptureB->waitForAudioDeviceCb());
+ EXPECT_EQ(port_mix.id, captureB->getAudioRecordHandle()->getRoutedDeviceId())
+ << "Capture NOT routed on expected port";
+
+ // as record started, expect submix out ports to be connected
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "0", port);
+ EXPECT_EQ(OK, status) << "unexpected submix out port found";
+
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mAddress, port_mix);
+ EXPECT_EQ(OK, status) << "Could not find port";
+
+ // check if playback routed to desired port
+ EXPECT_EQ(OK, playback->start());
+ EXPECT_EQ(OK, cbPlayback->waitForAudioDeviceCb());
+ EXPECT_EQ(port_mix.id, playback->getAudioTrackHandle()->getRoutedDeviceId())
+ << "Playback NOT routed on expected port";
+
+ captureB->stop();
+
+ // check if mAddress submix out is disconnected as capture session is stopped
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, mAddress, port_mix);
+ EXPECT_NE(OK, status) << "unexpected submix out port found";
+
+ // check if legacy submix out is connected
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, "0", port);
+ EXPECT_EQ(OK, status) << "port not found";
+
+ // unregister policy
+ EXPECT_EQ(OK, AudioSystem::registerPolicyMixes(mMixes, false));
+ mPolicyMixRegistered = false;
+
+ // as policy mix is unregistered, expect submix in port with mAddress to be disconnected
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, mAddress, port_mix);
+ EXPECT_NE(OK, status) << "unexpected submix in port found";
+
+ playback->onProcess();
+ // as captureA is active, it should re-route to legacy submix
+ EXPECT_EQ(OK, cbPlayback->waitForAudioDeviceCb(port.id));
+ EXPECT_EQ(port.id, playback->getAudioTrackHandle()->getRoutedDeviceId())
+ << "Playback NOT routed on expected port";
+
+ captureA->stop();
+ playback->stop();
+}
diff --git a/media/libaudiofoundation/Android.bp b/media/libaudiofoundation/Android.bp
index a456a2a..c758fcd 100644
--- a/media/libaudiofoundation/Android.bp
+++ b/media/libaudiofoundation/Android.bp
@@ -18,12 +18,12 @@
export_include_dirs: ["include"],
header_libs: [
- "libaudioclient_aidl_conversion_util",
+ "libaudio_aidl_conversion_common_util_cpp",
"libaudio_system_headers",
"libmedia_helper_headers",
],
export_header_lib_headers: [
- "libaudioclient_aidl_conversion_util",
+ "libaudio_aidl_conversion_common_util_cpp",
"libaudio_system_headers",
"libmedia_helper_headers",
],
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index b6e6c84..6c01e29 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -66,6 +66,9 @@
for (const auto &channel : channelMasks) {
if (audio_channel_mask_out_to_in(channel) != AUDIO_CHANNEL_INVALID) {
inMaskSet.insert(audio_channel_mask_out_to_in(channel));
+ } else if (audio_channel_mask_get_representation(channel)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ inMaskSet.insert(channel);
}
}
return inMaskSet;
@@ -76,6 +79,9 @@
for (const auto &channel : channelMasks) {
if (audio_channel_mask_in_to_out(channel) != AUDIO_CHANNEL_INVALID) {
outMaskSet.insert(audio_channel_mask_in_to_out(channel));
+ } else if (audio_channel_mask_get_representation(channel)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ outMaskSet.insert(channel);
}
}
return outMaskSet;
diff --git a/media/libaudiofoundation/tests/Android.bp b/media/libaudiofoundation/tests/Android.bp
index 0e733d3..2f4aee0 100644
--- a/media/libaudiofoundation/tests/Android.bp
+++ b/media/libaudiofoundation/tests/Android.bp
@@ -23,6 +23,7 @@
static_libs: [
"audioclient-types-aidl-cpp",
"libaudioclient_aidl_conversion",
+ "libaudio_aidl_conversion_common_cpp",
"libaudiofoundation",
"libstagefright_foundation",
],
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index 5f63e8d..f47dd0b 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -13,7 +13,7 @@
srcs: [
"DevicesFactoryHalInterface.cpp",
"EffectsFactoryHalInterface.cpp",
- "FactoryHalHidl.cpp",
+ "FactoryHal.cpp",
],
cflags: [
@@ -28,10 +28,12 @@
"libaudiohal@6.0",
"libaudiohal@7.0",
"libaudiohal@7.1",
+ "libaudiohal@aidl",
],
shared_libs: [
"audioclient-types-aidl-cpp",
+ "libbinder_ndk",
"libdl",
"libhidlbase",
"liblog",
@@ -41,6 +43,7 @@
header_libs: [
"libaudiohal_headers",
"libbase_headers",
+ "liberror_headers",
"libmediautils_headers",
]
}
@@ -65,15 +68,11 @@
header_libs: [
"libaudiohal_headers"
- ]
+ ],
}
cc_library_headers {
name: "libaudiohal_headers",
export_include_dirs: ["include"],
-
- // This is needed because the stream interface includes media/MicrophoneInfo.h
- header_libs: ["av-headers"],
- export_header_lib_headers: ["av-headers"],
}
diff --git a/media/libaudiohal/DevicesFactoryHalInterface.cpp b/media/libaudiohal/DevicesFactoryHalInterface.cpp
index 5ad26fc..adce681 100644
--- a/media/libaudiohal/DevicesFactoryHalInterface.cpp
+++ b/media/libaudiohal/DevicesFactoryHalInterface.cpp
@@ -14,19 +14,14 @@
* limitations under the License.
*/
-#include <string>
-
#include <media/audiohal/DevicesFactoryHalInterface.h>
-#include <media/audiohal/FactoryHalHidl.h>
+#include <media/audiohal/FactoryHal.h>
namespace android {
// static
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
- using namespace std::string_literals;
- return createPreferredImpl<DevicesFactoryHalInterface>(
- std::make_pair("android.hardware.audio"s, "IDevicesFactory"s),
- std::make_pair("android.hardware.audio.effect"s, "IEffectsFactory"s));
+ return createPreferredImpl<DevicesFactoryHalInterface>(true /* isCore */);
}
} // namespace android
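Both factory entry points (devices here, effects below) now pick the implementation through a single boolean instead of a pair of package/interface names. Client code is unchanged; a usage sketch, assuming the createPreferredImpl() template wrapper lives in the new FactoryHal.h header:

    #include <media/audiohal/DevicesFactoryHalInterface.h>
    #include <media/audiohal/EffectsFactoryHalInterface.h>

    void openFactories() {
        // isCore == true path
        android::sp<android::DevicesFactoryHalInterface> devices =
                android::DevicesFactoryHalInterface::create();
        // isCore == false path
        android::sp<android::EffectsFactoryHalInterface> effects =
                android::EffectsFactoryHalInterface::create();
        // Either pointer may be null if no matching HAL service is registered.
    }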
diff --git a/media/libaudiohal/EffectsFactoryHalInterface.cpp b/media/libaudiohal/EffectsFactoryHalInterface.cpp
index 8a28f64..3a05f21 100644
--- a/media/libaudiohal/EffectsFactoryHalInterface.cpp
+++ b/media/libaudiohal/EffectsFactoryHalInterface.cpp
@@ -14,19 +14,14 @@
* limitations under the License.
*/
-#include <string>
-
#include <media/audiohal/EffectsFactoryHalInterface.h>
-#include <media/audiohal/FactoryHalHidl.h>
+#include <media/audiohal/FactoryHal.h>
namespace android {
// static
sp<EffectsFactoryHalInterface> EffectsFactoryHalInterface::create() {
- using namespace std::string_literals;
- return createPreferredImpl<EffectsFactoryHalInterface>(
- std::make_pair("android.hardware.audio.effect"s, "IEffectsFactory"s),
- std::make_pair("android.hardware.audio"s, "IDevicesFactory"s));
+ return createPreferredImpl<EffectsFactoryHalInterface>(false /* isCore */);
}
// static
diff --git a/media/libaudiohal/FactoryHal.cpp b/media/libaudiohal/FactoryHal.cpp
new file mode 100644
index 0000000..84ac64c
--- /dev/null
+++ b/media/libaudiohal/FactoryHal.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "FactoryHal"
+#include <map>
+#include <memory>
+
+#include <algorithm>
+#include <array>
+#include <cstddef>
+#include <dlfcn.h>
+#include <utility>
+
+#include <android/binder_manager.h>
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <hidl/ServiceManagement.h>
+#include <hidl/Status.h>
+#include <utils/Log.h>
+
+#include "include/media/audiohal/AudioHalVersionInfo.h"
+#include "include/media/audiohal/FactoryHal.h"
+
+namespace android::detail {
+
+namespace {
+
+using ::android::detail::AudioHalVersionInfo;
+
+// The pair of the interface's package name and the interface name,
+// e.g. <"android.hardware.audio", "IDevicesFactory"> for HIDL, <"android.hardware.audio.core",
+// "IModule"> for AIDL.
+// Splitting is used for easier construction of versioned names (FQNs).
+using InterfaceName = std::pair<std::string, std::string>;
+
+/**
+ * Supported HAL versions, from most recent to least recent.
+ * This list needs to be kept in sync with AudioHalVersionInfo.VERSIONS in
+ * media/java/android/media/AudioHalVersionInfo.java.
+ */
+static const std::array<AudioHalVersionInfo, 5> sAudioHALVersions = {
+ // TODO: uncomment the entry below to enable the AIDL HAL
+ // AudioHalVersionInfo(AudioHalVersionInfo::Type::AIDL, 1, 0),
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 7, 1),
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 7, 0),
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 6, 0),
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 5, 0),
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 4, 0),
+};
+
+static const std::map<AudioHalVersionInfo::Type, InterfaceName> sDevicesHALInterfaces = {
+ {AudioHalVersionInfo::Type::AIDL, std::make_pair("android.hardware.audio.core", "IModule")},
+ {AudioHalVersionInfo::Type::HIDL,
+ std::make_pair("android.hardware.audio", "IDevicesFactory")},
+};
+
+static const std::map<AudioHalVersionInfo::Type, InterfaceName> sEffectsHALInterfaces = {
+ {AudioHalVersionInfo::Type::AIDL,
+ std::make_pair("android.hardware.audio.effect", "IFactory")},
+ {AudioHalVersionInfo::Type::HIDL,
+ std::make_pair("android.hardware.audio.effect", "IEffectsFactory")},
+};
+
+bool createHalService(const AudioHalVersionInfo& version, bool isDevice, void** rawInterface) {
+ const std::string libName = "libaudiohal@" + version.toVersionString() + ".so";
+ const std::string factoryFunctionName =
+ isDevice ? "createIDevicesFactory" : "createIEffectsFactory";
+ constexpr int dlMode = RTLD_LAZY;
+ void* handle = nullptr;
+ dlerror(); // clear
+ handle = dlopen(libName.c_str(), dlMode);
+ if (handle == nullptr) {
+ const char* error = dlerror();
+ ALOGE("Failed to dlopen %s: %s", libName.c_str(),
+ error != nullptr ? error : "unknown error");
+ return false;
+ }
+ void* (*factoryFunction)();
+ *(void **)(&factoryFunction) = dlsym(handle, factoryFunctionName.c_str());
+ if (!factoryFunction) {
+ const char* error = dlerror();
+ ALOGE("Factory function %s not found in library %s: %s",
+ factoryFunctionName.c_str(), libName.c_str(),
+ error != nullptr ? error : "unknown error");
+ dlclose(handle);
+ return false;
+ }
+ *rawInterface = (*factoryFunction)();
+ ALOGW_IF(!*rawInterface, "Factory function %s from %s returned nullptr",
+ factoryFunctionName.c_str(), libName.c_str());
+ return true;
+}
+
+bool hasAidlHalService(const InterfaceName& interface, const AudioHalVersionInfo& version) {
+ const std::string name = interface.first + "." + interface.second + "/default";
+ AIBinder* binder = AServiceManager_checkService(name.c_str());
+ if (binder == nullptr) {
+ ALOGW("%s Service %s doesn't exist", __func__, name.c_str());
+ return false;
+ }
+ ALOGI("%s AIDL Service %s exist: %s", __func__, name.c_str(), version.toString().c_str());
+ return true;
+}
+
+bool hasHidlHalService(const InterfaceName& interface, const AudioHalVersionInfo& version) {
+ using ::android::hidl::manager::V1_0::IServiceManager;
+ sp<IServiceManager> sm = ::android::hardware::defaultServiceManager();
+ if (!sm) {
+ ALOGW("Failed to obtain HIDL ServiceManager");
+ return false;
+ }
+ // Since audio HAL doesn't support multiple clients, avoid instantiating
+ // the interface right away. Instead, query the transport type for it.
+ using ::android::hardware::Return;
+ using Transport = IServiceManager::Transport;
+ const std::string fqName =
+ interface.first + "@" + version.toVersionString() + "::" + interface.second;
+ const std::string instance = "default";
+ Return<Transport> transport = sm->getTransport(fqName, instance);
+ if (!transport.isOk()) {
+ ALOGW("Failed to obtain transport type for %s/%s: %s",
+ fqName.c_str(), instance.c_str(), transport.description().c_str());
+ return false;
+ }
+ return transport != Transport::EMPTY;
+}
+
+bool hasHalService(const InterfaceName& interface, const AudioHalVersionInfo& version) {
+ auto halType = version.getType();
+ if (halType == AudioHalVersionInfo::Type::AIDL) {
+ return hasAidlHalService(interface, version);
+ } else if (halType == AudioHalVersionInfo::Type::HIDL) {
+ return hasHidlHalService(interface, version);
+ } else {
+ ALOGE("HalType not supported %s", version.toString().c_str());
+ return false;
+ }
+}
+
+} // namespace
+
+void *createPreferredImpl(bool isDevice) {
+ auto findMostRecentVersion = [](const auto& iMap) {
+ return std::find_if(sAudioHALVersions.begin(), sAudioHALVersions.end(),
+ [iMap](const auto& v) {
+ auto iface = iMap.find(v.getType());
+ return hasHalService(iface->second, v);
+ });
+ };
+
+ auto interfaceMap = isDevice ? sDevicesHALInterfaces : sEffectsHALInterfaces;
+ auto siblingInterfaceMap = isDevice ? sEffectsHALInterfaces : sDevicesHALInterfaces;
+ auto ifaceVersionIt = findMostRecentVersion(interfaceMap);
+ auto siblingVersionIt = findMostRecentVersion(siblingInterfaceMap);
+ if (ifaceVersionIt != sAudioHALVersions.end() && siblingVersionIt != sAudioHALVersions.end() &&
+ // same HAL type (HIDL/AIDL) and same major version
+ ifaceVersionIt->getType() == siblingVersionIt->getType() &&
+ ifaceVersionIt->getMajorVersion() == siblingVersionIt->getMajorVersion()) {
+ void* rawInterface;
+ if (createHalService(std::max(*ifaceVersionIt, *siblingVersionIt), isDevice,
+ &rawInterface)) {
+ return rawInterface;
+ } else {
+ ALOGE("Failed to create HAL services with major %s, sibling %s!",
+ ifaceVersionIt->toString().c_str(), siblingVersionIt->toString().c_str());
+ }
+ } else {
+ ALOGE("Found no HAL version, main(%s) %s %s!", isDevice ? "Device" : "Effect",
+ (ifaceVersionIt == sAudioHALVersions.end()) ? "null"
+ : ifaceVersionIt->toString().c_str(),
+ (siblingVersionIt == sAudioHALVersions.end()) ? "null"
+ : siblingVersionIt->toString().c_str());
+ }
+ return nullptr;
+}
+
+} // namespace android::detail
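The selection rule above picks the newest entry in sAudioHALVersions for which a service is registered, and only loads a library when the device and effect factories agree on HAL type and major version. A standalone sketch of the "most recent registered version" part, with a stand-in predicate instead of hasHalService():

    #include <algorithm>
    #include <array>
    #include <functional>
    #include <string>

    // isRegistered is a stand-in for hasHalService(); it is not part of this patch.
    static const char* pickVersion(const std::function<bool(const std::string&)>& isRegistered) {
        static const std::array<const char*, 5> kVersions = {"7.1", "7.0", "6.0", "5.0", "4.0"};
        auto it = std::find_if(kVersions.begin(), kVersions.end(),
                               [&](const char* v) { return isRegistered(v); });
        return it != kVersions.end() ? *it : nullptr;
    }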
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
deleted file mode 100644
index 590fec5..0000000
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * Copyright (C) 2020 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define LOG_TAG "FactoryHalHidl"
-
-#include <algorithm>
-#include <array>
-#include <utility>
-
-#include <media/audiohal/FactoryHalHidl.h>
-
-#include <dlfcn.h>
-
-#include <android/hidl/manager/1.0/IServiceManager.h>
-#include <hidl/ServiceManagement.h>
-#include <hidl/Status.h>
-#include <utils/Log.h>
-
-namespace android::detail {
-
-namespace {
-/** Supported HAL versions, from most recent to least recent.
- */
-#define CONC_VERSION(maj, min) #maj "." #min
-#define DECLARE_VERSION(maj, min) std::make_pair(std::make_pair(maj, min), CONC_VERSION(maj, min))
-static constexpr std::array<std::pair<std::pair<int, int>, const char*>, 5> sAudioHALVersions = {
- DECLARE_VERSION(7, 1),
- DECLARE_VERSION(7, 0),
- DECLARE_VERSION(6, 0),
- DECLARE_VERSION(5, 0),
- DECLARE_VERSION(4, 0)
-};
-
-bool createHalService(const std::string& version, const std::string& interface,
- void** rawInterface) {
- const std::string libName = "libaudiohal@" + version + ".so";
- const std::string factoryFunctionName = "create" + interface;
- constexpr int dlMode = RTLD_LAZY;
- void* handle = nullptr;
- dlerror(); // clear
- handle = dlopen(libName.c_str(), dlMode);
- if (handle == nullptr) {
- const char* error = dlerror();
- ALOGE("Failed to dlopen %s: %s", libName.c_str(),
- error != nullptr ? error : "unknown error");
- return false;
- }
- void* (*factoryFunction)();
- *(void **)(&factoryFunction) = dlsym(handle, factoryFunctionName.c_str());
- if (!factoryFunction) {
- const char* error = dlerror();
- ALOGE("Factory function %s not found in library %s: %s",
- factoryFunctionName.c_str(), libName.c_str(),
- error != nullptr ? error : "unknown error");
- dlclose(handle);
- return false;
- }
- *rawInterface = (*factoryFunction)();
- ALOGW_IF(!*rawInterface, "Factory function %s from %s returned nullptr",
- factoryFunctionName.c_str(), libName.c_str());
- return true;
-}
-
-bool hasHalService(const std::string& package, const std::string& version,
- const std::string& interface) {
- using ::android::hidl::manager::V1_0::IServiceManager;
- sp<IServiceManager> sm = ::android::hardware::defaultServiceManager();
- if (!sm) {
- ALOGE("Failed to obtain HIDL ServiceManager");
- return false;
- }
- // Since audio HAL doesn't support multiple clients, avoid instantiating
- // the interface right away. Instead, query the transport type for it.
- using ::android::hardware::Return;
- using Transport = IServiceManager::Transport;
- const std::string fqName = package + "@" + version + "::" + interface;
- const std::string instance = "default";
- Return<Transport> transport = sm->getTransport(fqName, instance);
- if (!transport.isOk()) {
- ALOGE("Failed to obtain transport type for %s/%s: %s",
- fqName.c_str(), instance.c_str(), transport.description().c_str());
- return false;
- }
- return transport != Transport::EMPTY;
-}
-
-} // namespace
-
-void* createPreferredImpl(const InterfaceName& iface, const InterfaceName& siblingIface) {
- auto findMostRecentVersion = [](const InterfaceName& iface) {
- return std::find_if(detail::sAudioHALVersions.begin(), detail::sAudioHALVersions.end(),
- [&](const auto& v) { return hasHalService(iface.first, v.second, iface.second); });
- };
- auto ifaceVersionIt = findMostRecentVersion(iface);
- auto siblingVersionIt = findMostRecentVersion(siblingIface);
- if (ifaceVersionIt != detail::sAudioHALVersions.end() &&
- siblingVersionIt != detail::sAudioHALVersions.end() &&
- // same major version
- ifaceVersionIt->first.first == siblingVersionIt->first.first) {
- std::string libraryVersion =
- ifaceVersionIt->first >= siblingVersionIt->first ?
- ifaceVersionIt->second : siblingVersionIt->second;
- void* rawInterface;
- if (createHalService(libraryVersion, iface.second, &rawInterface)) {
- return rawInterface;
- }
- }
- return nullptr;
-}
-
-} // namespace android::detail
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index d30883a..d151817 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -12,13 +12,14 @@
srcs: [
"CoreConversionHelperHidl.cpp",
"DeviceHalHidl.cpp",
+ "DevicesFactoryHalEntry.cpp",
"DevicesFactoryHalHidl.cpp",
"StreamHalHidl.cpp",
],
}
filegroup {
- name: "audio_effect_hal_client_sources",
+ name: "audio_effect_hidl_hal_client_sources",
srcs: [
"EffectBufferHalHidl.cpp",
"EffectConversionHelperHidl.cpp",
@@ -28,6 +29,21 @@
}
cc_defaults {
+ name: "libaudiohal_hidl_default",
+ shared_libs: [
+ "android.hardware.audio.common-util",
+ "android.hidl.allocator@1.0",
+ "android.hidl.memory@1.0",
+ "libaudiohal_deathhandler",
+ "libhidlbase",
+ "libhidlmemory",
+ ],
+ header_libs: [
+ "android.hardware.audio.common.util@all-versions",
+ ]
+}
+
+cc_defaults {
name: "libaudiohal_default",
cflags: [
@@ -37,28 +53,21 @@
"-fvisibility=hidden",
],
shared_libs: [
- "android.hardware.audio.common-util",
- "android.hidl.allocator@1.0",
- "android.hidl.memory@1.0",
+ "audioclient-types-aidl-cpp",
"av-types-aidl-cpp",
"libaudiofoundation",
- "libaudiohal_deathhandler",
"libaudioutils",
"libbase",
"libbinder",
"libcutils",
"libfmq",
"libhardware",
- "libhidlbase",
- "libhidlmemory",
"liblog",
"libmedia_helper",
"libmediautils",
"libutils",
- "audioclient-types-aidl-cpp",
],
header_libs: [
- "android.hardware.audio.common.util@all-versions",
"libaudioclient_headers",
"libaudiohal_headers"
],
@@ -70,11 +79,14 @@
cc_library_shared {
name: "libaudiohal@4.0",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
":audio_core_hal_client_sources",
- ":audio_effect_hal_client_sources",
- "EffectsFactoryHalHidlEntry.cpp",
+ ":audio_effect_hidl_hal_client_sources",
+ "EffectsFactoryHalEntry.cpp",
],
shared_libs: [
"android.hardware.audio.common@4.0",
@@ -93,11 +105,14 @@
cc_library_shared {
name: "libaudiohal@5.0",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
":audio_core_hal_client_sources",
- ":audio_effect_hal_client_sources",
- "EffectsFactoryHalHidlEntry.cpp",
+ ":audio_effect_hidl_hal_client_sources",
+ "EffectsFactoryHalEntry.cpp",
],
shared_libs: [
"android.hardware.audio.common@5.0",
@@ -116,11 +131,14 @@
cc_library_shared {
name: "libaudiohal@6.0",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
":audio_core_hal_client_sources",
- ":audio_effect_hal_client_sources",
- "EffectsFactoryHalHidlEntry.cpp",
+ ":audio_effect_hidl_hal_client_sources",
+ "EffectsFactoryHalEntry.cpp",
],
shared_libs: [
"android.hardware.audio.common@6.0",
@@ -139,9 +157,12 @@
cc_library_static {
name: "libaudiohal.effect@7.0",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
- ":audio_effect_hal_client_sources",
+ ":audio_effect_hidl_hal_client_sources",
],
static_libs: [
"android.hardware.audio.common@7.0",
@@ -158,10 +179,13 @@
cc_library_shared {
name: "libaudiohal@7.0",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
":audio_core_hal_client_sources",
- "EffectsFactoryHalHidlEntry.cpp",
+ "EffectsFactoryHalEntry.cpp",
],
static_libs: [
"android.hardware.audio.common@7.0",
@@ -182,10 +206,13 @@
cc_library_shared {
name: "libaudiohal@7.1",
- defaults: ["libaudiohal_default"],
+ defaults: [
+ "libaudiohal_default",
+ "libaudiohal_hidl_default"
+ ],
srcs: [
":audio_core_hal_client_sources",
- "EffectsFactoryHalHidlEntry.cpp",
+ "EffectsFactoryHalEntry.cpp",
],
static_libs: [
"android.hardware.audio.common@7.0",
@@ -207,3 +234,41 @@
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "libaudiohal@aidl",
+ defaults: [
+ "libaudiohal_default",
+ "latest_android_hardware_audio_common_ndk_shared",
+ "latest_android_hardware_audio_core_ndk_shared",
+ "latest_android_hardware_audio_effect_ndk_static",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ srcs: [
+ "DevicesFactoryHalEntry.cpp",
+ "DevicesFactoryHalAidl.cpp",
+ "EffectBufferHalAidl.cpp",
+ "EffectHalAidl.cpp",
+ "EffectsFactoryHalAidl.cpp",
+ "EffectsFactoryHalEntry.cpp",
+ ],
+ static_libs: [
+ "android.hardware.common-V2-ndk",
+ "android.hardware.common.fmq-V1-ndk",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ "libaudio_aidl_conversion_common_ndk",
+ ],
+ header_libs: [
+ "libaudio_aidl_conversion_common_util_ndk",
+ "libaudio_system_headers",
+ ],
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wthread-safety",
+ "-DBACKEND_NDK",
+ ],
+}
\ No newline at end of file
diff --git a/media/libaudiohal/impl/DeviceHalAidl.cpp b/media/libaudiohal/impl/DeviceHalAidl.cpp
new file mode 100644
index 0000000..d85d960
--- /dev/null
+++ b/media/libaudiohal/impl/DeviceHalAidl.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DeviceHalAidl"
+
+#include <utils/Log.h>
+
+#include "DeviceHalAidl.h"
+
+namespace android {
+
+status_t DeviceHalAidl::getSupportedDevices(uint32_t* devices) {
+ ALOGE("%s not implemented yet devices %p", __func__, devices);
+ return OK;
+}
+
+status_t DeviceHalAidl::initCheck() {
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t DeviceHalAidl::setVoiceVolume(float volume) {
+ mVoiceVolume = volume;
+ ALOGE("%s not implemented yet %f", __func__, volume);
+ return OK;
+}
+
+status_t DeviceHalAidl::setMasterVolume(float volume) {
+ mMasterVolume = volume;
+ ALOGE("%s not implemented yet %f", __func__, volume);
+ return OK;
+}
+
+status_t DeviceHalAidl::getMasterVolume(float *volume) {
+ *volume = mMasterVolume;
+ ALOGE("%s not implemented yet %f", __func__, *volume);
+ return OK;
+}
+
+status_t DeviceHalAidl::setMode(audio_mode_t mode) {
+ ALOGE("%s not implemented yet %u", __func__, mode);
+ return OK;
+}
+
+status_t DeviceHalAidl::setMicMute(bool state) {
+ mMicMute = state;
+ ALOGE("%s not implemented yet %d", __func__, state);
+ return OK;
+}
+status_t DeviceHalAidl::getMicMute(bool *state) {
+ *state = mMicMute;
+ ALOGE("%s not implemented yet %d", __func__, *state);
+ return OK;
+}
+status_t DeviceHalAidl::setMasterMute(bool state) {
+ mMasterMute = state;
+ ALOGE("%s not implemented yet %d", __func__, state);
+ return OK;
+}
+status_t DeviceHalAidl::getMasterMute(bool *state) {
+ *state = mMasterMute;
+ ALOGE("%s not implemented yet %d", __func__, *state);
+ return OK;
+}
+
+status_t DeviceHalAidl::setParameters(const String8& kvPairs) {
+ ALOGE("%s not implemented yet %s", __func__, kvPairs.c_str());
+ return OK;
+}
+
+status_t DeviceHalAidl::getParameters(const String8& keys, String8 *values) {
+ ALOGE("%s not implemented yet %s %s", __func__, keys.c_str(), values->c_str());
+ return OK;
+}
+
+status_t DeviceHalAidl::getInputBufferSize(const struct audio_config* config, size_t* size) {
+ ALOGE("%s not implemented yet %p %zu", __func__, config, *size);
+ return OK;
+}
+
+status_t DeviceHalAidl::openOutputStream(audio_io_handle_t handle, audio_devices_t devices,
+ audio_output_flags_t flags, struct audio_config* config,
+ const char* address,
+ sp<StreamOutHalInterface>* outStream) {
+ ALOGE("%s not implemented yet %d %u %u %p %s %p", __func__, handle, devices, flags, config,
+ address, outStream);
+ return OK;
+}
+
+status_t DeviceHalAidl::openInputStream(audio_io_handle_t handle, audio_devices_t devices,
+ struct audio_config* config, audio_input_flags_t flags,
+ const char* address, audio_source_t source,
+ audio_devices_t outputDevice,
+ const char* outputDeviceAddress,
+ sp<StreamInHalInterface>* inStream) {
+ ALOGE("%s not implemented yet %d %u %u %u %p %s %s %p %d", __func__, handle, devices,
+ outputDevice, flags, config, address, outputDeviceAddress, inStream, source);
+ return OK;
+}
+
+status_t DeviceHalAidl::supportsAudioPatches(bool* supportsPatches) {
+ *supportsPatches = true;
+ return OK;
+}
+
+status_t DeviceHalAidl::createAudioPatch(unsigned int num_sources,
+ const struct audio_port_config* sources,
+ unsigned int num_sinks,
+ const struct audio_port_config* sinks,
+ audio_patch_handle_t* patch) {
+ ALOGE("%s not implemented yet %d %p %d %p %p", __func__, num_sources, sources, num_sinks,
+ sinks, patch);
+ return OK;
+}
+
+status_t DeviceHalAidl::releaseAudioPatch(audio_patch_handle_t patch) {
+ ALOGE("%s not implemented yet patch %d", __func__, patch);
+ return OK;
+}
+
+status_t DeviceHalAidl::setAudioPortConfig(const struct audio_port_config* config) {
+ ALOGE("%s not implemented yet config %p", __func__, config);
+ return OK;
+}
+
+status_t DeviceHalAidl::getMicrophones(
+ std::vector<audio_microphone_characteristic_t>* microphones) {
+ ALOGE("%s not implemented yet microphones %p", __func__, microphones);
+ return OK;
+}
+
+status_t DeviceHalAidl::addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) {
+ if (!effect) {
+ return BAD_VALUE;
+ }
+ ALOGE("%s not implemented yet device %d", __func__, device);
+ return OK;
+}
+status_t DeviceHalAidl::removeDeviceEffect(audio_port_handle_t device,
+ sp<EffectHalInterface> effect) {
+ if (!effect) {
+ return BAD_VALUE;
+ }
+ ALOGE("%s not implemented yet device %d", __func__, device);
+ return OK;
+}
+
+status_t DeviceHalAidl::getMmapPolicyInfos(
+ media::audio::common::AudioMMapPolicyType policyType __unused,
+ std::vector<media::audio::common::AudioMMapPolicyInfo>* policyInfos __unused) {
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+int32_t DeviceHalAidl::getAAudioMixerBurstCount() {
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+int32_t DeviceHalAidl::getAAudioHardwareBurstMinUsec() {
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+error::Result<audio_hw_sync_t> DeviceHalAidl::getHwAvSync() {
+ ALOGE("%s not implemented yet", __func__);
+ return base::unexpected(INVALID_OPERATION);
+}
+
+status_t DeviceHalAidl::dump(int __unused, const Vector<String16>& __unused) {
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+int32_t DeviceHalAidl::supportsBluetoothVariableLatency(bool* supports __unused) {
+ ALOGE("%s not implemented yet", __func__);
+ return INVALID_OPERATION;
+}
+
+} // namespace android
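Most of the methods above are placeholders that log and return OK; getHwAvSync() is the exception and reports failure through error::Result. A caller-side sketch, assuming error::Result is the android::base::expected alias provided by liberror_headers (added to libaudiohal's header_libs earlier in this change):

    #include <media/audiohal/DeviceHalInterface.h>

    // Hypothetical caller; "device" is assumed to come from a devices factory.
    void queryHwAvSync(const android::sp<android::DeviceHalInterface>& device) {
        auto result = device->getHwAvSync();
        if (!result.has_value()) {
            // The AIDL stub above currently reports INVALID_OPERATION this way.
            return;
        }
        audio_hw_sync_t sync = result.value();
        (void)sync;
    }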
diff --git a/media/libaudiohal/impl/DeviceHalAidl.h b/media/libaudiohal/impl/DeviceHalAidl.h
new file mode 100644
index 0000000..5e8a8dd
--- /dev/null
+++ b/media/libaudiohal/impl/DeviceHalAidl.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/EffectHalInterface.h>
+
+#include <aidl/android/hardware/audio/core/BpModule.h>
+
+namespace android {
+
+class DeviceHalAidl : public DeviceHalInterface {
+ public:
+ // Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
+ status_t getSupportedDevices(uint32_t *devices) override;
+
+ // Check to see if the audio hardware interface has been initialized.
+ status_t initCheck() override;
+
+ // Set the audio volume of a voice call. Range is between 0.0 and 1.0.
+ status_t setVoiceVolume(float volume) override;
+
+ // Set the audio volume for all audio activities other than voice call.
+ status_t setMasterVolume(float volume) override;
+
+ // Get the current master volume value for the HAL.
+ status_t getMasterVolume(float *volume) override;
+
+ // Called when the audio mode changes.
+ status_t setMode(audio_mode_t mode) override;
+
+ // Muting control.
+ status_t setMicMute(bool state) override;
+
+ status_t getMicMute(bool* state) override;
+
+ status_t setMasterMute(bool state) override;
+
+ status_t getMasterMute(bool *state) override;
+
+ // Set global audio parameters.
+ status_t setParameters(const String8& kvPairs) override;
+
+ // Get global audio parameters.
+ status_t getParameters(const String8& keys, String8 *values) override;
+
+ // Returns audio input buffer size according to parameters passed.
+ status_t getInputBufferSize(const struct audio_config* config, size_t* size) override;
+
+ // Creates and opens the audio hardware output stream. The stream is closed
+ // by releasing all references to the returned object.
+ status_t openOutputStream(audio_io_handle_t handle, audio_devices_t devices,
+ audio_output_flags_t flags, struct audio_config* config,
+ const char* address, sp<StreamOutHalInterface>* outStream) override;
+
+ // Creates and opens the audio hardware input stream. The stream is closed
+ // by releasing all references to the returned object.
+ status_t openInputStream(audio_io_handle_t handle, audio_devices_t devices,
+ struct audio_config* config, audio_input_flags_t flags,
+ const char* address, audio_source_t source,
+ audio_devices_t outputDevice, const char* outputDeviceAddress,
+ sp<StreamInHalInterface>* inStream) override;
+
+ // Returns whether createAudioPatch and releaseAudioPatch operations are supported.
+ status_t supportsAudioPatches(bool* supportsPatches) override;
+
+ // Creates an audio patch between several source and sink ports.
+ status_t createAudioPatch(unsigned int num_sources, const struct audio_port_config* sources,
+ unsigned int num_sinks, const struct audio_port_config* sinks,
+ audio_patch_handle_t* patch) override;
+
+ // Releases an audio patch.
+ status_t releaseAudioPatch(audio_patch_handle_t patch) override;
+
+ // Set audio port configuration.
+ status_t setAudioPortConfig(const struct audio_port_config* config) override;
+
+ // List microphones
+ status_t getMicrophones(std::vector<audio_microphone_characteristic_t>* microphones);
+
+ status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
+
+ status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
+
+ status_t getMmapPolicyInfos(media::audio::common::AudioMMapPolicyType policyType __unused,
+ std::vector<media::audio::common::AudioMMapPolicyInfo>* policyInfos
+ __unused) override;
+
+ int32_t getAAudioMixerBurstCount() override;
+
+ int32_t getAAudioHardwareBurstMinUsec() override;
+
+ error::Result<audio_hw_sync_t> getHwAvSync() override;
+
+ status_t dump(int __unused, const Vector<String16>& __unused) override;
+
+ int32_t supportsBluetoothVariableLatency(bool* supports __unused) override;
+
+ private:
+ friend class DevicesFactoryHalAidl;
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mCore;
+ float mMasterVolume = 0.0f;
+ float mVoiceVolume = 0.0f;
+ bool mMasterMute = false;
+ bool mMicMute = false;
+
+ // Can not be constructed directly by clients.
+ explicit DeviceHalAidl(
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IModule>& core)
+ : mCore(core) {}
+
+ // The destructor automatically closes the device.
+ ~DeviceHalAidl();
+};
+
+} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 0cdf621..be063ab 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -459,12 +459,13 @@
#if MAJOR_VERSION == 2
status_t DeviceHalHidl::getMicrophones(
- std::vector<media::MicrophoneInfo> *microphonesInfo __unused) {
+ std::vector<audio_microphone_characteristic_t> *microphonesInfo __unused) {
if (mDevice == 0) return NO_INIT;
return INVALID_OPERATION;
}
#elif MAJOR_VERSION >= 4
-status_t DeviceHalHidl::getMicrophones(std::vector<media::MicrophoneInfo> *microphonesInfo) {
+status_t DeviceHalHidl::getMicrophones(
+ std::vector<audio_microphone_characteristic_t> *microphonesInfo) {
TIME_CHECK();
if (mDevice == 0) return NO_INIT;
Result retval;
@@ -475,8 +476,7 @@
audio_microphone_characteristic_t dst;
//convert
(void)CoreUtils::microphoneInfoToHal(micArrayHal[k], &dst);
- media::MicrophoneInfo microphone = media::MicrophoneInfo(dst);
- microphonesInfo->push_back(microphone);
+ microphonesInfo->push_back(dst);
}
});
return processReturn("getMicrophones", ret, retval);
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index f6519b6..052eb65 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -30,87 +30,74 @@
{
public:
// Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
- virtual status_t getSupportedDevices(uint32_t *devices);
+ status_t getSupportedDevices(uint32_t *devices) override;
// Check to see if the audio hardware interface has been initialized.
- virtual status_t initCheck();
+ status_t initCheck() override;
// Set the audio volume of a voice call. Range is between 0.0 and 1.0.
- virtual status_t setVoiceVolume(float volume);
+ status_t setVoiceVolume(float volume) override;
// Set the audio volume for all audio activities other than voice call.
- virtual status_t setMasterVolume(float volume);
+ status_t setMasterVolume(float volume) override;
// Get the current master volume value for the HAL.
- virtual status_t getMasterVolume(float *volume);
+ status_t getMasterVolume(float *volume) override;
// Called when the audio mode changes.
- virtual status_t setMode(audio_mode_t mode);
+ status_t setMode(audio_mode_t mode) override;
// Muting control.
- virtual status_t setMicMute(bool state);
- virtual status_t getMicMute(bool *state);
- virtual status_t setMasterMute(bool state);
- virtual status_t getMasterMute(bool *state);
+ status_t setMicMute(bool state) override;
+ status_t getMicMute(bool *state) override;
+ status_t setMasterMute(bool state) override;
+ status_t getMasterMute(bool *state) override;
// Set global audio parameters.
- virtual status_t setParameters(const String8& kvPairs);
+ status_t setParameters(const String8& kvPairs) override;
// Get global audio parameters.
- virtual status_t getParameters(const String8& keys, String8 *values);
+ status_t getParameters(const String8& keys, String8 *values) override;
// Returns audio input buffer size according to parameters passed.
- virtual status_t getInputBufferSize(const struct audio_config *config,
- size_t *size);
+ status_t getInputBufferSize(const struct audio_config* config, size_t* size) override;
// Creates and opens the audio hardware output stream. The stream is closed
// by releasing all references to the returned object.
- virtual status_t openOutputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- audio_output_flags_t flags,
- struct audio_config *config,
- const char *address,
- sp<StreamOutHalInterface> *outStream);
+ status_t openOutputStream(audio_io_handle_t handle, audio_devices_t devices,
+ audio_output_flags_t flags, struct audio_config* config,
+ const char* address, sp<StreamOutHalInterface>* outStream) override;
// Creates and opens the audio hardware input stream. The stream is closed
// by releasing all references to the returned object.
- virtual status_t openInputStream(
- audio_io_handle_t handle,
- audio_devices_t devices,
- struct audio_config *config,
- audio_input_flags_t flags,
- const char *address,
- audio_source_t source,
- audio_devices_t outputDevice,
- const char *outputDeviceAddress,
- sp<StreamInHalInterface> *inStream);
+ status_t openInputStream(audio_io_handle_t handle, audio_devices_t devices,
+ struct audio_config* config, audio_input_flags_t flags,
+ const char* address, audio_source_t source,
+ audio_devices_t outputDevice, const char* outputDeviceAddress,
+ sp<StreamInHalInterface>* inStream) override;
// Returns whether createAudioPatch and releaseAudioPatch operations are supported.
- virtual status_t supportsAudioPatches(bool *supportsPatches);
+ status_t supportsAudioPatches(bool* supportsPatches) override;
// Creates an audio patch between several source and sink ports.
- virtual status_t createAudioPatch(
- unsigned int num_sources,
- const struct audio_port_config *sources,
- unsigned int num_sinks,
- const struct audio_port_config *sinks,
- audio_patch_handle_t *patch);
+ status_t createAudioPatch(unsigned int num_sources, const struct audio_port_config* sources,
+ unsigned int num_sinks, const struct audio_port_config* sinks,
+ audio_patch_handle_t* patch) override;
// Releases an audio patch.
- virtual status_t releaseAudioPatch(audio_patch_handle_t patch);
+ status_t releaseAudioPatch(audio_patch_handle_t patch) override;
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port);
+ status_t getAudioPort(struct audio_port *port) override;
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port_v7 *port);
+ status_t getAudioPort(struct audio_port_v7 *port) override;
// Set audio port configuration.
- virtual status_t setAudioPortConfig(const struct audio_port_config *config);
+ status_t setAudioPortConfig(const struct audio_port_config *config) override;
// List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
+ status_t getMicrophones(std::vector<audio_microphone_characteristic_t>* microphones) override;
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
@@ -132,6 +119,11 @@
return INVALID_OPERATION;
}
+ int32_t supportsBluetoothVariableLatency(bool* supports __unused) override {
+ // TODO: Implement the HAL query when moving to AIDL HAL.
+ return INVALID_OPERATION;
+ }
+
status_t setConnectedState(const struct audio_port_v7 *port, bool connected) override;
error::Result<audio_hw_sync_t> getHwAvSync() override;
diff --git a/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
new file mode 100644
index 0000000..b9ca164
--- /dev/null
+++ b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "DevicesFactoryHalAidl"
+//#define LOG_NDEBUG 0
+
+#include <aidl/android/hardware/audio/core/IModule.h>
+#include <android/binder_manager.h>
+#include <memory>
+#include <utils/Log.h>
+
+#include "DeviceHalAidl.h"
+#include "DevicesFactoryHalAidl.h"
+
+using namespace ::aidl::android::hardware::audio::core;
+using ::android::detail::AudioHalVersionInfo;
+
+namespace android {
+
+DevicesFactoryHalAidl::DevicesFactoryHalAidl(std::shared_ptr<IConfig> iconfig)
+ : mIConfig(std::move(iconfig)) {
+ ALOG_ASSERT(mIConfig != nullptr, "Provided default IConfig service is NULL");
+}
+
+void DevicesFactoryHalAidl::onFirstRef() {
+ ALOGE("%s not implemented yet", __func__);
+}
+
+// Opens a device with the specified name. To close the device, it is
+// necessary to release references to the returned object.
+status_t DevicesFactoryHalAidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
+ if (name == nullptr || device == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGE("%s not implemented yet %s", __func__, name);
+ return INVALID_OPERATION;
+
+ // TODO: only support primary now ("default" means "primary")
+ if (strcmp(name, "primary") != 0) {
+ auto serviceName = std::string() + IModule::descriptor + "/default";
+ auto service = IModule::fromBinder(
+ ndk::SpAIBinder(AServiceManager_waitForService(serviceName.c_str())));
+ ALOGW("%s fromBinder %s %s", __func__, IModule::descriptor, service ? "succ" : "fail");
+ *device = new DeviceHalAidl(service);
+ }
+ return OK;
+}
+
+status_t DevicesFactoryHalAidl::getHalPids(std::vector<pid_t> *pids) {
+ if (pids == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGE("%s not implemented yet", __func__);
+ return INVALID_OPERATION;
+}
+
+status_t DevicesFactoryHalAidl::setCallbackOnce(sp<DevicesFactoryHalCallback> callback) {
+ if (callback == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGE("%s not implemented yet", __func__);
+ return INVALID_OPERATION;
+}
+
+AudioHalVersionInfo DevicesFactoryHalAidl::getHalVersion() const {
+ int32_t versionNumber = 0;
+ if (mIConfig) {
+ if (!mIConfig->getInterfaceVersion(&versionNumber).isOk()) {
+ ALOGE("%s getInterfaceVersion failed", __func__);
+ } else {
+ ALOGI("%s getInterfaceVersion %d", __func__, versionNumber);
+ }
+ }
+ // AIDL interfaces do not have a minor version; report 0 for all of them.
+ return AudioHalVersionInfo(AudioHalVersionInfo::Type::AIDL, versionNumber);
+}
+
+// Main entry-point to the shared library.
+extern "C" __attribute__((visibility("default"))) void* createIDevicesFactoryImpl() {
+ auto serviceName = std::string(IConfig::descriptor) + "/default";
+ auto service = IConfig::fromBinder(
+ ndk::SpAIBinder(AServiceManager_waitForService(serviceName.c_str())));
+ if (!service) {
+ ALOGE("%s binder service %s not exist", __func__, serviceName.c_str());
+ return nullptr;
+ }
+ return new DevicesFactoryHalAidl(service);
+}
+
+} // namespace android
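FactoryHal.cpp probes for the AIDL service with the non-blocking AServiceManager_checkService() before it dlopen()s this library, whereas the entry point above attaches with the blocking AServiceManager_waitForService(). A sketch combining the two steps for the IConfig service; probeThenAttach() is illustrative only:

    #include <memory>
    #include <string>

    #include <aidl/android/hardware/audio/core/IConfig.h>
    #include <android/binder_manager.h>

    using ::aidl::android::hardware::audio::core::IConfig;

    std::shared_ptr<IConfig> probeThenAttach() {
        const std::string name = std::string(IConfig::descriptor) + "/default";
        ndk::SpAIBinder probe(AServiceManager_checkService(name.c_str()));
        if (probe.get() == nullptr) {
            return nullptr;  // service not registered; avoid blocking
        }
        return IConfig::fromBinder(
                ndk::SpAIBinder(AServiceManager_waitForService(name.c_str())));
    }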
diff --git a/media/libaudiohal/impl/DevicesFactoryHalAidl.h b/media/libaudiohal/impl/DevicesFactoryHalAidl.h
new file mode 100644
index 0000000..71138a0
--- /dev/null
+++ b/media/libaudiohal/impl/DevicesFactoryHalAidl.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/core/IConfig.h>
+#include <media/audiohal/DevicesFactoryHalInterface.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+class DevicesFactoryHalAidl : public DevicesFactoryHalInterface
+{
+ public:
+ explicit DevicesFactoryHalAidl(
+ std::shared_ptr<::aidl::android::hardware::audio::core::IConfig> iConfig);
+ void onFirstRef() override;
+
+ // Opens a device with the specified name. To close the device, it is
+ // necessary to release references to the returned object.
+ status_t openDevice(const char *name, sp<DeviceHalInterface> *device) override;
+
+ status_t getHalPids(std::vector<pid_t> *pids) override;
+
+ status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
+
+ android::detail::AudioHalVersionInfo getHalVersion() const override;
+
+ private:
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IConfig> mIConfig;
+ ~DevicesFactoryHalAidl() = default;
+};
+
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp b/media/libaudiohal/impl/DevicesFactoryHalEntry.cpp
similarity index 74%
copy from media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp
copy to media/libaudiohal/impl/DevicesFactoryHalEntry.cpp
index 2c6f2c6..5bf6d89 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalEntry.cpp
@@ -14,8 +14,10 @@
* limitations under the License.
*/
-extern "C" void* createIEffectsFactoryImpl();
+#include "android/media/AudioHalVersion.h"
-extern "C" __attribute__((visibility("default"))) void* createIEffectsFactory() {
- return createIEffectsFactoryImpl();
+extern "C" void* createIDevicesFactoryImpl();
+
+extern "C" __attribute__((visibility("default"))) void* createIDevicesFactory() {
+ return createIDevicesFactoryImpl();
}
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 4069a6b..9f06f83 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -29,6 +29,7 @@
#include "DeviceHalHidl.h"
#include "DevicesFactoryHalHidl.h"
+using ::android::detail::AudioHalVersionInfo;
using ::android::hardware::audio::CPP_VERSION::IDevice;
using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
using ::android::hardware::Return;
@@ -226,8 +227,13 @@
return mDeviceFactories;
}
+
+AudioHalVersionInfo DevicesFactoryHalHidl::getHalVersion() const {
+ return AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, MAJOR_VERSION, MINOR_VERSION);
+}
+
// Main entry-point to the shared library.
-extern "C" __attribute__((visibility("default"))) void* createIDevicesFactory() {
+extern "C" __attribute__((visibility("default"))) void* createIDevicesFactoryImpl() {
auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
return service ? new DevicesFactoryHalHidl(service) : nullptr;
}
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index ffd229d..5294728 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -45,7 +45,7 @@
status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) override;
- float getHalVersion() const override { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+ android::detail::AudioHalVersionInfo getHalVersion() const override;
private:
friend class ServiceNotificationListener;
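getHalVersion() now returns a structured AudioHalVersionInfo instead of the old MAJOR + MINOR/10 float, which could not express the transport type. A small sketch using only the accessors that appear in this patch (the public header path is assumed to be media/audiohal/AudioHalVersionInfo.h):

    #include <string>

    #include <media/audiohal/AudioHalVersionInfo.h>

    using android::detail::AudioHalVersionInfo;

    // "7.1" used to be encoded as the float 7.1f; the structured form also
    // distinguishes HIDL from AIDL.
    static std::string describe(const AudioHalVersionInfo& v) {
        const char* type =
                v.getType() == AudioHalVersionInfo::Type::AIDL ? "AIDL" : "HIDL";
        return std::string(type) + " " + v.toVersionString();
    }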
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.cpp b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
new file mode 100644
index 0000000..5af8e24
--- /dev/null
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectBufferHalAidl"
+//#define LOG_NDEBUG 0
+
+#include <utils/Log.h>
+
+#include "EffectBufferHalAidl.h"
+
+namespace android {
+namespace effect {
+
+// static
+status_t EffectBufferHalAidl::allocate(size_t size, sp<EffectBufferHalInterface>* buffer) {
+ ALOGE("%s not implemented yet %zu %p", __func__, size, buffer);
+ return mirror(nullptr, size, buffer);
+}
+
+status_t EffectBufferHalAidl::mirror(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) {
+ // buffer->setExternalData(external);
+ ALOGW("%s not implemented yet %p %zu %p", __func__, external, size, buffer);
+ return OK;
+}
+
+EffectBufferHalAidl::EffectBufferHalAidl(size_t size)
+ : mBufferSize(size),
+ mFrameCountChanged(false),
+ mExternalData(nullptr),
+ mAudioBuffer{0, {nullptr}} {
+}
+
+EffectBufferHalAidl::~EffectBufferHalAidl() {
+}
+
+status_t EffectBufferHalAidl::init() {
+ ALOGW("%s not implemented yet", __func__);
+ return OK;
+}
+
+audio_buffer_t* EffectBufferHalAidl::audioBuffer() {
+ return &mAudioBuffer;
+}
+
+void* EffectBufferHalAidl::externalData() const {
+ return mExternalData;
+}
+
+void EffectBufferHalAidl::setFrameCount(size_t frameCount) {
+ mAudioBuffer.frameCount = frameCount;
+ mFrameCountChanged = true;
+}
+
+bool EffectBufferHalAidl::checkFrameCountChange() {
+ bool result = mFrameCountChanged;
+ mFrameCountChanged = false;
+ return result;
+}
+
+void EffectBufferHalAidl::setExternalData(void* external) {
+ mExternalData = external;
+}
+
+void EffectBufferHalAidl::update() {
+ ALOGW("%s not implemented yet", __func__);
+}
+
+void EffectBufferHalAidl::commit() {
+ ALOGW("%s not implemented yet", __func__);
+}
+
+void EffectBufferHalAidl::update(size_t size) {
+ ALOGW("%s not implemented yet, size %zu", __func__, size);
+}
+
+void EffectBufferHalAidl::commit(size_t size) {
+ ALOGW("%s not implemented yet, size %zu", __func__, size);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.h b/media/libaudiohal/impl/EffectBufferHalAidl.h
new file mode 100644
index 0000000..f488708
--- /dev/null
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <media/audiohal/EffectBufferHalInterface.h>
+#include <system/audio_effect.h>
+
+namespace android {
+namespace effect {
+
+class EffectBufferHalAidl : public EffectBufferHalInterface {
+ public:
+ static status_t allocate(size_t size, sp<EffectBufferHalInterface>* buffer);
+ static status_t mirror(void* external, size_t size, sp<EffectBufferHalInterface>* buffer);
+
+ audio_buffer_t* audioBuffer() override;
+ void* externalData() const override;
+
+ size_t getSize() const override { return mBufferSize; }
+
+ void setExternalData(void* external) override;
+ void setFrameCount(size_t frameCount) override;
+ bool checkFrameCountChange() override;
+
+ void update() override;
+ void commit() override;
+ void update(size_t size) override;
+ void commit(size_t size) override;
+
+ private:
+ friend class EffectBufferHalInterface;
+
+ const size_t mBufferSize;
+ bool mFrameCountChanged;
+ void* mExternalData;
+ audio_buffer_t mAudioBuffer;
+
+ // Can not be constructed directly by clients.
+ explicit EffectBufferHalAidl(size_t size);
+
+ ~EffectBufferHalAidl();
+
+ status_t init();
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
new file mode 100644
index 0000000..31c5ca5
--- /dev/null
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -0,0 +1,234 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectHalAidl"
+//#define LOG_NDEBUG 0
+
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/audiohal/AudioHalUtils.h>
+#include <media/EffectsFactoryApi.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
+
+#include "EffectHalAidl.h"
+
+#include <system/audio.h>
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+
+using ::aidl::android::hardware::audio::effect::CommandId;
+using ::aidl::android::hardware::audio::effect::Descriptor;
+using ::aidl::android::hardware::audio::effect::IEffect;
+using ::aidl::android::hardware::audio::effect::State;
+using ::aidl::android::hardware::audio::effect::Parameter;
+
+namespace android {
+namespace effect {
+
+EffectHalAidl::EffectHalAidl(const std::shared_ptr<IEffect>& effect, uint64_t effectId,
+ int32_t sessionId, int32_t ioId)
+ : mEffectId(effectId), mSessionId(sessionId), mIoId(ioId), mEffect(effect) {}
+
+EffectHalAidl::~EffectHalAidl() {}
+
+status_t EffectHalAidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (buffer == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGW("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t EffectHalAidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
+ if (buffer == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGW("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t EffectHalAidl::process() {
+ ALOGW("%s not implemented yet", __func__);
+ // write to input FMQ here?
+ return OK;
+}
+
+// TODO: no callers yet; consider deprecating this interface
+status_t EffectHalAidl::processReverse() {
+ ALOGW("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t EffectHalAidl::handleSetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || replySize == NULL ||
+ *replySize != sizeof(int32_t) || pReplyData == NULL) {
+ ALOGE("%s parameter error code %u", __func__, cmdCode);
+ return BAD_VALUE;
+ }
+
+ *static_cast<int32_t*>(pReplyData) = FAILED_TRANSACTION;
+ memcpy(&mConfig, pCmdData, cmdSize);
+
+ State state;
+ RETURN_IF_BINDER_FAIL(mEffect->getState(&state));
+ // effect not open yet, save settings locally
+ if (state != State::INIT) {
+ effect_config_t* legacyConfig = static_cast<effect_config_t*>(pCmdData);
+ // already open, apply latest settings
+ Parameter aidlParam;
+ Parameter::Common aidlCommon;
+ aidlCommon.input.base =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_AudioConfigBase_buffer_config_t(
+ legacyConfig->inputCfg, true /* isInput */));
+ aidlCommon.output.base =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_AudioConfigBase_buffer_config_t(
+ legacyConfig->outputCfg, false /* isInput */));
+ aidlCommon.session = mSessionId;
+ aidlCommon.ioHandle = mIoId;
+ Parameter::Id id;
+ id.set<Parameter::Id::commonTag>(Parameter::common);
+ aidlParam.set<Parameter::common>(aidlCommon);
+ RETURN_IF_BINDER_FAIL(mEffect->setParameter(aidlParam));
+ }
+ *static_cast<int32_t*>(pReplyData) = OK;
+ return OK;
+}
+
+status_t EffectHalAidl::handleGetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
+ *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+ ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
+ return BAD_VALUE;
+ }
+
+ *(effect_config_t*)pReplyData = mConfig;
+ return OK;
+}
+
+status_t EffectHalAidl::handleSetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ ALOGW("%s not implemented yet", __func__);
+ if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
+ *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+ ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t EffectHalAidl::handleGetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ ALOGW("%s not implemented yet", __func__);
+ if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
+ *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
+ ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
+ return BAD_VALUE;
+ }
+ return OK;
+}
+
+status_t EffectHalAidl::command(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ ALOGW("%s code %d not implemented yet", __func__, cmdCode);
+ ::ndk::ScopedAStatus status;
+ switch (cmdCode) {
+ case EFFECT_CMD_INIT: {
+ // open with default effect_config_t (convert to Parameter.Common)
+ IEffect::OpenEffectReturn ret;
+ Parameter::Common common;
+ RETURN_IF_BINDER_FAIL(mEffect->open(common, std::nullopt, &ret));
+ return OK;
+ }
+ case EFFECT_CMD_SET_CONFIG:
+ return handleSetConfig(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ case EFFECT_CMD_GET_CONFIG:
+ return handleGetConfig(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ case EFFECT_CMD_RESET:
+ return mEffect->command(CommandId::RESET).getStatus();
+ case EFFECT_CMD_ENABLE:
+ return mEffect->command(CommandId::START).getStatus();
+ case EFFECT_CMD_DISABLE:
+ return mEffect->command(CommandId::STOP).getStatus();
+ case EFFECT_CMD_SET_PARAM:
+ return handleSetParameter(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ case EFFECT_CMD_SET_PARAM_DEFERRED:
+ case EFFECT_CMD_SET_PARAM_COMMIT:
+ // TODO
+ return OK;
+ case EFFECT_CMD_GET_PARAM:
+ return handleGetParameter(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ case EFFECT_CMD_SET_DEVICE:
+ return OK;
+ case EFFECT_CMD_SET_VOLUME:
+ return OK;
+ case EFFECT_CMD_SET_AUDIO_MODE:
+ return OK;
+ case EFFECT_CMD_SET_CONFIG_REVERSE:
+ return OK;
+ case EFFECT_CMD_SET_INPUT_DEVICE:
+ return OK;
+ case EFFECT_CMD_GET_CONFIG_REVERSE:
+ return OK;
+ case EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS:
+ return OK;
+ case EFFECT_CMD_GET_FEATURE_CONFIG:
+ return OK;
+ case EFFECT_CMD_SET_FEATURE_CONFIG:
+ return OK;
+ case EFFECT_CMD_SET_AUDIO_SOURCE:
+ return OK;
+ case EFFECT_CMD_OFFLOAD:
+ return OK;
+ case EFFECT_CMD_DUMP:
+ return OK;
+ case EFFECT_CMD_FIRST_PROPRIETARY:
+ return OK;
+ default:
+ return INVALID_OPERATION;
+ }
+ return INVALID_OPERATION;
+}
+
+status_t EffectHalAidl::getDescriptor(effect_descriptor_t* pDescriptor) {
+ ALOGW("%s %p", __func__, pDescriptor);
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
+ Descriptor aidlDesc;
+ RETURN_IF_BINDER_FAIL(mEffect->getDescriptor(&aidlDesc));
+
+ *pDescriptor = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_Descriptor_effect_descriptor(aidlDesc));
+ return OK;
+}
+
+status_t EffectHalAidl::close() {
+ auto ret = mEffect->close();
+ ALOGI("%s %s", __func__, ret.getMessage());
+ return ret.getStatus();
+}
+
+status_t EffectHalAidl::dump(int fd) {
+ ALOGW("%s not implemented yet, fd %d", __func__, fd);
+ return OK;
+}
+
+} // namespace effect
+} // namespace android
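command() keeps the legacy effect command ABI on top of the AIDL IEffect: for EFFECT_CMD_SET_CONFIG the payload is an effect_config_t and the reply is a single int32_t status, exactly what the size checks in handleSetConfig() above enforce. A caller-side sketch of that convention (applyLegacyConfig() is illustrative only):

    #include <hardware/audio_effect.h>
    #include <media/audiohal/EffectHalInterface.h>

    android::status_t applyLegacyConfig(const android::sp<android::EffectHalInterface>& effect,
                                        const effect_config_t& config) {
        effect_config_t cmd = config;  // EFFECT_CMD_SET_CONFIG payload
        int32_t reply = -1;            // reply is a single int32_t status
        uint32_t replySize = sizeof(reply);
        return effect->command(EFFECT_CMD_SET_CONFIG, sizeof(cmd), &cmd, &replySize, &reply);
    }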
diff --git a/media/libaudiohal/impl/EffectHalAidl.h b/media/libaudiohal/impl/EffectHalAidl.h
new file mode 100644
index 0000000..76bb240
--- /dev/null
+++ b/media/libaudiohal/impl/EffectHalAidl.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <system/audio_effect.h>
+
+namespace android {
+namespace effect {
+
+class EffectHalAidl : public EffectHalInterface {
+ public:
+ // Set the input buffer.
+ status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer) override;
+
+ // Set the output buffer.
+ status_t setOutBuffer(const sp<EffectBufferHalInterface>& buffer) override;
+
+ // Effect process function.
+ status_t process() override;
+
+ // Process reverse stream function. This function is used to pass
+ // a reference stream to the effect engine.
+ status_t processReverse() override;
+
+ // Send a command and receive a response to/from effect engine.
+ status_t command(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData, uint32_t* replySize,
+ void* pReplyData) override;
+
+ // Returns the effect descriptor.
+ status_t getDescriptor(effect_descriptor_t *pDescriptor) override;
+
+ // Free resources on the remote side.
+ status_t close() override;
+
+ // Whether it's a local implementation.
+ bool isLocal() const override { return false; }
+
+ status_t dump(int fd) override;
+
+ uint64_t effectId() const override { return mEffectId; }
+
+ private:
+ friend class EffectsFactoryHalAidl;
+
+ const uint64_t mEffectId;
+ const int32_t mSessionId;
+ const int32_t mIoId;
+ sp<EffectBufferHalInterface> mInBuffer, mOutBuffer;
+ effect_config_t mConfig;
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> mEffect;
+
+ // Can not be constructed directly by clients.
+ EffectHalAidl(const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect>& effect,
+ uint64_t effectId, int32_t sessionId, int32_t ioId);
+
+ status_t handleSetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData);
+ status_t handleGetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData);
+ status_t handleSetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData);
+ status_t handleGetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
+ uint32_t* replySize, void* pReplyData);
+
+ // The destructor automatically releases the effect.
+ virtual ~EffectHalAidl();
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 8743c04..3956a6c 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -17,11 +17,16 @@
#define LOG_TAG "EffectHalHidl"
//#define LOG_NDEBUG 0
+#include <android/hidl/manager/1.0/IServiceManager.h>
+#include <android-base/stringprintf.h>
#include <common/all-versions/VersionUtils.h>
#include <cutils/native_handle.h>
+#include <cutils/properties.h>
#include <hwbinder/IPCThreadState.h>
#include <media/EffectsFactoryApi.h>
+#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/TimeCheck.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <utils/Log.h>
#include <util/EffectUtils.h>
@@ -50,6 +55,18 @@
effect_descriptor_t halDescriptor{};
if (EffectHalHidl::getDescriptor(&halDescriptor) == NO_ERROR) {
mIsInput = (halDescriptor.flags & EFFECT_FLAG_TYPE_PRE_PROC) == EFFECT_FLAG_TYPE_PRE_PROC;
+ const bool isSpatializer =
+ memcmp(&halDescriptor.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0;
+ if (isSpatializer) {
+ constexpr int32_t kRTPriorityMin = 1;
+ constexpr int32_t kRTPriorityMax = 3;
+ const int32_t priorityBoost = property_get_int32("audio.spatializer.priority", 1);
+ if (priorityBoost >= kRTPriorityMin && priorityBoost <= kRTPriorityMax) {
+ ALOGD("%s: audio.spatializer.priority %d on effect %lld",
+ __func__, priorityBoost, (long long)effectId);
+ mHalThreadPriority = priorityBoost;
+ }
+ }
}
}
@@ -127,6 +144,8 @@
ALOGE_IF(!mEfGroup, "Event flag creation for effects failed");
return NO_INIT;
}
+
+ (void)checkHalThreadPriority();
mStatusMQ = std::move(tempStatusMQ);
return OK;
}
@@ -317,5 +336,67 @@
return result;
}
+status_t EffectHalHidl::getHalPid(pid_t *pid) const {
+ using ::android::hidl::base::V1_0::DebugInfo;
+ using ::android::hidl::manager::V1_0::IServiceManager;
+ DebugInfo debugInfo;
+ const auto ret = mEffect->getDebugInfo([&] (const auto &info) {
+ debugInfo = info;
+ });
+ if (!ret.isOk()) {
+ ALOGW("%s: cannot get effect debug info", __func__);
+ return INVALID_OPERATION;
+ }
+ if (debugInfo.pid != (int)IServiceManager::PidConstant::NO_PID) {
+ *pid = debugInfo.pid;
+ return NO_ERROR;
+ }
+ ALOGW("%s: effect debug info does not contain pid", __func__);
+ return NAME_NOT_FOUND;
+}
+
+status_t EffectHalHidl::getHalWorkerTid(pid_t *tid) {
+ int32_t reply = -1;
+ uint32_t replySize = sizeof(reply);
+ const status_t status =
+ command('gtid', 0 /* cmdSize */, nullptr /* pCmdData */, &replySize, &reply);
+ if (status == OK) {
+ *tid = (pid_t)reply;
+ } else {
+ ALOGW("%s: failed with status:%d", __func__, status);
+ }
+ return status;
+}
+
+bool EffectHalHidl::requestHalThreadPriority(pid_t threadPid, pid_t threadId) {
+ if (mHalThreadPriority == kRTPriorityDisabled) {
+ return true;
+ }
+ const int err = requestPriority(
+ threadPid, threadId,
+ mHalThreadPriority, false /*isForApp*/, true /*asynchronous*/);
+ ALOGW_IF(err, "%s: failed to set RT priority %d for pid %d tid %d; error %d",
+ __func__, mHalThreadPriority, threadPid, threadId, err);
+ // Audio will still work, but may be more susceptible to glitches.
+ return err == 0;
+}
+
+status_t EffectHalHidl::checkHalThreadPriority() {
+ if (mHalThreadPriority == kRTPriorityDisabled) return OK;
+ if (mHalThreadPriority < kRTPriorityMin
+ || mHalThreadPriority > kRTPriorityMax) return BAD_VALUE;
+
+ pid_t halPid, halWorkerTid;
+ const status_t status = getHalPid(&halPid) ?: getHalWorkerTid(&halWorkerTid);
+ const bool success = status == OK && requestHalThreadPriority(halPid, halWorkerTid);
+ ALOGD("%s: effectId %lld RT priority(%d) request %s%s",
+ __func__, (long long)mEffectId, mHalThreadPriority,
+ success ? "succeeded" : "failed",
+ status == OK
+ ? base::StringPrintf(" for pid:%d tid:%d", halPid, halWorkerTid).c_str()
+ : " (pid / tid cannot be read)");
+ return success ? OK : status != OK ? status : INVALID_OPERATION /* request failed */;
+}
+
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index e139768..94dcd7e 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -78,6 +78,11 @@
std::unique_ptr<StatusMQ> mStatusMQ;
EventFlag* mEfGroup;
bool mIsInput = false;
+ static constexpr int32_t kRTPriorityMin = 1;
+ static constexpr int32_t kRTPriorityMax = 3;
+ static constexpr int kRTPriorityDisabled = 0;
+ // Typical RealTime mHalThreadPriority ranges from 1 (low) to 3 (high).
+ int mHalThreadPriority = kRTPriorityDisabled;
// Can not be constructed directly by clients.
EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId);
@@ -93,6 +98,10 @@
uint32_t cmdCode, uint32_t cmdSize, void *pCmdData,
uint32_t *replySize, void *pReplyData);
status_t setProcessBuffers();
+ status_t getHalPid(pid_t *pid) const;
+ status_t getHalWorkerTid(pid_t *tid);
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+ status_t checkHalThreadPriority();
};
} // namespace effect
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
new file mode 100644
index 0000000..0039c86
--- /dev/null
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -0,0 +1,230 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+#define LOG_TAG "EffectsFactoryHalAidl"
+//#define LOG_NDEBUG 0
+
+#include <aidl/android/hardware/audio/effect/IFactory.h>
+#include <android/binder_manager.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/audiohal/AudioHalUtils.h>
+#include <utils/Log.h>
+
+#include "EffectBufferHalAidl.h"
+#include "EffectHalAidl.h"
+#include "EffectsFactoryHalAidl.h"
+
+using aidl::android::hardware::audio::effect::IFactory;
+using aidl::android::media::audio::common::AudioUuid;
+using android::detail::AudioHalVersionInfo;
+
+namespace android {
+namespace effect {
+
+EffectsFactoryHalAidl::EffectsFactoryHalAidl(std::shared_ptr<IFactory> effectsFactory)
+ : mFactory(effectsFactory),
+ mHalVersion(AudioHalVersionInfo(AudioHalVersionInfo::Type::AIDL, [this]() {
+ int32_t majorVersion = 0;
+ return (mFactory && mFactory->getInterfaceVersion(&majorVersion).isOk()) ? majorVersion
+ : 0;
+ }())) {
+ ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
+}
+
+status_t EffectsFactoryHalAidl::queryNumberEffects(uint32_t *pNumEffects) {
+ if (pNumEffects == nullptr) {
+ return BAD_VALUE;
+ }
+
+ {
+ std::lock_guard lg(mLock);
+ RETURN_IF_NOT_OK(queryEffectList_l());
+ *pNumEffects = mDescList->size();
+ }
+ ALOGI("%s %d", __func__, *pNumEffects);
+ return OK;
+}
+
+status_t EffectsFactoryHalAidl::getDescriptor(uint32_t index, effect_descriptor_t* pDescriptor) {
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
+
+ std::lock_guard lg(mLock);
+ RETURN_IF_NOT_OK(queryEffectList_l());
+
+ auto listSize = mDescList->size();
+ if (index >= listSize) {
+ ALOGE("%s index %d exceed size DescList %zd", __func__, index, listSize);
+ return INVALID_OPERATION;
+ }
+
+ *pDescriptor = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_Descriptor_effect_descriptor(mDescList->at(index)));
+ return OK;
+}
+
+status_t EffectsFactoryHalAidl::getDescriptor(const effect_uuid_t* halUuid,
+ effect_descriptor_t* pDescriptor) {
+ if (halUuid == nullptr || pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
+
+ AudioUuid uuid =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*halUuid));
+ std::lock_guard lg(mLock);
+ return getHalDescriptorWithImplUuid_l(uuid, pDescriptor);
+}
+
+status_t EffectsFactoryHalAidl::getDescriptors(const effect_uuid_t* halType,
+ std::vector<effect_descriptor_t>* descriptors) {
+ if (halType == nullptr || descriptors == nullptr) {
+ return BAD_VALUE;
+ }
+
+ AudioUuid type =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*halType));
+ std::lock_guard lg(mLock);
+ return getHalDescriptorWithTypeUuid_l(type, descriptors);
+}
+
+status_t EffectsFactoryHalAidl::createEffect(const effect_uuid_t* uuid, int32_t sessionId,
+ int32_t ioId, int32_t deviceId __unused,
+ sp<EffectHalInterface>* effect) {
+ if (uuid == nullptr || effect == nullptr) {
+ return BAD_VALUE;
+ }
+ ALOGI("%s session %d ioId %d", __func__, sessionId, ioId);
+
+ AudioUuid aidlUuid =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*uuid));
+ std::shared_ptr<IEffect> aidlEffect;
+ ndk::ScopedAStatus status = mFactory->createEffect(aidlUuid, &aidlEffect);
+ if (!status.isOk() || aidlEffect == nullptr) {
+ ALOGE("%s IFactory::createFactory failed %s UUID %s", __func__,
+ status.getDescription().c_str(), aidlUuid.toString().c_str());
+ return INVALID_OPERATION;
+ }
+ uint64_t effectId;
+ {
+ std::lock_guard lg(mLock);
+ effectId = ++mEffectIdCounter;
+ }
+
+ *effect = new EffectHalAidl(aidlEffect, effectId, sessionId, ioId);
+ return OK;
+}
+
+status_t EffectsFactoryHalAidl::dumpEffects(int fd) {
+ ALOGE("%s not implemented yet, fd %d", __func__, fd);
+ return INVALID_OPERATION;
+}
+
+status_t EffectsFactoryHalAidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
+ ALOGI("%s size %zu buffer %p", __func__, size, buffer);
+ // Buffers aren't allocated here for AIDL; instead, each effect open returns an I/O data FMQ.
+ return EffectBufferHalAidl::allocate(size, buffer);
+}
+
+status_t EffectsFactoryHalAidl::mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) {
+ ALOGI("%s extern %p size %zu buffer %p", __func__, external, size, buffer);
+ // TODO: implement with FMQ
+ return EffectBufferHalAidl::mirror(external, size, buffer);
+}
+
+AudioHalVersionInfo EffectsFactoryHalAidl::getHalVersion() const {
+ return mHalVersion;
+}
+
+status_t EffectsFactoryHalAidl::queryEffectList_l() {
+ if (!mDescList) {
+ std::vector<Descriptor> list;
+ auto status = mFactory->queryEffects(std::nullopt, std::nullopt, std::nullopt, &list);
+ if (!status.isOk()) {
+ ALOGE("%s IFactory::queryEffects failed %s", __func__, status.getDescription().c_str());
+ return status.getStatus();
+ }
+
+ mDescList = std::make_unique<std::vector<Descriptor>>(list);
+ }
+ return OK;
+}
+
+status_t EffectsFactoryHalAidl::getHalDescriptorWithImplUuid_l(const AudioUuid& uuid,
+ effect_descriptor_t* pDescriptor) {
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
+ if (!mDescList) {
+ RETURN_IF_NOT_OK(queryEffectList_l());
+ }
+
+ auto matchIt = std::find_if(mDescList->begin(), mDescList->end(),
+ [&](const auto& desc) { return desc.common.id.uuid == uuid; });
+ if (matchIt == mDescList->end()) {
+ ALOGE("%s UUID %s not found", __func__, uuid.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ *pDescriptor = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_Descriptor_effect_descriptor(*matchIt));
+ return OK;
+}
+
+status_t EffectsFactoryHalAidl::getHalDescriptorWithTypeUuid_l(
+ const AudioUuid& type, std::vector<effect_descriptor_t>* descriptors) {
+ if (descriptors == nullptr) {
+ return BAD_VALUE;
+ }
+ if (!mDescList) {
+ RETURN_IF_NOT_OK(queryEffectList_l());
+ }
+ std::vector<Descriptor> result;
+ std::copy_if(mDescList->begin(), mDescList->end(), std::back_inserter(result),
+ [&](auto& desc) { return desc.common.id.type == type; });
+ if (result.size() == 0) {
+ ALOGE("%s type UUID %s not found", __func__, type.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ *descriptors = VALUE_OR_RETURN_STATUS(
+ aidl::android::convertContainer<std::vector<effect_descriptor_t>>(
+ result, ::aidl::android::aidl2legacy_Descriptor_effect_descriptor));
+ return OK;
+}
+
+} // namespace effect
+
+// When a shared library is built from a static library, even explicit
+// exports from a static library are optimized out unless actually used by
+// the shared library. See EffectsFactoryHalEntry.cpp.
+extern "C" void* createIEffectsFactoryImpl() {
+ auto serviceName = std::string(IFactory::descriptor) + "/default";
+ auto service = IFactory::fromBinder(
+ ndk::SpAIBinder(AServiceManager_waitForService(serviceName.c_str())));
+ if (!service) {
+ ALOGE("%s binder service %s not exist", __func__, serviceName.c_str());
+ return nullptr;
+ }
+ return new effect::EffectsFactoryHalAidl(service);
+}
+
+} // namespace android
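The explicit C entry point above exists only so that the linker keeps the AIDL factory code when libaudiohal is assembled from static libraries; EffectsFactoryHalEntry.cpp re-exports it as createIEffectsFactory. A minimal sketch of how a loader could resolve that exported symbol at runtime; the library name and loader function are illustrative assumptions, not part of this patch:

    #include <dlfcn.h>

    // Hypothetical standalone loader; the real lookup inside libaudiohal is not shown in this diff.
    void* loadEffectsFactoryImpl() {
        void* handle = dlopen("libaudiohal@aidl.so", RTLD_NOW);  // library name is an assumption
        if (handle == nullptr) return nullptr;
        using EntryFn = void* (*)();
        auto entry = reinterpret_cast<EntryFn>(dlsym(handle, "createIEffectsFactory"));
        return entry != nullptr ? entry() : nullptr;  // an EffectsFactoryHalAidl* on success
    }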
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.h b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
new file mode 100644
index 0000000..1e85da9
--- /dev/null
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <cstddef>
+#include <memory>
+#include <mutex>
+
+#include <android-base/thread_annotations.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <system/thread_defs.h>
+
+namespace android {
+namespace effect {
+
+using namespace aidl::android::hardware::audio::effect;
+
+class EffectsFactoryHalAidl final : public EffectsFactoryHalInterface {
+ public:
+ explicit EffectsFactoryHalAidl(std::shared_ptr<IFactory> effectsFactory);
+
+ // Returns the number of different effects in all loaded libraries.
+ status_t queryNumberEffects(uint32_t *pNumEffects) override;
+
+ // Returns a descriptor of the next available effect.
+ status_t getDescriptor(uint32_t index, effect_descriptor_t* pDescriptor) override;
+
+ status_t getDescriptor(const effect_uuid_t* pEffectUuid,
+ effect_descriptor_t* pDescriptor) override;
+
+ status_t getDescriptors(const effect_uuid_t* pEffectType,
+ std::vector<effect_descriptor_t>* descriptors) override;
+
+ // Creates an effect engine of the specified type.
+ // To release the effect engine, it is necessary to release references to the returned effect
+ // object.
+ status_t createEffect(const effect_uuid_t* pEffectUuid, int32_t sessionId, int32_t ioId,
+ int32_t deviceId, sp<EffectHalInterface>* effect) override;
+
+ status_t dumpEffects(int fd) override;
+
+ status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
+ status_t mirrorBuffer(void* external, size_t size,
+ sp<EffectBufferHalInterface>* buffer) override;
+
+ detail::AudioHalVersionInfo getHalVersion() const override;
+
+ private:
+ std::mutex mLock;
+ const std::shared_ptr<IFactory> mFactory;
+ uint64_t mEffectIdCounter GUARDED_BY(mLock) = 0; // Align with HIDL (0 is INVALID_ID)
+ std::unique_ptr<std::vector<Descriptor>> mDescList GUARDED_BY(mLock) = nullptr;
+ const detail::AudioHalVersionInfo mHalVersion;
+
+ virtual ~EffectsFactoryHalAidl() = default;
+ status_t queryEffectList_l() REQUIRES(mLock);
+ status_t getHalDescriptorWithImplUuid_l(
+ const aidl::android::media::audio::common::AudioUuid& uuid,
+ effect_descriptor_t* pDescriptor) REQUIRES(mLock);
+ status_t getHalDescriptorWithTypeUuid_l(
+ const aidl::android::media::audio::common::AudioUuid& type,
+ std::vector<effect_descriptor_t>* descriptors) REQUIRES(mLock);
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp b/media/libaudiohal/impl/EffectsFactoryHalEntry.cpp
similarity index 94%
rename from media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp
rename to media/libaudiohal/impl/EffectsFactoryHalEntry.cpp
index 2c6f2c6..f6ad99a 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidlEntry.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalEntry.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include "android/media/AudioHalVersion.h"
+
extern "C" void* createIEffectsFactoryImpl();
extern "C" __attribute__((visibility("default"))) void* createIEffectsFactory() {
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index d7217fc..172ebdf 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -17,6 +17,9 @@
#define LOG_TAG "EffectsFactoryHalHidl"
//#define LOG_NDEBUG 0
+#include <optional>
+#include <tuple>
+
#include <cutils/native_handle.h>
#include <UuidUtils.h>
@@ -28,6 +31,9 @@
#include "EffectHalHidl.h"
#include "EffectsFactoryHalHidl.h"
+#include "android/media/AudioHalVersion.h"
+
+using ::android::detail::AudioHalVersionInfo;
using ::android::hardware::audio::common::CPP_VERSION::implementation::UuidUtils;
using ::android::hardware::audio::effect::CPP_VERSION::implementation::EffectUtils;
using ::android::hardware::Return;
@@ -38,51 +44,71 @@
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
-EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
- : EffectConversionHelperHidl("EffectsFactory") {
- ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
- mEffectsFactory = effectsFactory;
-}
+class EffectDescriptorCache {
+ public:
+ using QueryResult = std::tuple<Return<void>, Result, hidl_vec<EffectDescriptor>>;
+ QueryResult queryAllDescriptors(IEffectsFactory* effectsFactory);
+ private:
+ std::mutex mLock;
+ std::optional<hidl_vec<EffectDescriptor>> mLastDescriptors; // GUARDED_BY(mLock)
+};
-status_t EffectsFactoryHalHidl::queryAllDescriptors() {
- if (mEffectsFactory == 0) return NO_INIT;
+EffectDescriptorCache::QueryResult EffectDescriptorCache::queryAllDescriptors(
+ IEffectsFactory* effectsFactory) {
+ {
+ std::lock_guard l(mLock);
+ if (mLastDescriptors.has_value()) {
+ return {::android::hardware::Void(), Result::OK, mLastDescriptors.value()};
+ }
+ }
Result retval = Result::NOT_INITIALIZED;
- Return<void> ret = mEffectsFactory->getAllDescriptors(
+ hidl_vec<EffectDescriptor> descriptors;
+ Return<void> ret = effectsFactory->getAllDescriptors(
[&](Result r, const hidl_vec<EffectDescriptor>& result) {
retval = r;
if (retval == Result::OK) {
- mLastDescriptors = result;
+ descriptors = result;
}
});
- if (ret.isOk()) {
- return retval == Result::OK ? OK : NO_INIT;
+ if (ret.isOk() && retval == Result::OK) {
+ std::lock_guard l(mLock);
+ mLastDescriptors = descriptors;
}
- mLastDescriptors.resize(0);
- return processReturn(__FUNCTION__, ret);
+ return {std::move(ret), retval, std::move(descriptors)};
+}
+
+EffectsFactoryHalHidl::EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory)
+ : EffectConversionHelperHidl("EffectsFactory"), mCache(new EffectDescriptorCache) {
+ ALOG_ASSERT(effectsFactory != nullptr, "Provided IEffectsFactory service is NULL");
+ mEffectsFactory = std::move(effectsFactory);
}
status_t EffectsFactoryHalHidl::queryNumberEffects(uint32_t *pNumEffects) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult == OK) {
- *pNumEffects = mLastDescriptors.size();
+ if (mEffectsFactory == 0) return NO_INIT;
+ auto [ret, retval, descriptors] = mCache->queryAllDescriptors(mEffectsFactory.get());
+ if (ret.isOk() && retval == Result::OK) {
+ *pNumEffects = descriptors.size();
+ return OK;
+ } else if (ret.isOk()) {
+ return NO_INIT;
}
- return queryResult;
+ return processReturn(__FUNCTION__, ret);
}
status_t EffectsFactoryHalHidl::getDescriptor(
uint32_t index, effect_descriptor_t *pDescriptor) {
- // TODO: We need somehow to track the changes on the server side
- // or figure out how to convert everybody to query all the descriptors at once.
if (pDescriptor == nullptr) {
return BAD_VALUE;
}
- if (mLastDescriptors.size() == 0) {
- status_t queryResult = queryAllDescriptors();
- if (queryResult != OK) return queryResult;
+ if (mEffectsFactory == 0) return NO_INIT;
+ auto [ret, retval, descriptors] = mCache->queryAllDescriptors(mEffectsFactory.get());
+ if (ret.isOk() && retval == Result::OK) {
+ if (index >= descriptors.size()) return NAME_NOT_FOUND;
+ EffectUtils::effectDescriptorToHal(descriptors[index], pDescriptor);
+ } else if (ret.isOk()) {
+ return NO_INIT;
}
- if (index >= mLastDescriptors.size()) return NAME_NOT_FOUND;
- EffectUtils::effectDescriptorToHal(mLastDescriptors[index], pDescriptor);
- return OK;
+ return processReturn(__FUNCTION__, ret);
}
status_t EffectsFactoryHalHidl::getDescriptor(
@@ -114,21 +140,15 @@
if (pEffectType == nullptr || descriptors == nullptr) {
return BAD_VALUE;
}
+ if (mEffectsFactory == 0) return NO_INIT;
- uint32_t numEffects = 0;
- status_t status = queryNumberEffects(&numEffects);
- if (status != NO_ERROR) {
- ALOGW("%s error %d from FactoryHal queryNumberEffects", __func__, status);
- return status;
+ auto [ret, retval, hidlDescs] = mCache->queryAllDescriptors(mEffectsFactory.get());
+ if (!ret.isOk() || retval != Result::OK) {
+ return processReturn(__FUNCTION__, ret, retval);
}
-
- for (uint32_t i = 0; i < numEffects; i++) {
+ for (const auto& hidlDesc : hidlDescs) {
effect_descriptor_t descriptor;
- status = getDescriptor(i, &descriptor);
- if (status != NO_ERROR) {
- ALOGW("%s error %d from FactoryHal getDescriptor", __func__, status);
- continue;
- }
+ EffectUtils::effectDescriptorToHal(hidlDesc, &descriptor);
if (memcmp(&descriptor.type, pEffectType, sizeof(effect_uuid_t)) == 0) {
descriptors->push_back(descriptor);
}
@@ -204,11 +224,15 @@
return EffectBufferHalHidl::mirror(external, size, buffer);
}
+AudioHalVersionInfo EffectsFactoryHalHidl::getHalVersion() const {
+ return AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, MAJOR_VERSION, MINOR_VERSION);
+}
+
} // namespace effect
// When a shared library is built from a static library, even explicit
// exports from a static library are optimized out unless actually used by
-// the shared library. See EffectsFactoryHalHidlEntry.cpp.
+// the shared library. See EffectsFactoryHalEntry.cpp.
extern "C" void* createIEffectsFactoryImpl() {
auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
return service ? new effect::EffectsFactoryHalHidl(service) : nullptr;
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index e1882e1..9875154 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -14,8 +14,9 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
+#pragma once
+
+#include <memory>
#include PATH(android/hardware/audio/effect/FILE_VERSION/IEffectsFactory.h)
#include <media/audiohal/EffectsFactoryHalInterface.h>
@@ -28,47 +29,43 @@
using ::android::hardware::hidl_vec;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
-class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public EffectConversionHelperHidl
-{
+class EffectDescriptorCache;
+
+class EffectsFactoryHalHidl final : public EffectsFactoryHalInterface,
+ public EffectConversionHelperHidl {
public:
EffectsFactoryHalHidl(sp<IEffectsFactory> effectsFactory);
// Returns the number of different effects in all loaded libraries.
- virtual status_t queryNumberEffects(uint32_t *pNumEffects);
+ status_t queryNumberEffects(uint32_t *pNumEffects) override;
// Returns a descriptor of the next available effect.
- virtual status_t getDescriptor(uint32_t index,
- effect_descriptor_t *pDescriptor);
+ status_t getDescriptor(uint32_t index, effect_descriptor_t* pDescriptor) override;
- virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
- effect_descriptor_t *pDescriptor);
+ status_t getDescriptor(const effect_uuid_t* pEffectUuid,
+ effect_descriptor_t* pDescriptor) override;
- virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
- std::vector<effect_descriptor_t> *descriptors);
+ status_t getDescriptors(const effect_uuid_t* pEffectType,
+ std::vector<effect_descriptor_t>* descriptors) override;
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
- virtual status_t createEffect(const effect_uuid_t *pEffectUuid,
- int32_t sessionId, int32_t ioId, int32_t deviceId,
- sp<EffectHalInterface> *effect);
+ status_t createEffect(const effect_uuid_t* pEffectUuid, int32_t sessionId, int32_t ioId,
+ int32_t deviceId, sp<EffectHalInterface>* effect) override;
- virtual status_t dumpEffects(int fd);
-
- virtual float getHalVersion() { return MAJOR_VERSION + (float)MINOR_VERSION / 10; }
+ status_t dumpEffects(int fd) override;
status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) override;
status_t mirrorBuffer(void* external, size_t size,
sp<EffectBufferHalInterface>* buffer) override;
+ android::detail::AudioHalVersionInfo getHalVersion() const override;
+
private:
sp<IEffectsFactory> mEffectsFactory;
- hidl_vec<EffectDescriptor> mLastDescriptors;
-
- status_t queryAllDescriptors();
+ std::unique_ptr<EffectDescriptorCache> mCache;
};
} // namespace effect
} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/include/media/audiohal/AudioHalUtils.h b/media/libaudiohal/include/media/audiohal/AudioHalUtils.h
new file mode 100644
index 0000000..4862cba
--- /dev/null
+++ b/media/libaudiohal/include/media/audiohal/AudioHalUtils.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#define RETURN_IF_BINDER_FAIL(expr) \
+ do { \
+ const ::ndk::ScopedAStatus _temp_status_ = (expr); \
+ if (!_temp_status_.isOk()) { \
+ ALOGE("%s:%d return with expr %s msg %s", __func__, __LINE__, #expr, \
+ _temp_status_.getMessage()); \
+ return _temp_status_.getStatus(); \
+ } \
+ } while (false)
+
+#define RETURN_IF_NOT_OK(statement) \
+ do { \
+ auto tmp = (statement); \
+ if (tmp != OK) { \
+ return tmp; \
+ } \
+ } while (false)
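These two macros standardize early returns: RETURN_IF_BINDER_FAIL converts a failed ::ndk::ScopedAStatus into its status_t after logging it, while RETURN_IF_NOT_OK propagates any non-OK status_t expression unchanged. The header itself includes neither the logging nor the NDK binder headers, so the including file must provide them. A minimal usage sketch, assuming the same IFactory/Descriptor types used above (the helper name is illustrative):

    // Returns the number of available effects, or an error status from the AIDL call.
    status_t queryCount(const std::shared_ptr<IFactory>& factory, uint32_t* count) {
        std::vector<Descriptor> list;
        // Logs and returns early if the binder transaction or the service reports a failure.
        RETURN_IF_BINDER_FAIL(factory->queryEffects(std::nullopt, std::nullopt, std::nullopt, &list));
        *count = static_cast<uint32_t>(list.size());
        return OK;
    }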
diff --git a/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h b/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h
new file mode 100644
index 0000000..6e09463
--- /dev/null
+++ b/media/libaudiohal/include/media/audiohal/AudioHalVersionInfo.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <utility>
+#include <android/media/AudioHalVersion.h>
+
+namespace android::detail {
+
+class AudioHalVersionInfo : public android::media::AudioHalVersion {
+ public:
+ AudioHalVersionInfo(Type halType, int halMajor, int halMinor = 0) {
+ type = halType;
+ major = halMajor;
+ minor = halMinor;
+ }
+
+ Type getType() const { return type; }
+
+ int getMajorVersion() const { return major; }
+
+ int getMinorVersion() const { return minor; }
+
+ /** Keep HIDL version format as is for backward compatibility, only add prefix for AIDL. */
+ std::string toVersionString() const {
+ std::string versionStr =
+ android::internal::ToString(major) + "." + android::internal::ToString(minor);
+ if (type == Type::AIDL) {
+ return "aidl";
+ } else {
+ return versionStr;
+ }
+ }
+};
+
+} // namespace android::detail
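As written, toVersionString() keeps the familiar "major.minor" form for HIDL and reports a plain "aidl" tag for AIDL, with the numeric version still available through the getters. Illustrative values, following directly from the code above:

    using android::detail::AudioHalVersionInfo;
    const AudioHalVersionInfo hidlVersion(AudioHalVersionInfo::Type::HIDL, 7, 1);
    const AudioHalVersionInfo aidlVersion(AudioHalVersionInfo::Type::AIDL, 1);
    // hidlVersion.toVersionString() == "7.1"
    // aidlVersion.toVersionString() == "aidl"; aidlVersion.getMajorVersion() == 1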
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index d27ad4c..2c8219e 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -21,7 +21,6 @@
#include <android/media/audio/common/AudioMMapPolicyType.h>
#include <error/Result.h>
#include <media/audiohal/EffectHalInterface.h>
-#include <media/MicrophoneInfo.h>
#include <system/audio.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
@@ -107,16 +106,23 @@
virtual status_t releaseAudioPatch(audio_patch_handle_t patch) = 0;
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port *port) = 0;
+ virtual status_t getAudioPort(struct audio_port* port) {
+ ALOGE("%s override me port %p", __func__, port);
+ return OK;
+ }
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
+ virtual status_t getAudioPort(struct audio_port_v7 *port) {
+ ALOGE("%s override me port %p", __func__, port);
+ return OK;
+ }
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
// List microphones
- virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
+ virtual status_t getMicrophones(
+ std::vector<audio_microphone_characteristic_t>* microphones) = 0;
virtual status_t addDeviceEffect(
audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
@@ -125,12 +131,16 @@
virtual status_t getMmapPolicyInfos(
media::audio::common::AudioMMapPolicyType policyType,
- std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
+ std::vector<media::audio::common::AudioMMapPolicyInfo> *policyInfos) = 0;
virtual int32_t getAAudioMixerBurstCount() = 0;
virtual int32_t getAAudioHardwareBurstMinUsec() = 0;
+ virtual int32_t supportsBluetoothVariableLatency(bool* supports) = 0;
// Update the connection status of an external device.
- virtual status_t setConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
+ virtual status_t setConnectedState(const struct audio_port_v7* port, bool connected) {
+ ALOGE("%s override me port %p connected %d", __func__, port, connected);
+ return OK;
+ }
virtual error::Result<audio_hw_sync_t> getHwAvSync() = 0;
diff --git a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
index 17010e6..be3a723 100644
--- a/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DevicesFactoryHalInterface.h
@@ -14,14 +14,15 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
-#define ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
+#pragma once
#include <media/audiohal/DeviceHalInterface.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <vector>
+#include "AudioHalVersionInfo.h"
+
namespace android {
class DevicesFactoryHalCallback : public RefBase
@@ -43,7 +44,7 @@
// The callback can be only set once.
virtual status_t setCallbackOnce(sp<DevicesFactoryHalCallback> callback) = 0;
- virtual float getHalVersion() const = 0;
+ virtual android::detail::AudioHalVersionInfo getHalVersion() const = 0;
static sp<DevicesFactoryHalInterface> create();
@@ -55,5 +56,3 @@
};
} // namespace android
-
-#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 3e505bd..d740fe9 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -14,14 +14,16 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
-#define ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
+#pragma once
#include <media/audiohal/EffectHalInterface.h>
#include <system/audio_effect.h>
#include <utils/Errors.h>
#include <utils/RefBase.h>
+#include "AudioHalVersionInfo.h"
+#include "FactoryHal.h"
+
namespace android {
class EffectsFactoryHalInterface : public RefBase
@@ -49,16 +51,16 @@
virtual status_t dumpEffects(int fd) = 0;
- virtual float getHalVersion() = 0;
-
static sp<EffectsFactoryHalInterface> create();
virtual status_t allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) = 0;
virtual status_t mirrorBuffer(void* external, size_t size,
sp<EffectBufferHalInterface>* buffer) = 0;
+ virtual android::detail::AudioHalVersionInfo getHalVersion() const = 0;
+
// Helper function to compare effect uuid to EFFECT_UUID_NULL.
- static bool isNullUuid(const effect_uuid_t *pEffectUuid);
+ static bool isNullUuid(const effect_uuid_t* pEffectUuid);
protected:
// Subclasses can not be constructed directly by clients.
@@ -68,5 +70,3 @@
};
} // namespace android
-
-#endif // ANDROID_HARDWARE_EFFECTS_FACTORY_HAL_INTERFACE_H
diff --git a/media/libaudiohal/include/media/audiohal/FactoryHalHidl.h b/media/libaudiohal/include/media/audiohal/FactoryHal.h
similarity index 62%
rename from media/libaudiohal/include/media/audiohal/FactoryHalHidl.h
rename to media/libaudiohal/include/media/audiohal/FactoryHal.h
index 866dd3e..4776d98 100644
--- a/media/libaudiohal/include/media/audiohal/FactoryHalHidl.h
+++ b/media/libaudiohal/include/media/audiohal/FactoryHal.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2018 The Android Open Source Project
+ * Copyright (C) 2022 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -14,24 +14,19 @@
* limitations under the License.
*/
-#ifndef ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
-#define ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
+#pragma once
#include <string>
#include <utility>
-
#include <utils/StrongPointer.h>
+#include "AudioHalVersionInfo.h"
+
namespace android {
-// The pair of the interface's package name and the interface name,
-// e.g. <"android.hardware.audio", "IDevicesFactory">.
-// Splitting is used for easier construction of versioned names (FQNs).
-using InterfaceName = std::pair<std::string, std::string>;
-
namespace detail {
-void* createPreferredImpl(const InterfaceName& iface, const InterfaceName& siblingIface);
+void* createPreferredImpl(bool isCore);
} // namespace detail
@@ -45,17 +40,12 @@
* shared libraries the loader function considers which interface among two has the most
* recent version. Thus, a pair of interface names must be passed in.
*
- * @param iface the interface that needs to be created.
- * @param siblingIface the interface which occupies the same shared library.
+ * @param isCore Indicating if this is audio Core HAL service interface.
* @return the preferred available implementation or nullptr if none are available.
*/
+
template <class Interface>
-static sp<Interface> createPreferredImpl(
- const InterfaceName& iface, const InterfaceName& siblingIface) {
- return sp<Interface>{
- static_cast<Interface*>(detail::createPreferredImpl(iface, siblingIface))};
+static sp<Interface> createPreferredImpl(bool isCore) {
+ return sp<Interface>{static_cast<Interface*>(detail::createPreferredImpl(isCore))};
}
-
} // namespace android
-
-#endif // ANDROID_HARDWARE_FACTORY_HAL_HIDL_H
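With the interface-name pair replaced by a single isCore flag, callers only indicate whether they want the core HAL or the effects HAL factory. A hedged sketch of how the create() factory methods might call this template; the actual call sites are outside this diff:

    // Sketch only; assumes the factory interfaces declared elsewhere in libaudiohal.
    sp<DevicesFactoryHalInterface> devices =
            createPreferredImpl<DevicesFactoryHalInterface>(true /* isCore */);
    sp<EffectsFactoryHalInterface> effects =
            createPreferredImpl<EffectsFactoryHalInterface>(false /* isCore */);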
diff --git a/media/libaudiohal/tests/Android.bp b/media/libaudiohal/tests/Android.bp
new file mode 100644
index 0000000..e20f74c
--- /dev/null
+++ b/media/libaudiohal/tests/Android.bp
@@ -0,0 +1,51 @@
+// Copyright (C) 2022 The Android Open Source Project
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Headers module is in frameworks/av/Android.bp because modules are not allowed
+// to refer to headers in parent directories and the headers live in
+// frameworks/av/include.
+
+package {
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_test {
+ name: "EffectsFactoryHalInterfaceTest",
+ test_suites: ["device-tests"],
+
+ srcs: [
+ "EffectsFactoryHalInterface_test.cpp",
+ ],
+
+ defaults: [
+ "latest_android_media_audio_common_types_cpp_shared",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wthread-safety",
+ ],
+
+ shared_libs: [
+ "audioclient-types-aidl-cpp",
+ "libaudiohal",
+ "libutils",
+ ],
+
+ header_libs: [
+ "libaudiohal_headers",
+ ],
+}
diff --git a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
new file mode 100644
index 0000000..83c7809
--- /dev/null
+++ b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#include <cstdint>
+#define LOG_TAG "EffectsFactoryHalInterfaceTest"
+
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+
+#include <gtest/gtest.h>
+#include <utils/RefBase.h>
+
+namespace android {
+
+// EffectsFactoryHalInterface
+TEST(libAudioHalTest, createEffectsFactoryHalInterface) {
+ ASSERT_NE(nullptr, EffectsFactoryHalInterface::create());
+}
+
+TEST(libAudioHalTest, queryNumberEffects) {
+ auto factory = EffectsFactoryHalInterface::create();
+ ASSERT_NE(nullptr, factory);
+
+ uint32_t numEffects = 0;
+ EXPECT_EQ(OK, factory->queryNumberEffects(&numEffects));
+ EXPECT_NE(0ul, numEffects);
+}
+
+TEST(libAudioHalTest, getDescriptorByNumber) {
+ auto factory = EffectsFactoryHalInterface::create();
+ ASSERT_NE(nullptr, factory);
+
+ uint32_t numEffects = 0;
+ EXPECT_EQ(OK, factory->queryNumberEffects(&numEffects));
+ EXPECT_NE(0ul, numEffects);
+
+ effect_descriptor_t desc;
+ for (uint32_t i = 0; i < numEffects; i++) {
+ EXPECT_EQ(OK, factory->getDescriptor(i, &desc));
+ }
+}
+
+TEST(libAudioHalTest, createEffect) {
+ auto factory = EffectsFactoryHalInterface::create();
+ ASSERT_NE(nullptr, factory);
+
+ uint32_t numEffects = 0;
+ EXPECT_EQ(OK, factory->queryNumberEffects(&numEffects));
+ EXPECT_NE(0ul, numEffects);
+
+ effect_descriptor_t desc;
+ for (uint32_t i = 0; i < numEffects; i++) {
+ sp<EffectHalInterface> interface;
+ EXPECT_EQ(OK, factory->getDescriptor(i, &desc));
+ EXPECT_EQ(OK, factory->createEffect(&desc.uuid, 1 /* sessionId */, 1 /* ioId */,
+ 1 /* deviceId */, &interface));
+ }
+}
+
+TEST(libAudioHalTest, getHalVersion) {
+ auto factory = EffectsFactoryHalInterface::create();
+ ASSERT_NE(nullptr, factory);
+
+ auto version = factory->getHalVersion();
+ EXPECT_NE(0, version.getMajorVersion());
+}
+
+// TODO: b/263986405 Add multi-thread testing
+
+} // namespace android
diff --git a/media/libaudioprocessing/tests/test-resampler.cpp b/media/libaudioprocessing/tests/test-resampler.cpp
index f178bde..2166a83 100644
--- a/media/libaudioprocessing/tests/test-resampler.cpp
+++ b/media/libaudioprocessing/tests/test-resampler.cpp
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
+#include <memory>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
@@ -474,7 +475,7 @@
// mono takes left channel only (out of stereo output pair)
// stereo and multichannel preserve all channels.
int32_t* out = (int32_t*) output_vaddr;
- int16_t* convert = (int16_t*) malloc(output_frames * channels * sizeof(int16_t));
+ std::unique_ptr<int16_t[]> convert(new int16_t[output_frames * channels]);
const int volumeShift = 12; // shift requirement for Q4.27 to Q.15
// round to half towards zero and saturate at int16 (non-dithered)
@@ -509,7 +510,7 @@
perror(file_out);
return EXIT_FAILURE;
}
- (void) sf_writef_short(sf, convert, output_frames);
+ (void) sf_writef_short(sf, convert.get(), output_frames);
sf_close(sf);
return EXIT_SUCCESS;
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index abe622d..742626c 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -47,3 +47,29 @@
"libhardware_headers",
],
}
+
+cc_library_shared {
+ name: "libdownmixaidl",
+ srcs: [
+ "aidl/EffectDownmix.cpp",
+ "aidl/DownmixContext.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libhardware_headers"
+ ],
+ shared_libs: [
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ ],
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/downmix/aidl/DownmixContext.cpp b/media/libeffects/downmix/aidl/DownmixContext.cpp
new file mode 100644
index 0000000..6869689
--- /dev/null
+++ b/media/libeffects/downmix/aidl/DownmixContext.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DownmixContext"
+
+#include <android-base/logging.h>
+
+#include "DownmixContext.h"
+
+using aidl::android::hardware::audio::effect::IEffect;
+using ::android::hardware::audio::common::getChannelCount;
+
+namespace aidl::android::hardware::audio::effect {
+
+DownmixContext::DownmixContext(int statusDepth, const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ mState = DOWNMIX_STATE_UNINITIALIZED;
+ init_params(common);
+}
+
+DownmixContext::~DownmixContext() {
+ LOG(DEBUG) << __func__;
+ mState = DOWNMIX_STATE_UNINITIALIZED;
+}
+
+RetCode DownmixContext::enable() {
+ LOG(DEBUG) << __func__;
+ if (mState != DOWNMIX_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DOWNMIX_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode DownmixContext::disable() {
+ LOG(DEBUG) << __func__;
+ if (mState != DOWNMIX_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DOWNMIX_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void DownmixContext::reset() {
+ LOG(DEBUG) << __func__;
+ disable();
+ resetBuffer();
+}
+
+IEffect::Status DownmixContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+ IEffect::Status status = {EX_ILLEGAL_ARGUMENT, 0, 0};
+
+ if (in == nullptr || out == nullptr || getInputFrameSize() != getOutputFrameSize() ||
+ getInputFrameSize() == 0) {
+ return status;
+ }
+
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ if (mState == DOWNMIX_STATE_UNINITIALIZED) {
+ LOG(ERROR) << __func__ << "Trying to use an uninitialized downmixer";
+ return status;
+ } else if (mState == DOWNMIX_STATE_INITIALIZED) {
+ LOG(ERROR) << __func__ << "Trying to use a non-configured downmixer";
+ return status;
+ }
+
+ LOG(DEBUG) << __func__ << " start processing";
+ bool accumulate = false;
+ int frames = samples * sizeof(float) / getInputFrameSize();
+ if (mType == Downmix::Type::STRIP) {
+ int inputChannelCount = getChannelCount(mChMask);
+ while (frames) {
+ if (accumulate) {
+ out[0] = std::clamp(out[0] + in[0], -1.f, 1.f);
+ out[1] = std::clamp(out[1] + in[1], -1.f, 1.f);
+ } else {
+ out[0] = in[0];
+ out[1] = in[1];
+ }
+ in += inputChannelCount;
+ out += 2;
+ frames--;
+ }
+ } else {
+ int chMask = mChMask.get<AudioChannelLayout::layoutMask>();
+ if (!mChannelMix.process(in, out, frames, accumulate, (audio_channel_mask_t)chMask)) {
+ LOG(ERROR) << "Multichannel configuration " << mChMask.toString()
+ << " is not supported";
+ return status;
+ }
+ }
+ LOG(DEBUG) << __func__ << " done processing";
+ return {STATUS_OK, samples, samples};
+}
+
+void DownmixContext::init_params(const Parameter::Common& common) {
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ AudioChannelLayout channelMask = common.input.base.channelMask;
+ if (!isChannelMaskValid(channelMask)) {
+ LOG(ERROR) << "Downmix_Configure error: input channel mask " << channelMask.toString()
+ << " not supported";
+ } else {
+ mType = Downmix::Type::FOLD;
+ mChMask = channelMask;
+ mState = DOWNMIX_STATE_INITIALIZED;
+ }
+}
+
+bool DownmixContext::isChannelMaskValid(AudioChannelLayout channelMask) {
+ if (channelMask.getTag() != AudioChannelLayout::layoutMask) return false;
+ int chMask = channelMask.get<AudioChannelLayout::layoutMask>();
+ // check against unsupported channels (up to FCC_26)
+ constexpr uint32_t MAXIMUM_CHANNEL_MASK = AudioChannelLayout::LAYOUT_22POINT2 |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_LEFT |
+ AudioChannelLayout::CHANNEL_FRONT_WIDE_RIGHT;
+ if (chMask & ~MAXIMUM_CHANNEL_MASK) {
+ LOG(ERROR) << "Unsupported channels in " << (chMask & ~MAXIMUM_CHANNEL_MASK);
+ return false;
+ }
+ return true;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/DownmixContext.h b/media/libeffects/downmix/aidl/DownmixContext.h
new file mode 100644
index 0000000..8a244ac
--- /dev/null
+++ b/media/libeffects/downmix/aidl/DownmixContext.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "effect-impl/EffectContext.h"
+
+#include <audio_utils/ChannelMix.h>
+
+namespace aidl::android::hardware::audio::effect {
+
+using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDeviceDescription;
+
+enum DownmixState {
+ DOWNMIX_STATE_UNINITIALIZED,
+ DOWNMIX_STATE_INITIALIZED,
+ DOWNMIX_STATE_ACTIVE,
+};
+
+class DownmixContext final : public EffectContext {
+ public:
+ DownmixContext(int statusDepth, const Parameter::Common& common);
+ ~DownmixContext();
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+ RetCode setDmType(Downmix::Type type) {
+ mType = type;
+ return RetCode::SUCCESS;
+ }
+ Downmix::Type getDmType() const { return mType; }
+
+ RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override {
+ // FIXME change volume
+ mVolumeStereo = volumeStereo;
+ return RetCode::SUCCESS;
+ }
+ Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
+
+ RetCode setOutputDevice(const AudioDeviceDescription& device) override {
+ // FIXME change type if playing on headset vs speaker
+ mOutputDevice = device;
+ return RetCode::SUCCESS;
+ }
+ AudioDeviceDescription getOutputDevice() { return mOutputDevice; }
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ DownmixState mState;
+ Downmix::Type mType;
+ AudioChannelLayout mChMask;
+ ::android::audio_utils::channels::ChannelMix mChannelMix;
+
+ // Common Params
+ AudioDeviceDescription mOutputDevice;
+ Parameter::VolumeStereo mVolumeStereo;
+
+ void init_params(const Parameter::Common& common);
+ bool isChannelMaskValid(AudioChannelLayout channelMask);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/EffectDownmix.cpp b/media/libeffects/downmix/aidl/EffectDownmix.cpp
new file mode 100644
index 0000000..17d0736
--- /dev/null
+++ b/media/libeffects/downmix/aidl/EffectDownmix.cpp
@@ -0,0 +1,214 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DownmixImpl"
+
+#include <android-base/logging.h>
+
+#include "EffectDownmix.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::DownmixImpl;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kDownmixImplUUID;
+using aidl::android::hardware::audio::effect::kDownmixTypeUUID;
+using aidl::android::media::audio::common::AudioUuid;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kDownmixImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<DownmixImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kDownmixImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = DownmixImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string DownmixImpl::kEffectName = "Multichannel Downmix To Stereo";
+const Descriptor DownmixImpl::kDescriptor = {
+ .common = {
+ .id = {.type = kDownmixTypeUUID, .uuid = kDownmixImplUUID, .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT, .insert = Flags::Insert::FIRST},
+ .name = DownmixImpl::kEffectName,
+ .implementor = "The Android Open Source Project"}};
+
+ndk::ScopedAStatus DownmixImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DownmixImpl::setParameterCommon(const Parameter& param) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto tag = param.getTag();
+ switch (tag) {
+ case Parameter::common:
+ RETURN_IF(mContext->setCommon(param.get<Parameter::common>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setCommFailed");
+ break;
+ case Parameter::deviceDescription:
+ RETURN_IF(mContext->setOutputDevice(param.get<Parameter::deviceDescription>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDeviceFailed");
+ break;
+ case Parameter::mode:
+ RETURN_IF(mContext->setAudioMode(param.get<Parameter::mode>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setModeFailed");
+ break;
+ case Parameter::source:
+ RETURN_IF(mContext->setAudioSource(param.get<Parameter::source>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setSourceFailed");
+ break;
+ case Parameter::volumeStereo:
+ RETURN_IF(mContext->setVolumeStereo(param.get<Parameter::volumeStereo>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setVolumeStereoFailed");
+ break;
+ default: {
+ LOG(ERROR) << __func__ << " unsupportedParameterTag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commonParamNotSupported");
+ }
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
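+// START/STOP/RESET commands map directly onto the context's enable/disable/reset;
+// any other command id is rejected.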
+ndk::ScopedAStatus DownmixImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->reset();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DownmixImpl::setParameterSpecific(const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::downmix != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& dmParam = specific.get<Parameter::Specific::downmix>();
+ auto tag = dmParam.getTag();
+
+ switch (tag) {
+ case Downmix::type: {
+ RETURN_IF(mContext->setDmType(dmParam.get<Downmix::type>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setTypeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus DownmixImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::downmixTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto dmId = id.get<Parameter::Id::downmixTag>();
+ auto dmIdTag = dmId.getTag();
+ switch (dmIdTag) {
+ case Downmix::Id::commonTag:
+ return getParameterDownmix(dmId.get<Downmix::Id::commonTag>(), specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(dmIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus DownmixImpl::getParameterDownmix(const Downmix::Tag& tag,
+ Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ Downmix dmParam;
+ switch (tag) {
+ case Downmix::type: {
+ dmParam.set<Downmix::type>(mContext->getDmType());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "DownmixTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::downmix>(dmParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> DownmixImpl::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<DownmixContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+RetCode DownmixImpl::releaseContext() {
+ if (mContext) {
+ mContext.reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status DownmixImpl::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+ if (!mContext) {
+ LOG(ERROR) << __func__ << " nullContext";
+ return {EX_NULL_POINTER, 0, 0};
+ }
+ return mContext->lvmProcess(in, out, sampleToProcess);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/downmix/aidl/EffectDownmix.h b/media/libeffects/downmix/aidl/EffectDownmix.h
new file mode 100644
index 0000000..d590133
--- /dev/null
+++ b/media/libeffects/downmix/aidl/EffectDownmix.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+#include <audio_effects/effect_downmix.h>
+
+#include "DownmixContext.h"
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+
+namespace aidl::android::hardware::audio::effect {
+
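+// AIDL effect wrapper around DownmixContext; EffectImpl supplies the common IEffect
+// plumbing while this class adds the downmix-specific parameter handling.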
+class DownmixImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ DownmixImpl() { LOG(DEBUG) << __func__; }
+ ~DownmixImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterCommon(const Parameter& param) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+ IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<DownmixContext> mContext;
+ ndk::ScopedAStatus getParameterDownmix(const Downmix::Tag& tag, Parameter::Specific* specific);
+};
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/Android.bp b/media/libeffects/dynamicsproc/Android.bp
index 84131a4..f655c4f 100644
--- a/media/libeffects/dynamicsproc/Android.bp
+++ b/media/libeffects/dynamicsproc/Android.bp
@@ -32,34 +32,68 @@
],
}
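+// Sources and dependencies shared between the legacy libdynproc effect library and
+// the AIDL implementation below.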
+cc_defaults {
+ name : "dynamicsprocessingdefaults",
+ srcs: [
+ "dsp/DPBase.cpp",
+ "dsp/DPFrequency.cpp",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbase",
+ "liblog",
+ "libutils",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libeigen",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ "-Wall",
+ "-Werror",
+ ],
+}
+
cc_library_shared {
name: "libdynproc",
vendor: true,
+ defaults: [
+ "dynamicsprocessingdefaults",
+ ],
+
srcs: [
"EffectDynamicsProcessing.cpp",
- "dsp/DPBase.cpp",
- "dsp/DPFrequency.cpp",
],
cflags: [
"-O2",
"-fvisibility=hidden",
-
- "-Wall",
- "-Werror",
- ],
-
- shared_libs: [
- "libcutils",
- "liblog",
],
relative_install_path: "soundfx",
+}
- header_libs: [
- "libaudioeffects",
- "libeigen",
+cc_library_shared {
+ name: "libdynamicsprocessingaidl",
+
+ srcs: [
+ "aidl/DynamicsProcessing.cpp",
+ "aidl/DynamicsProcessingContext.cpp",
+ ":effectCommonFile",
+ ],
+
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ "dynamicsprocessingdefaults",
+ ],
+
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
],
}
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
new file mode 100644
index 0000000..203a27b
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DynamicsProcessingLibEffects"
+
+#include <android-base/logging.h>
+
+#include "DynamicsProcessing.h"
+
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::DynamicsProcessingImpl;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kDynamicsProcessingImplUUID;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+using aidl::android::media::audio::common::PcmType;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kDynamicsProcessingImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<DynamicsProcessingImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kDynamicsProcessingImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = DynamicsProcessingImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string DynamicsProcessingImpl::kEffectName = "DynamicsProcessing";
+const DynamicsProcessing::Capability DynamicsProcessingImpl::kCapability = {.minCutOffFreq = 220,
+ .maxCutOffFreq = 20000};
+const Descriptor DynamicsProcessingImpl::kDescriptor = {
+ .common = {.id = {.type = kDynamicsProcessingTypeUUID,
+ .uuid = kDynamicsProcessingImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::LAST,
+ .volume = Flags::Volume::CTRL},
+ .name = DynamicsProcessingImpl::kEffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = Capability::make<Capability::dynamicsProcessing>(
+ DynamicsProcessingImpl::kCapability)};
+
+ndk::ScopedAStatus DynamicsProcessingImpl::open(const Parameter::Common& common,
+ const std::optional<Parameter::Specific>& specific,
+ OpenEffectReturn* ret) {
+ LOG(DEBUG) << __func__;
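+ // Open sequence: validate that both streams use 32-bit float PCM, create the
+ // processing context, apply the common parameters plus either the caller-supplied
+ // specific parameters or the context's default engine architecture, then start
+ // the worker thread.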
+ // this effect only supports 32-bit float samples
+ RETURN_IF(common.input.base.format.pcm != common.output.base.format.pcm ||
+ common.input.base.format.pcm != PcmType::FLOAT_32_BIT,
+ EX_ILLEGAL_ARGUMENT, "dataMustBe32BitsFloat");
+ RETURN_OK_IF(mState != State::INIT);
+ auto context = createContext(common);
+ RETURN_IF(!context, EX_NULL_POINTER, "createContextFailed");
+
+ RETURN_IF_ASTATUS_NOT_OK(setParameterCommon(common), "setCommParamErr");
+ if (specific.has_value()) {
+ RETURN_IF_ASTATUS_NOT_OK(setParameterSpecific(specific.value()), "setSpecParamErr");
+ } else {
+ Parameter::Specific defaultSpecific =
+ Parameter::Specific::make<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::engineArchitecture>(
+ mContext->getEngineArchitecture()));
+ RETURN_IF_ASTATUS_NOT_OK(setParameterSpecific(defaultSpecific), "setDefaultEngineErr");
+ }
+
+ mState = State::IDLE;
+ context->dupeFmq(ret);
+ RETURN_IF(createThread(context, getEffectName()) != RetCode::SUCCESS, EX_UNSUPPORTED_OPERATION,
+ "FailedToCreateWorker");
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ return ndk::ScopedAStatus::ok();
+ case CommandId::STOP:
+ mContext->disable();
+ return ndk::ScopedAStatus::ok();
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ return ndk::ScopedAStatus::ok();
+ default:
+ // Need this default handling for vendor extendable CommandId::VENDOR_COMMAND_*
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+}
+
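+// Each DynamicsProcessing sub-parameter maps onto a dedicated context setter;
+// vendorExtension parameters are not supported by this implementation.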
+ndk::ScopedAStatus DynamicsProcessingImpl::setParameterSpecific(
+ const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::dynamicsProcessing != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& param = specific.get<Parameter::Specific::dynamicsProcessing>();
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case DynamicsProcessing::engineArchitecture: {
+ RETURN_IF(mContext->setEngineArchitecture(
+ param.get<DynamicsProcessing::engineArchitecture>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setEngineArchitectureFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEq: {
+ RETURN_IF(
+ mContext->setPreEq(param.get<DynamicsProcessing::preEq>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPreEqFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEq: {
+ RETURN_IF(mContext->setPostEq(param.get<DynamicsProcessing::postEq>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPostEqFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEqBand: {
+ RETURN_IF(mContext->setPreEqBand(param.get<DynamicsProcessing::preEqBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPreEqBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEqBand: {
+ RETURN_IF(mContext->setPostEqBand(param.get<DynamicsProcessing::postEqBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPostEqBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbc: {
+ RETURN_IF(mContext->setMbc(param.get<DynamicsProcessing::mbc>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMbcFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbcBand: {
+ RETURN_IF(mContext->setMbcBand(param.get<DynamicsProcessing::mbcBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMbcBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::limiter: {
+ RETURN_IF(mContext->setLimiter(param.get<DynamicsProcessing::limiter>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setLimiterFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::inputGain: {
+ RETURN_IF(mContext->setInputGain(param.get<DynamicsProcessing::inputGain>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setInputGainFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::vendorExtension: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::dynamicsProcessingTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto dpId = id.get<Parameter::Id::dynamicsProcessingTag>();
+ auto dpIdTag = dpId.getTag();
+ switch (dpIdTag) {
+ case DynamicsProcessing::Id::commonTag:
+ return getParameterDynamicsProcessing(dpId.get<DynamicsProcessing::Id::commonTag>(),
+ specific);
+ case DynamicsProcessing::Id::vendorExtensionTag:
+ LOG(ERROR) << __func__ << " unsupported ID: " << toString(dpIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionIdNotSupported");
+ }
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getParameterDynamicsProcessing(
+ const DynamicsProcessing::Tag& tag, Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ switch (tag) {
+ case DynamicsProcessing::engineArchitecture: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::engineArchitecture>(
+ mContext->getEngineArchitecture()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEq: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::preEq>(mContext->getPreEq()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEq: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::postEq>(mContext->getPostEq()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEqBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::preEqBand>(
+ mContext->getPreEqBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEqBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::postEqBand>(
+ mContext->getPostEqBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbc: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::mbc>(mContext->getMbc()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbcBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::mbcBand>(mContext->getMbcBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::limiter: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::limiter>(mContext->getLimiter()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::inputGain: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::inputGain>(
+ mContext->getInputGain()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::vendorExtension: {
+ LOG(ERROR) << __func__ << " wrong vendor tag in CommonTag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagInWrongId");
+ }
+ }
+}
+
+std::shared_ptr<EffectContext> DynamicsProcessingImpl::createContext(
+ const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<DynamicsProcessingContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+RetCode DynamicsProcessingImpl::releaseContext() {
+ if (mContext) {
+ mContext->disable();
+ mContext->resetBuffer();
+ mContext.reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status DynamicsProcessingImpl::effectProcessImpl(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h
new file mode 100644
index 0000000..824ebea
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+#include "DynamicsProcessingContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class DynamicsProcessingImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ static const DynamicsProcessing::Capability kCapability;
+
+ DynamicsProcessingImpl() { LOG(DEBUG) << __func__; }
+ ~DynamicsProcessingImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus open(const Parameter::Common& common,
+ const std::optional<Parameter::Specific>& specific,
+ OpenEffectReturn* ret) override;
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+ IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<DynamicsProcessingContext> mContext;
+ ndk::ScopedAStatus getParameterDynamicsProcessing(const DynamicsProcessing::Tag& tag,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
new file mode 100644
index 0000000..57a2be9
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DPLibEffectsContext"
+
+#include "DynamicsProcessing.h"
+#include "DynamicsProcessingContext.h"
+
+#include <functional>
+#include <sys/param.h>
+#include <unordered_set>
+
+namespace aidl::android::hardware::audio::effect {
+
+DynamicsProcessingContext::DynamicsProcessingContext(int statusDepth,
+ const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ init();
+}
+
+DynamicsProcessingContext::~DynamicsProcessingContext() {
+ LOG(DEBUG) << __func__;
+}
+
+RetCode DynamicsProcessingContext::enable() {
+ std::lock_guard lg(mMutex);
+ if (mState != DYNAMICS_PROCESSING_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DYNAMICS_PROCESSING_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode DynamicsProcessingContext::disable() {
+ std::lock_guard lg(mMutex);
+ if (mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void DynamicsProcessingContext::reset() {
+ std::lock_guard lg(mMutex);
+ if (mDpFreq != nullptr) {
+ mDpFreq.reset();
+ }
+}
+
+RetCode DynamicsProcessingContext::setCommon(const Parameter::Common& common) {
+ mCommon = common;
+ init();
+ return RetCode::SUCCESS;
+}
+
+void DynamicsProcessingContext::dpSetFreqDomainVariant_l(
+ const DynamicsProcessing::EngineArchitecture& engine) {
+ mDpFreq.reset(new dp_fx::DPFrequency());
+ mDpFreq->init(mChannelCount, engine.preEqStage.inUse, engine.preEqStage.bandCount,
+ engine.mbcStage.inUse, engine.mbcStage.bandCount, engine.postEqStage.inUse,
+ engine.postEqStage.bandCount, engine.limiterInUse);
+
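+ // Derive the block size from the preferred processing duration, clamp it to the
+ // library minimum and round it up to a power of two, then configure the
+ // frequency-domain engine with half-block overlap.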
+ int32_t sampleRate = mCommon.input.base.sampleRate;
+ int32_t minBlockSize = (int32_t)dp_fx::DPFrequency::getMinBockSize();
+ int32_t block = engine.preferredProcessingDurationMs * sampleRate / 1000.0f;
+ LOG(INFO) << __func__ << " sampleRate " << sampleRate << " block length "
+ << engine.preferredProcessingDurationMs << " ms (" << block << " samples)";
+ if (block < minBlockSize) {
+ block = minBlockSize;
+ } else if (!powerof2(block)) {
+ // find the next highest power of 2.
+ block = 1 << (32 - __builtin_clz(block));
+ }
+ mDpFreq->configure(block, block >> 1, sampleRate);
+}
+
+RetCode DynamicsProcessingContext::setEngineArchitecture(
+ const DynamicsProcessing::EngineArchitecture& engineArchitecture) {
+ RETURN_VALUE_IF(!validateEngineConfig(engineArchitecture), RetCode::ERROR_ILLEGAL_PARAMETER,
+ "illegalEngineConfig");
+
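+ // Only the frequency-domain variant is wired up here; a time-domain resolution
+ // preference is accepted but only logged, leaving mDpFreq unchanged.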
+ std::lock_guard lg(mMutex);
+ if (!mEngineInited || mEngineArchitecture != engineArchitecture) {
+ if (engineArchitecture.resolutionPreference ==
+ DynamicsProcessing::ResolutionPreference::FAVOR_FREQUENCY_RESOLUTION) {
+ dpSetFreqDomainVariant_l(engineArchitecture);
+ } else {
+ LOG(WARNING) << __func__ << toString(engineArchitecture.resolutionPreference)
+ << " not available now";
+ }
+ mEngineInited = true;
+ mEngineArchitecture = engineArchitecture;
+ }
+ LOG(INFO) << __func__ << engineArchitecture.toString();
+ return RetCode::SUCCESS;
+}
+
+RetCode DynamicsProcessingContext::setPreEq(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPEq>(channels, mEngineArchitecture.preEqStage.inUse,
+ StageType::PREEQ);
+}
+
+RetCode DynamicsProcessingContext::setPostEq(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPEq>(channels, mEngineArchitecture.postEqStage.inUse,
+ StageType::POSTEQ);
+}
+
+RetCode DynamicsProcessingContext::setMbc(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPMbc>(channels, mEngineArchitecture.mbcStage.inUse,
+ StageType::MBC);
+}
+
+RetCode DynamicsProcessingContext::setPreEqBand(
+ const std::vector<DynamicsProcessing::EqBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.preEqStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "preEqNotInUse");
+ return setBands_l<DynamicsProcessing::EqBandConfig>(
+ bands, mEngineArchitecture.preEqStage.bandCount, StageType::PREEQ);
+}
+
+RetCode DynamicsProcessingContext::setPostEqBand(
+ const std::vector<DynamicsProcessing::EqBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.postEqStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "postEqNotInUse");
+ return setBands_l<DynamicsProcessing::EqBandConfig>(
+ bands, mEngineArchitecture.postEqStage.bandCount, StageType::POSTEQ);
+}
+
+RetCode DynamicsProcessingContext::setMbcBand(
+ const std::vector<DynamicsProcessing::MbcBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.mbcStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "mbcNotInUse");
+ return setBands_l<DynamicsProcessing::MbcBandConfig>(
+ bands, mEngineArchitecture.mbcStage.bandCount, StageType::MBC);
+}
+
+RetCode DynamicsProcessingContext::setLimiter(
+ const std::vector<DynamicsProcessing::LimiterConfig>& limiters) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.limiterInUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "limiterNotInUse");
+ return setBands_l<DynamicsProcessing::LimiterConfig>(limiters, -1, StageType::LIMITER);
+}
+
+RetCode DynamicsProcessingContext::setInputGain(
+ const std::vector<DynamicsProcessing::InputGain>& inputGains) {
+ std::lock_guard lg(mMutex);
+ return setBands_l<DynamicsProcessing::InputGain>(inputGains, -1, StageType::INPUTGAIN);
+}
+
+DynamicsProcessing::EngineArchitecture DynamicsProcessingContext::getEngineArchitecture() {
+ std::lock_guard lg(mMutex);
+ LOG(INFO) << __func__ << mEngineArchitecture.toString();
+ return mEngineArchitecture;
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getPreEq() {
+ return getChannelConfig(StageType::PREEQ);
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getPostEq() {
+ return getChannelConfig(StageType::POSTEQ);
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getPreEqBand() {
+ return getEqBandConfigs(StageType::PREEQ);
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getPostEqBand() {
+ return getEqBandConfigs(StageType::POSTEQ);
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getMbc() {
+ return getChannelConfig(StageType::MBC);
+}
+
+std::vector<DynamicsProcessing::MbcBandConfig> DynamicsProcessingContext::getMbcBand() {
+ std::vector<DynamicsProcessing::MbcBandConfig> bands;
+
+ std::lock_guard lg(mMutex);
+ auto maxBand = mEngineArchitecture.mbcStage.bandCount;
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto mbc = getMbc_l(ch);
+ if (!mbc) {
+ continue;
+ }
+ for (int32_t bandId = 0; bandId < maxBand; bandId++) {
+ auto band = mbc->getBand(bandId);
+ if (!band) {
+ continue;
+ }
+ bands.push_back({.channel = ch,
+ .band = bandId,
+ .enable = band->isEnabled(),
+ .cutoffFrequencyHz = band->getCutoffFrequency(),
+ .attackTimeMs = band->getAttackTime(),
+ .releaseTimeMs = band->getReleaseTime(),
+ .ratio = band->getRatio(),
+ .thresholdDb = band->getThreshold(),
+ .kneeWidthDb = band->getKneeWidth(),
+ .noiseGateThresholdDb = band->getNoiseGateThreshold(),
+ .expanderRatio = band->getExpanderRatio(),
+ .preGainDb = band->getPreGain(),
+ .postGainDb = band->getPostGain()});
+ }
+ }
+ return bands;
+}
+
+std::vector<DynamicsProcessing::LimiterConfig> DynamicsProcessingContext::getLimiter() {
+ std::vector<DynamicsProcessing::LimiterConfig> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto limiter = getLimiter_l(ch);
+ if (!limiter) {
+ continue;
+ }
+ ret.push_back({.channel = ch,
+ .enable = limiter->isEnabled(),
+ .linkGroup = static_cast<int32_t>(limiter->getLinkGroup()),
+ .attackTimeMs = limiter->getAttackTime(),
+ .releaseTimeMs = limiter->getReleaseTime(),
+ .ratio = limiter->getRatio(),
+ .thresholdDb = limiter->getThreshold(),
+ .postGainDb = limiter->getPostGain()});
+ }
+ return ret;
+}
+
+std::vector<DynamicsProcessing::InputGain> DynamicsProcessingContext::getInputGain() {
+ std::vector<DynamicsProcessing::InputGain> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto channel = getChannel_l(ch);
+ if (!channel) {
+ continue;
+ }
+ ret.push_back({.channel = ch, .gainDb = channel->getInputGain()});
+ }
+ return ret;
+}
+
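+ // Returns an IEffect::Status of {status, samples consumed, samples produced};
+ // processing only happens while the context is ACTIVE and the engine is configured.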
+IEffect::Status DynamicsProcessingContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+
+ LOG(DEBUG) << __func__ << " start processing";
+ {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(mState != DynamicsProcessingState::DYNAMICS_PROCESSING_STATE_ACTIVE, status,
+ "notInActiveState");
+ RETURN_VALUE_IF(!mDpFreq, status, "engineNotInited");
+ mDpFreq->processSamples(in, out, samples);
+ }
+ return {STATUS_OK, samples, samples};
+}
+
+void DynamicsProcessingContext::init() {
+ std::lock_guard lg(mMutex);
+ mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ mChannelCount =
+ ::android::hardware::audio::common::getChannelCount(mCommon.input.base.channelMask);
+}
+
+dp_fx::DPChannel* DynamicsProcessingContext::getChannel_l(int channel) {
+ RETURN_VALUE_IF(mDpFreq == nullptr, nullptr, "DPFreqNotInited");
+
+ return mDpFreq->getChannel(channel);
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getPreEq_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getPreEq();
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getPostEq_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getPostEq();
+}
+
+dp_fx::DPMbc* DynamicsProcessingContext::getMbc_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getMbc();
+}
+
+dp_fx::DPLimiter* DynamicsProcessingContext::getLimiter_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getLimiter();
+}
+
+dp_fx::DPBandStage* DynamicsProcessingContext::getStageWithType_l(
+ DynamicsProcessingContext::StageType type, int ch) {
+ switch (type) {
+ case StageType::PREEQ: {
+ return getEqWithType_l(type, ch);
+ }
+ case StageType::POSTEQ: {
+ return getEqWithType_l(type, ch);
+ }
+ case StageType::MBC: {
+ return getMbc_l(ch);
+ }
+ case StageType::LIMITER:
+ FALLTHROUGH_INTENDED;
+ case StageType::INPUTGAIN: {
+ return nullptr;
+ }
+ }
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getEqWithType_l(DynamicsProcessingContext::StageType type,
+ int ch) {
+ switch (type) {
+ case StageType::PREEQ: {
+ return getPreEq_l(ch);
+ }
+ case StageType::POSTEQ: {
+ return getPostEq_l(ch);
+ }
+ case StageType::MBC:
+ FALLTHROUGH_INTENDED;
+ case StageType::LIMITER:
+ FALLTHROUGH_INTENDED;
+ case StageType::INPUTGAIN: {
+ return nullptr;
+ }
+ }
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getChannelConfig(
+ StageType type) {
+ std::vector<DynamicsProcessing::ChannelConfig> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto stage = getStageWithType_l(type, ch);
+ if (!stage) {
+ continue;
+ }
+ ret.push_back({.channel = ch, .enable = stage->isEnabled()});
+ }
+ return ret;
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getEqBandConfigs(
+ StageType type) {
+ std::vector<DynamicsProcessing::EqBandConfig> eqBands;
+
+ std::lock_guard lg(mMutex);
+ auto maxBand = (type == StageType::PREEQ) ? mEngineArchitecture.preEqStage.bandCount
+ : mEngineArchitecture.postEqStage.bandCount;
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto eq = getEqWithType_l(type, ch);
+ if (!eq) {
+ continue;
+ }
+ for (int32_t bandId = 0; bandId < maxBand; bandId++) {
+ auto band = eq->getBand(bandId);
+ if (!band) {
+ continue;
+ }
+ eqBands.push_back({.channel = ch,
+ .band = bandId,
+ .enable = band->isEnabled(),
+ .cutoffFrequencyHz = band->getCutoffFrequency(),
+ .gainDb = band->getGain()});
+ }
+ }
+ return eqBands;
+}
+
+/**
+ * When StageEnablement is in use, bandCount needs to be positive.
+ */
+bool DynamicsProcessingContext::validateStageEnablement(
+ const DynamicsProcessing::StageEnablement& enablement) {
+ return !enablement.inUse || enablement.bandCount > 0;
+}
+
+bool DynamicsProcessingContext::validateEngineConfig(
+ const DynamicsProcessing::EngineArchitecture& engine) {
+ return engine.preferredProcessingDurationMs >= 0 &&
+ validateStageEnablement(engine.preEqStage) &&
+ validateStageEnablement(engine.postEqStage) && validateStageEnablement(engine.mbcStage);
+}
+
+inline bool DynamicsProcessingContext::validateCutoffFrequency(float freq) {
+ return freq >= DynamicsProcessingImpl::kCapability.minCutOffFreq &&
+ freq <= DynamicsProcessingImpl::kCapability.maxCutOffFreq;
+}
+
+bool DynamicsProcessingContext::validateEqBandConfig(const DynamicsProcessing::EqBandConfig& band,
+ int maxChannel, int maxBand) {
+ return validateChannel(band.channel, maxChannel) && validateBand(band.band, maxBand) &&
+ validateCutoffFrequency(band.cutoffFrequencyHz);
+}
+
+bool DynamicsProcessingContext::validateMbcBandConfig(const DynamicsProcessing::MbcBandConfig& band,
+ int maxChannel, int maxBand) {
+ return validateChannel(band.channel, maxChannel) && validateBand(band.band, maxBand) &&
+ validateCutoffFrequency(band.cutoffFrequencyHz) && validateTime(band.attackTimeMs) &&
+ validateTime(band.releaseTimeMs) && validateRatio(band.ratio) &&
+ validateBandDb(band.thresholdDb) && validateBandDb(band.kneeWidthDb) &&
+ validateBandDb(band.noiseGateThresholdDb) && validateRatio(band.expanderRatio);
+}
+
+bool DynamicsProcessingContext::validateLimiterConfig(
+ const DynamicsProcessing::LimiterConfig& limiter, int maxChannel) {
+ return validateChannel(limiter.channel, maxChannel) && validateTime(limiter.attackTimeMs) &&
+ validateTime(limiter.releaseTimeMs) && validateRatio(limiter.ratio) &&
+ validateBandDb(limiter.thresholdDb);
+}
+
+bool DynamicsProcessingContext::validateInputGainConfig(const DynamicsProcessing::InputGain& gain,
+ int maxChannel) {
+ return validateChannel(gain.channel, maxChannel);
+}
+
+template <typename D>
+RetCode DynamicsProcessingContext::setDpChannels_l(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels, bool stageInUse,
+ StageType type) {
+ RetCode ret = RetCode::SUCCESS;
+ std::unordered_set<int> channelSet;
+
+ RETURN_VALUE_IF(!stageInUse, RetCode::ERROR_ILLEGAL_PARAMETER, "stageNotInUse");
+ for (auto& it : channels) {
+ if (0 != channelSet.count(it.channel)) {
+ LOG(WARNING) << __func__ << " duplicated channel " << it.channel;
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ } else {
+ channelSet.insert(it.channel);
+ }
+ if (it.channel < 0 || it.channel >= mChannelCount) {
+ LOG(WARNING) << __func__ << " skip illegal ChannelConfig " << it.toString() << " max "
+ << mChannelCount;
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ auto dp = getStageWithType_l(type, it.channel);
+ if (!dp) {
+ LOG(WARNING) << __func__ << " channel " << it.channel << " not exist";
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ if (dp->isEnabled() != it.enable) {
+ LOG(INFO) << __func__ << it.toString();
+ dp->setEnabled(it.enable);
+ }
+ }
+ return ret;
+}
+
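+ // Validates and applies one band/limiter/input-gain configuration; duplicate
+ // {channel, band} keys within a single call are rejected via chBandSet.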
+RetCode DynamicsProcessingContext::setDpChannelBand_l(const std::any& anyConfig, StageType type,
+ int maxCh, int maxBand,
+ std::set<std::pair<int, int>>& chBandSet) {
+ RETURN_VALUE_IF(!anyConfig.has_value(), RetCode::ERROR_ILLEGAL_PARAMETER, "bandInvalid");
+ RetCode ret = RetCode::SUCCESS;
+ std::pair<int, int> chBandKey;
+ switch (type) {
+ case StageType::PREEQ:
+ FALLTHROUGH_INTENDED;
+ case StageType::POSTEQ: {
+ dp_fx::DPEq* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::EqBandConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateEqBandConfig(config, maxCh, maxBand),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "eqBandNotValid");
+ RETURN_VALUE_IF(
+ nullptr == (dp = getEqWithType_l(type, config.channel)) || !dp->isEnabled(),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpEqNotExist");
+ dp_fx::DPEqBand band;
+ band.init(config.enable, config.cutoffFrequencyHz, config.gainDb);
+ dp->setBand(config.band, band);
+ chBandKey = {config.channel, config.band};
+ break;
+ }
+ case StageType::MBC: {
+ dp_fx::DPMbc* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::MbcBandConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateMbcBandConfig(config, maxCh, maxBand),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "mbcBandNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getMbc_l(config.channel)) || !dp->isEnabled(),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpMbcNotExist");
+ dp_fx::DPMbcBand band;
+ band.init(config.enable, config.cutoffFrequencyHz, config.attackTimeMs,
+ config.releaseTimeMs, config.ratio, config.thresholdDb, config.kneeWidthDb,
+ config.noiseGateThresholdDb, config.expanderRatio, config.preGainDb,
+ config.postGainDb);
+ dp->setBand(config.band, band);
+ chBandKey = {config.channel, config.band};
+ break;
+ }
+ case StageType::LIMITER: {
+ dp_fx::DPChannel* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::LimiterConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateLimiterConfig(config, maxCh),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "limiterBandNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getChannel_l(config.channel)),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpChNotExist");
+ dp_fx::DPLimiter limiter;
+ limiter.init(mEngineArchitecture.limiterInUse, config.enable, config.linkGroup,
+ config.attackTimeMs, config.releaseTimeMs, config.ratio,
+ config.thresholdDb, config.postGainDb);
+ dp->setLimiter(limiter);
+ chBandKey = {config.channel, 0};
+ break;
+ }
+ case StageType::INPUTGAIN: {
+ dp_fx::DPChannel* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::InputGain>(anyConfig);
+ RETURN_VALUE_IF(!validateInputGainConfig(config, maxCh),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "inputGainNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getChannel_l(config.channel)),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpChNotExist");
+ dp->setInputGain(config.gainDb);
+ chBandKey = {config.channel, 0};
+ break;
+ }
+ }
+ RETURN_VALUE_IF(0 != chBandSet.count(chBandKey), RetCode::ERROR_ILLEGAL_PARAMETER,
+ "duplicatedBand");
+ chBandSet.insert(chBandKey);
+ return ret;
+}
+
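+ // Applies a list of band configurations of type T, reporting and skipping any
+ // entry that fails validation; maxBand is ignored for limiter and input gain
+ // (callers pass -1).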
+template <typename T /* BandConfig */>
+RetCode DynamicsProcessingContext::setBands_l(
+ const std::vector<T>& bands, int maxBand, StageType type) {
+ RetCode ret = RetCode::SUCCESS;
+ std::set<std::pair<int /* channel */, int /* band */>> bandSet;
+
+ for (const auto& it : bands) {
+ if (RetCode::SUCCESS !=
+ setDpChannelBand_l(std::make_any<T>(it), type, mChannelCount, maxBand, bandSet)) {
+ LOG(WARNING) << __func__ << " skipping band " << it.toString();
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ LOG(INFO) << __func__ << it.toString();
+ }
+ return ret;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
new file mode 100644
index 0000000..8be784e
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <audio_effects/effect_dynamicsprocessing.h>
+
+#include "effect-impl/EffectContext.h"
+
+#include <any>
+#include <cstddef>
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+namespace aidl::android::hardware::audio::effect {
+
+enum DynamicsProcessingState {
+ DYNAMICS_PROCESSING_STATE_UNINITIALIZED,
+ DYNAMICS_PROCESSING_STATE_INITIALIZED,
+ DYNAMICS_PROCESSING_STATE_ACTIVE,
+};
+
+class DynamicsProcessingContext final : public EffectContext {
+ public:
+ DynamicsProcessingContext(int statusDepth, const Parameter::Common& common);
+ ~DynamicsProcessingContext();
+
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+ // override EffectContext::setCommon to update mChannelCount
+ RetCode setCommon(const Parameter::Common& common) override;
+
+ RetCode setEngineArchitecture(const DynamicsProcessing::EngineArchitecture& engineArchitecture);
+ RetCode setPreEq(const std::vector<DynamicsProcessing::ChannelConfig>& eqChannels);
+ RetCode setPostEq(const std::vector<DynamicsProcessing::ChannelConfig>& eqChannels);
+ RetCode setPreEqBand(const std::vector<DynamicsProcessing::EqBandConfig>& eqBands);
+ RetCode setPostEqBand(const std::vector<DynamicsProcessing::EqBandConfig>& eqBands);
+ RetCode setMbc(const std::vector<DynamicsProcessing::ChannelConfig>& mbcChannels);
+ RetCode setMbcBand(const std::vector<DynamicsProcessing::MbcBandConfig>& eqBands);
+ RetCode setLimiter(const std::vector<DynamicsProcessing::LimiterConfig>& limiters);
+ RetCode setInputGain(const std::vector<DynamicsProcessing::InputGain>& gain);
+
+ DynamicsProcessing::EngineArchitecture getEngineArchitecture();
+ std::vector<DynamicsProcessing::ChannelConfig> getPreEq();
+ std::vector<DynamicsProcessing::ChannelConfig> getPostEq();
+ std::vector<DynamicsProcessing::EqBandConfig> getPreEqBand();
+ std::vector<DynamicsProcessing::EqBandConfig> getPostEqBand();
+ std::vector<DynamicsProcessing::ChannelConfig> getMbc();
+ std::vector<DynamicsProcessing::MbcBandConfig> getMbcBand();
+ std::vector<DynamicsProcessing::LimiterConfig> getLimiter();
+ std::vector<DynamicsProcessing::InputGain> getInputGain();
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ static constexpr float kPreferredProcessingDurationMs = 10.0f;
+ static constexpr int kBandCount = 5;
+ std::mutex mMutex;
+ size_t mChannelCount GUARDED_BY(mMutex) = 0;
+ DynamicsProcessingState mState GUARDED_BY(mMutex) = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
+ std::unique_ptr<dp_fx::DPFrequency> mDpFreq GUARDED_BY(mMutex) = nullptr;
+ bool mEngineInited GUARDED_BY(mMutex) = false;
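+ // Default engine: frequency-domain processing, 10 ms preferred block duration,
+ // five bands per EQ/MBC stage, limiter enabled.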
+ DynamicsProcessing::EngineArchitecture mEngineArchitecture GUARDED_BY(mMutex) = {
+ .resolutionPreference =
+ DynamicsProcessing::ResolutionPreference::FAVOR_FREQUENCY_RESOLUTION,
+ .preferredProcessingDurationMs = kPreferredProcessingDurationMs,
+ .preEqStage = {.inUse = true, .bandCount = kBandCount},
+ .postEqStage = {.inUse = true, .bandCount = kBandCount},
+ .mbcStage = {.inUse = true, .bandCount = kBandCount},
+ .limiterInUse = true,
+ };
+
+ enum class StageType { PREEQ, POSTEQ, MBC, LIMITER, INPUTGAIN };
+
+ void init();
+
+ void dpSetFreqDomainVariant_l(const DynamicsProcessing::EngineArchitecture& engine)
+ REQUIRES(mMutex);
+ dp_fx::DPChannel* getChannel_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getPreEq_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getPostEq_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPMbc* getMbc_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPLimiter* getLimiter_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPBandStage* getStageWithType_l(StageType type, int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getEqWithType_l(StageType type, int ch) REQUIRES(mMutex);
+ template <typename D>
+ RetCode setDpChannels_l(const std::vector<DynamicsProcessing::ChannelConfig>& channels,
+ bool stageInUse, StageType type) REQUIRES(mMutex);
+ template <typename T /* BandConfig */>
+ RetCode setBands_l(const std::vector<T>& bands, int maxBand, StageType type) REQUIRES(mMutex);
+ RetCode setDpChannelBand_l(const std::any& anyConfig, StageType type, int maxCh, int maxBand,
+ std::set<std::pair<int, int>>& chBandSet) REQUIRES(mMutex);
+
+ std::vector<DynamicsProcessing::EqBandConfig> getEqBandConfigs(StageType type);
+ std::vector<DynamicsProcessing::ChannelConfig> getChannelConfig(StageType type);
+
+ bool validateStageEnablement(const DynamicsProcessing::StageEnablement& enablement);
+ bool validateEngineConfig(const DynamicsProcessing::EngineArchitecture& engine);
+ bool validateEqBandConfig(const DynamicsProcessing::EqBandConfig& band, int maxChannel,
+ int maxBand);
+ bool validateMbcBandConfig(const DynamicsProcessing::MbcBandConfig& band, int maxChannel,
+ int maxBand);
+ bool validateLimiterConfig(const DynamicsProcessing::LimiterConfig& limiter, int maxChannel);
+ bool validateInputGainConfig(const DynamicsProcessing::InputGain& gain, int maxChannel);
+
+ inline bool validateCutoffFrequency(float freq);
+ inline bool validateChannel(int ch, int maxCh) { return ch >= 0 && ch < maxCh; }
+ inline bool validateBand(int band, int maxBand) { return band >= 0 && band < maxBand; }
+ inline bool validateTime(int time) { return time >= 0; }
+ inline bool validateRatio(int ratio) { return ratio >= 0; }
+ inline bool validateBandDb(int db) { return db <= 0; }
+};
+
+} // namespace aidl::android::hardware::audio::effect
\ No newline at end of file
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index ba511fe..07ba492 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -22,6 +22,23 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+cc_defaults {
+ name : "hapticgeneratordefaults",
+ srcs: [
+ "Processors.cpp",
+ ],
+ shared_libs: [
+ "libaudioutils",
+ "libbase",
+ "liblog",
+ "libutils",
+ "libvibratorutils",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ ],
+}
+
cc_library_shared {
name: "libhapticgenerator",
@@ -29,7 +46,10 @@
srcs: [
"EffectHapticGenerator.cpp",
- "Processors.cpp",
+ ],
+
+ defaults: [
+ "hapticgeneratordefaults",
],
cflags: [
@@ -43,17 +63,30 @@
"-fvisibility=hidden",
],
- shared_libs: [
- "libaudioutils",
- "libbase",
- "liblog",
- "libutils",
- "libvibratorutils",
+ relative_install_path: "soundfx",
+}
+
+cc_library_shared {
+ name: "libhapticgeneratoraidl",
+
+ srcs: [
+ "aidl/EffectHapticGenerator.cpp",
+ "aidl/HapticGeneratorContext.cpp",
+ ":effectCommonFile",
],
- relative_install_path: "soundfx",
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ "hapticgeneratordefaults",
+ ],
- header_libs: [
- "libaudioeffects",
+ cflags: [
+ "-Wthread-safety",
+ ],
+
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
],
}
diff --git a/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.cpp
new file mode 100644
index 0000000..7e22482
--- /dev/null
+++ b/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.cpp
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_HapticGeneratorImpl"
+
+#include "EffectHapticGenerator.h"
+
+#include <android-base/logging.h>
+#include <audio_effects/effect_hapticgenerator.h>
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::HapticGeneratorImpl;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kHapticGeneratorImplUUID;
+using aidl::android::media::audio::common::AudioUuid;
+
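+// Factory entry points, following the same create/query pattern as the other AIDL
+// effect libraries, keyed on the haptic generator implementation UUID.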
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kHapticGeneratorImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<HapticGeneratorImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kHapticGeneratorImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = HapticGeneratorImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string HapticGeneratorImpl::kEffectName = "Haptic Generator";
+const Descriptor HapticGeneratorImpl::kDescriptor = {
+ .common = {.id = {.type = kHapticGeneratorTypeUUID,
+ .uuid = kHapticGeneratorImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT, .insert = Flags::Insert::FIRST},
+ .name = HapticGeneratorImpl::kEffectName,
+ .implementor = "The Android Open Source Project"}};
+
+ndk::ScopedAStatus HapticGeneratorImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus HapticGeneratorImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->reset();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus HapticGeneratorImpl::setParameterSpecific(const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::hapticGenerator != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& hgParam = specific.get<Parameter::Specific::hapticGenerator>();
+ auto tag = hgParam.getTag();
+
+ switch (tag) {
+ case HapticGenerator::hapticScales: {
+ RETURN_IF(mContext->setHgHapticScales(hgParam.get<HapticGenerator::hapticScales>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setHapticScaleFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case HapticGenerator::vibratorInfo: {
+ RETURN_IF(mContext->setHgVibratorInformation(
+ hgParam.get<HapticGenerator::vibratorInfo>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setVibratorInfoFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "HapticGeneratorTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus HapticGeneratorImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::hapticGeneratorTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto hgId = id.get<Parameter::Id::hapticGeneratorTag>();
+ auto hgIdTag = hgId.getTag();
+ switch (hgIdTag) {
+ case HapticGenerator::Id::commonTag:
+ return getParameterHapticGenerator(hgId.get<HapticGenerator::Id::commonTag>(),
+ specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(hgIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "HapticGeneratorTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus HapticGeneratorImpl::getParameterHapticGenerator(const HapticGenerator::Tag& tag,
+ Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ HapticGenerator hgParam;
+ switch (tag) {
+ case HapticGenerator::hapticScales: {
+ hgParam.set<HapticGenerator::hapticScales>(mContext->getHgHapticScales());
+ break;
+ }
+ case HapticGenerator::vibratorInfo: {
+ hgParam.set<HapticGenerator::vibratorInfo>(mContext->getHgVibratorInformation());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "HapticGeneratorTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::hapticGenerator>(hgParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> HapticGeneratorImpl::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<HapticGeneratorContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+RetCode HapticGeneratorImpl::releaseContext() {
+ if (mContext) {
+ mContext->reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status HapticGeneratorImpl::effectProcessImpl(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.h
new file mode 100644
index 0000000..02ca392
--- /dev/null
+++ b/media/libeffects/hapticgenerator/aidl/EffectHapticGenerator.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "HapticGeneratorContext.h"
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class HapticGeneratorImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ HapticGeneratorImpl() { LOG(DEBUG) << __func__; }
+ ~HapticGeneratorImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+    IEffect::Status effectProcessImpl(float* in, float* out, int samples) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<HapticGeneratorContext> mContext;
+ ndk::ScopedAStatus getParameterHapticGenerator(const HapticGenerator::Tag& tag,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
new file mode 100644
index 0000000..64f51c3
--- /dev/null
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_HapticGeneratorContext"
+
+#include <Utils.h>
+#include <android-base/parsedouble.h>
+#include <android-base/properties.h>
+
+#include "HapticGeneratorContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+HapticGeneratorContext::HapticGeneratorContext(int statusDepth, const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ mState = HAPTIC_GENERATOR_STATE_UNINITIALIZED;
+ mSampleRate = common.input.base.sampleRate;
+ mFrameCount = common.input.frameCount;
+ init_params(common.input.base.channelMask, common.output.base.channelMask);
+}
+
+HapticGeneratorContext::~HapticGeneratorContext() {
+ LOG(DEBUG) << __func__;
+ mState = HAPTIC_GENERATOR_STATE_UNINITIALIZED;
+}
+
+RetCode HapticGeneratorContext::enable() {
+ if (mState != HAPTIC_GENERATOR_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = HAPTIC_GENERATOR_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode HapticGeneratorContext::disable() {
+ if (mState != HAPTIC_GENERATOR_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = HAPTIC_GENERATOR_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void HapticGeneratorContext::reset() {
+ for (auto& filter : mProcessorsRecord.filters) {
+ filter->clear();
+ }
+ for (auto& slowEnv : mProcessorsRecord.slowEnvs) {
+ slowEnv->clear();
+ }
+ for (auto& distortion : mProcessorsRecord.distortions) {
+ distortion->clear();
+ }
+}
+
+RetCode HapticGeneratorContext::setHgHapticScales(
+        const std::vector<HapticGenerator::HapticScale>& hapticScales) {
+    std::lock_guard lg(mMutex);
+    for (const auto& hapticScale : hapticScales) {
+ mParams.mHapticScales.insert_or_assign(hapticScale.id, hapticScale.scale);
+ }
+ mParams.mMaxVibratorScale = HapticGenerator::VibratorScale::MUTE;
+ for (const auto& [id, vibratorScale] : mParams.mHapticScales) {
+ mParams.mMaxVibratorScale = std::max(mParams.mMaxVibratorScale, vibratorScale);
+ }
+ return RetCode::SUCCESS;
+}
+
+HapticGenerator::VibratorInformation HapticGeneratorContext::getHgVibratorInformation() {
+ std::lock_guard lg(mMutex);
+ return mParams.mVibratorInfo;
+}
+
+std::vector<HapticGenerator::HapticScale> HapticGeneratorContext::getHgHapticScales() {
+ std::vector<HapticGenerator::HapticScale> result;
+ std::lock_guard lg(mMutex);
+ for (const auto& [id, vibratorScale] : mParams.mHapticScales) {
+ result.push_back({id, vibratorScale});
+ }
+ return result;
+}
+
+RetCode HapticGeneratorContext::setHgVibratorInformation(
+ const HapticGenerator::VibratorInformation& vibratorInfo) {
+ {
+ std::lock_guard lg(mMutex);
+ mParams.mVibratorInfo = vibratorInfo;
+
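+        // Re-tune the cached band-pass and band-stop filters with the new resonant frequency
+        // and Q factor; the rest of the chain is rebuilt by configure() after the lock is
+        // released.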
+ if (mProcessorsRecord.bpf != nullptr) {
+ mProcessorsRecord.bpf->setCoefficients(
+ ::android::audio_effect::haptic_generator::bpfCoefs(
+ mParams.mVibratorInfo.resonantFrequencyHz, DEFAULT_BPF_Q, mSampleRate));
+ }
+ if (mProcessorsRecord.bsf != nullptr) {
+ mProcessorsRecord.bsf->setCoefficients(
+ ::android::audio_effect::haptic_generator::bsfCoefs(
+ mParams.mVibratorInfo.resonantFrequencyHz,
+ mParams.mVibratorInfo.qFactor, mParams.mVibratorInfo.qFactor / 2.0f,
+ mSampleRate));
+ }
+ }
+ configure();
+ return RetCode::SUCCESS;
+}
+
+IEffect::Status HapticGeneratorContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ RETURN_VALUE_IF(getInputFrameSize() != getOutputFrameSize(), status, "FrameSizeMismatch");
+ auto frameSize = getInputFrameSize();
+ RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
+
+ LOG(DEBUG) << __func__ << " start processing";
+    // The audio data must not be modified; it is just written to the
+    // output buffer according to the access mode.
+ bool accumulate = false;
+ if (in != out) {
+ for (int i = 0; i < samples; i++) {
+ if (accumulate) {
+ out[i] += in[i];
+ } else {
+ out[i] = in[i];
+ }
+ }
+ }
+
+ if (mState != HAPTIC_GENERATOR_STATE_ACTIVE) {
+ return status;
+ }
+
+ std::lock_guard lg(mMutex);
+ if (mParams.mMaxVibratorScale == HapticGenerator::VibratorScale::MUTE) {
+        // Haptic channels are muted, no need to generate haptic data.
+ return {STATUS_OK, samples, samples};
+ }
+
+ // Resize buffer if the haptic sample count is greater than buffer size.
+ size_t hapticSampleCount = mFrameCount * mParams.mHapticChannelCount;
+ if (hapticSampleCount > mInputBuffer.size()) {
+ // The inputBuffer and outputBuffer must have the same size, which must be at least
+ // the haptic sample count.
+ mInputBuffer.resize(hapticSampleCount);
+ mOutputBuffer.resize(hapticSampleCount);
+ }
+
+ // Construct input buffer according to haptic channel source
+ for (size_t i = 0; i < mFrameCount; ++i) {
+ for (size_t j = 0; j < mParams.mHapticChannelCount; ++j) {
+ mInputBuffer[i * mParams.mHapticChannelCount + j] =
+ in[i * mParams.mAudioChannelCount + mParams.mHapticChannelSource[j]];
+ }
+ }
+
+ float* hapticOutBuffer =
+ runProcessingChain(mInputBuffer.data(), mOutputBuffer.data(), mFrameCount);
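+    // Scale the generated haptic samples by the strongest scale requested across all tracks
+    // (mMaxVibratorScale) before writing them to the haptic channels.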
+ ::android::os::scaleHapticData(
+ hapticOutBuffer, hapticSampleCount,
+ static_cast<::android::os::HapticScale>(mParams.mMaxVibratorScale),
+ mParams.mVibratorInfo.qFactor);
+
+ // For haptic data, the haptic playback thread will copy the data from effect input
+ // buffer, which contains haptic data at the end of the buffer, directly to sink buffer.
+ // In that case, copy haptic data to input buffer instead of output buffer.
+ // Note: this may not work with rpc/binder calls
+    size_t offset = samples;
+    for (size_t i = 0; i < hapticSampleCount; ++i) {
+        in[offset + i] = hapticOutBuffer[i];
+    }
+ return {STATUS_OK, samples, static_cast<int32_t>(samples + hapticSampleCount)};
+}
+
+void HapticGeneratorContext::init_params(media::audio::common::AudioChannelLayout inputChMask,
+ media::audio::common::AudioChannelLayout outputChMask) {
+ std::lock_guard lg(mMutex);
+ mParams.mMaxVibratorScale = HapticGenerator::VibratorScale::MUTE;
+ mParams.mVibratorInfo.resonantFrequencyHz = DEFAULT_RESONANT_FREQUENCY;
+ mParams.mVibratorInfo.qFactor = DEFAULT_BSF_ZERO_Q;
+
+ mParams.mAudioChannelCount = ::android::hardware::audio::common::getChannelCount(
+ inputChMask, ~media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
+ mParams.mHapticChannelCount = ::android::hardware::audio::common::getChannelCount(
+ outputChMask, media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
+ LOG_ALWAYS_FATAL_IF(mParams.mHapticChannelCount > 2, "haptic channel count is too large");
+ for (size_t i = 0; i < mParams.mHapticChannelCount; ++i) {
+ // By default, use the first audio channel to generate haptic channels.
+ mParams.mHapticChannelSource[i] = 0;
+ }
+
+ mState = HAPTIC_GENERATOR_STATE_INITIALIZED;
+}
+
+float HapticGeneratorContext::getDistortionOutputGain() {
+ float distortionOutputGain = getFloatProperty(
+ "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
+ LOG(DEBUG) << "Using distortion output gain as " << distortionOutputGain;
+ return distortionOutputGain;
+}
+
+float HapticGeneratorContext::getFloatProperty(const std::string& key, float defaultValue) {
+ float result;
+ std::string value = ::android::base::GetProperty(key, "");
+ if (!value.empty() && ::android::base::ParseFloat(value, &result)) {
+ return result;
+ }
+ return defaultValue;
+}
+
+void HapticGeneratorContext::addBiquadFilter(std::shared_ptr<HapticBiquadFilter> filter) {
+ // The process chain captures the shared pointer of the filter in lambda.
+ // The process record will keep a shared pointer to the filter so that it is possible to
+ // access the filter outside of the process chain.
+ mProcessorsRecord.filters.push_back(filter);
+ mProcessingChain.push_back([filter](float* out, const float* in, size_t frameCount) {
+ filter->process(out, in, frameCount);
+ });
+}
+
+/**
+ * Build haptic generator processing chain.
+ */
+void HapticGeneratorContext::buildProcessingChain() {
+ std::lock_guard lg(mMutex);
+ const size_t channelCount = mParams.mHapticChannelCount;
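+    // Stages added below, in order: HPF(50 Hz) -> LPF(9 kHz) -> half-wave rectifier ->
+    // HPF(60 Hz) -> LPF(700 Hz) -> LPF(400 Hz) -> LPF(500 Hz) -> BPF(resonant frequency) ->
+    // slow envelope (AGC) -> BSF(resonant frequency) -> distortion.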
+ float highPassCornerFrequency = 50.0f;
+ auto hpf = ::android::audio_effect::haptic_generator::createHPF2(highPassCornerFrequency,
+ mSampleRate, channelCount);
+ addBiquadFilter(hpf);
+ float lowPassCornerFrequency = 9000.0f;
+ auto lpf = ::android::audio_effect::haptic_generator::createLPF2(lowPassCornerFrequency,
+ mSampleRate, channelCount);
+ addBiquadFilter(lpf);
+
+ auto ramp = std::make_shared<::android::audio_effect::haptic_generator::Ramp>(
+ channelCount); // ramp = half-wave rectifier.
+    // The process chain captures the shared pointer of the ramp in lambda. The process record
+    // also keeps a shared pointer to the ramp so that it is possible to access the ramp
+    // outside of the process chain.
+ mProcessorsRecord.ramps.push_back(ramp);
+ mProcessingChain.push_back([ramp](float* out, const float* in, size_t frameCount) {
+ ramp->process(out, in, frameCount);
+ });
+
+ highPassCornerFrequency = 60.0f;
+ hpf = ::android::audio_effect::haptic_generator::createHPF2(highPassCornerFrequency,
+ mSampleRate, channelCount);
+ addBiquadFilter(hpf);
+ lowPassCornerFrequency = 700.0f;
+ lpf = ::android::audio_effect::haptic_generator::createLPF2(lowPassCornerFrequency, mSampleRate,
+ channelCount);
+ addBiquadFilter(lpf);
+
+ lowPassCornerFrequency = 400.0f;
+ lpf = ::android::audio_effect::haptic_generator::createLPF2(lowPassCornerFrequency, mSampleRate,
+ channelCount);
+ addBiquadFilter(lpf);
+ lowPassCornerFrequency = 500.0f;
+ lpf = ::android::audio_effect::haptic_generator::createLPF2(lowPassCornerFrequency, mSampleRate,
+ channelCount);
+ addBiquadFilter(lpf);
+
+ auto bpf = ::android::audio_effect::haptic_generator::createBPF(
+ mParams.mVibratorInfo.resonantFrequencyHz, DEFAULT_BPF_Q, mSampleRate, channelCount);
+ mProcessorsRecord.bpf = bpf;
+ addBiquadFilter(bpf);
+
+ float normalizationPower = DEFAULT_SLOW_ENV_NORMALIZATION_POWER;
+    // The process chain captures the shared pointer of the slow envelope in lambda. The process
+    // record also keeps a shared pointer to the slow envelope so that it is possible to access
+    // the slow envelope outside of the process chain.
+    // SlowEnvelope = partial normalizer, or AGC.
+ auto slowEnv = std::make_shared<::android::audio_effect::haptic_generator::SlowEnvelope>(
+ 5.0f /*envCornerFrequency*/, mSampleRate, normalizationPower, 0.01f /*envOffset*/,
+ channelCount);
+ mProcessorsRecord.slowEnvs.push_back(slowEnv);
+ mProcessingChain.push_back([slowEnv](float* out, const float* in, size_t frameCount) {
+ slowEnv->process(out, in, frameCount);
+ });
+
+ auto bsf = ::android::audio_effect::haptic_generator::createBSF(
+ mParams.mVibratorInfo.resonantFrequencyHz, mParams.mVibratorInfo.qFactor,
+ mParams.mVibratorInfo.qFactor / 2.0f, mSampleRate, channelCount);
+ mProcessorsRecord.bsf = bsf;
+ addBiquadFilter(bsf);
+
+    // The process chain captures the shared pointer of the Distortion in lambda. The process
+    // record also keeps a shared pointer to the Distortion so that it is possible to access
+    // the Distortion outside of the process chain.
+ auto distortion = std::make_shared<::android::audio_effect::haptic_generator::Distortion>(
+ DEFAULT_DISTORTION_CORNER_FREQUENCY, mSampleRate, DEFAULT_DISTORTION_INPUT_GAIN,
+ DEFAULT_DISTORTION_CUBE_THRESHOLD, getDistortionOutputGain(), channelCount);
+ mProcessorsRecord.distortions.push_back(distortion);
+ mProcessingChain.push_back([distortion](float* out, const float* in, size_t frameCount) {
+ distortion->process(out, in, frameCount);
+ });
+}
+
+void HapticGeneratorContext::configure() {
+ mProcessingChain.clear();
+ mProcessorsRecord.filters.clear();
+ mProcessorsRecord.ramps.clear();
+ mProcessorsRecord.slowEnvs.clear();
+ mProcessorsRecord.distortions.clear();
+
+ buildProcessingChain();
+}
+
+/**
+ * Run the processing chain to generate haptic data from audio data
+ *
+ * @param buf1 a buffer contains raw audio data
+ * @param buf2 a buffer that is large enough to keep all the data
+ * @param frameCount frame count of the data
+ *
+ * @return a pointer to the output buffer
+ */
+float* HapticGeneratorContext::runProcessingChain(float* buf1, float* buf2, size_t frameCount) {
+ float* in = buf1;
+ float* out = buf2;
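+    // Ping-pong between buf1 and buf2: each stage writes to "out", then the pointers are
+    // swapped so the next stage reads the previous stage's output; "in" ends up pointing at
+    // the final result.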
+    for (const auto& processingFunc : mProcessingChain) {
+ processingFunc(out, in, frameCount);
+ std::swap(in, out);
+ }
+ return in;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
new file mode 100644
index 0000000..dc43feb
--- /dev/null
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <vibrator/ExternalVibrationUtils.h>
+#include <map>
+
+#include "Processors.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+enum HapticGeneratorState {
+ HAPTIC_GENERATOR_STATE_UNINITIALIZED,
+ HAPTIC_GENERATOR_STATE_INITIALIZED,
+ HAPTIC_GENERATOR_STATE_ACTIVE,
+};
+
+struct HapticGeneratorParam {
+    // The audio channels used to generate the haptic channels. The first entry is used to
+    // generate HAPTIC_A, the second to generate HAPTIC_B.
+    // Each value is an offset into the audio channels.
+ int mHapticChannelSource[2];
+
+ int mHapticChannelCount;
+ int mAudioChannelCount;
+
+ HapticGenerator::HapticScale mHapticScale;
+ std::map<int, HapticGenerator::VibratorScale> mHapticScales;
+ // max intensity will be used to scale haptic data.
+ HapticGenerator::VibratorScale mMaxVibratorScale;
+
+ HapticGenerator::VibratorInformation mVibratorInfo;
+};
+
+// A structure to keep all shared pointers for all processors in HapticGenerator.
+struct HapticGeneratorProcessorsRecord {
+ std::vector<std::shared_ptr<HapticBiquadFilter>> filters;
+ std::vector<std::shared_ptr<::android::audio_effect::haptic_generator::Ramp>> ramps;
+ std::vector<std::shared_ptr<::android::audio_effect::haptic_generator::SlowEnvelope>> slowEnvs;
+ std::vector<std::shared_ptr<::android::audio_effect::haptic_generator::Distortion>> distortions;
+
+ // Cache band-pass filter and band-stop filter for updating parameters
+ // according to vibrator info
+ std::shared_ptr<HapticBiquadFilter> bpf;
+ std::shared_ptr<HapticBiquadFilter> bsf;
+};
+
+class HapticGeneratorContext final : public EffectContext {
+ public:
+ HapticGeneratorContext(int statusDepth, const Parameter::Common& common);
+ ~HapticGeneratorContext();
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+    RetCode setHgHapticScales(const std::vector<HapticGenerator::HapticScale>& hapticScales);
+ std::vector<HapticGenerator::HapticScale> getHgHapticScales();
+
+ RetCode setHgVibratorInformation(const HapticGenerator::VibratorInformation& vibratorInfo);
+ HapticGenerator::VibratorInformation getHgVibratorInformation();
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
+ static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
+ static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+ static constexpr float DEFAULT_DISTORTION_OUTPUT_GAIN = 1.5f;
+ static constexpr float DEFAULT_BPF_Q = 1.0f;
+ static constexpr float DEFAULT_SLOW_ENV_NORMALIZATION_POWER = -0.8f;
+ static constexpr float DEFAULT_DISTORTION_CORNER_FREQUENCY = 300.0f;
+ static constexpr float DEFAULT_DISTORTION_INPUT_GAIN = 0.3f;
+ static constexpr float DEFAULT_DISTORTION_CUBE_THRESHOLD = 0.1f;
+
+ std::mutex mMutex;
+ HapticGeneratorState mState;
+ HapticGeneratorParam mParams GUARDED_BY(mMutex);
+ int mSampleRate;
+ int mFrameCount = 0;
+
+ // A cache for all shared pointers of the HapticGenerator
+ struct HapticGeneratorProcessorsRecord mProcessorsRecord;
+
+ // Using a vector of functions to record the processing chain for haptic-generating algorithm.
+ // The three parameters of the processing functions are pointer to output buffer, pointer to
+ // input buffer and frame count.
+ std::vector<std::function<void(float*, const float*, size_t)>> mProcessingChain;
+
+ // inputBuffer is where to keep input buffer for the generating algorithm. It will be
+ // constructed according to hapticChannelSource.
+ std::vector<float> mInputBuffer;
+
+ // outputBuffer is a buffer having the same length as inputBuffer. It can be used as
+ // intermediate buffer in the generating algorithm.
+ std::vector<float> mOutputBuffer;
+
+ void init_params(media::audio::common::AudioChannelLayout inputChMask,
+ media::audio::common::AudioChannelLayout outputChMask);
+ void configure();
+
+ float getDistortionOutputGain();
+ float getFloatProperty(const std::string& key, float defaultValue);
+ void addBiquadFilter(std::shared_ptr<HapticBiquadFilter> filter);
+ void buildProcessingChain();
+ float* runProcessingChain(float* buf1, float* buf2, size_t frameCount);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/Android.bp b/media/libeffects/loudness/Android.bp
index bcd6947..fc0217b 100644
--- a/media/libeffects/loudness/Android.bp
+++ b/media/libeffects/loudness/Android.bp
@@ -44,3 +44,32 @@
header_libs: ["libaudioeffects"],
}
+
+cc_library_shared {
+ name: "libloudnessenhanceraidl",
+ srcs: [
+ "aidl/EffectLoudnessEnhancer.cpp",
+ "aidl/LoudnessEnhancerContext.cpp",
+ "dsp/core/dynamic_range_compression.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libhardware_headers",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ shared_libs: [
+ "libcutils",
+ "liblog",
+ ],
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp
new file mode 100644
index 0000000..9d8bc80
--- /dev/null
+++ b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.cpp
@@ -0,0 +1,182 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_LoudnessEnhancerImpl"
+
+#include <android-base/logging.h>
+
+#include "EffectLoudnessEnhancer.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kLoudnessEnhancerImplUUID;
+using aidl::android::hardware::audio::effect::LoudnessEnhancerImpl;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kLoudnessEnhancerImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<LoudnessEnhancerImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kLoudnessEnhancerImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = LoudnessEnhancerImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string LoudnessEnhancerImpl::kEffectName = "Loudness Enhancer";
+const Descriptor LoudnessEnhancerImpl::kDescriptor = {
+ .common = {.id = {.type = kLoudnessEnhancerTypeUUID,
+ .uuid = kLoudnessEnhancerImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT, .insert = Flags::Insert::FIRST},
+ .name = LoudnessEnhancerImpl::kEffectName,
+ .implementor = "The Android Open Source Project"}};
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::setParameterSpecific(const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::loudnessEnhancer != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& leParam = specific.get<Parameter::Specific::loudnessEnhancer>();
+ auto tag = leParam.getTag();
+
+ switch (tag) {
+ case LoudnessEnhancer::gainMb: {
+ RETURN_IF(mContext->setLeGain(leParam.get<LoudnessEnhancer::gainMb>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setGainMbFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::loudnessEnhancerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto leId = id.get<Parameter::Id::loudnessEnhancerTag>();
+ auto leIdTag = leId.getTag();
+ switch (leIdTag) {
+ case LoudnessEnhancer::Id::commonTag:
+ return getParameterLoudnessEnhancer(leId.get<LoudnessEnhancer::Id::commonTag>(),
+ specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(leIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus LoudnessEnhancerImpl::getParameterLoudnessEnhancer(
+ const LoudnessEnhancer::Tag& tag, Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ LoudnessEnhancer leParam;
+ switch (tag) {
+ case LoudnessEnhancer::gainMb: {
+ leParam.set<LoudnessEnhancer::gainMb>(mContext->getLeGain());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "LoudnessEnhancerTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::loudnessEnhancer>(leParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> LoudnessEnhancerImpl::createContext(
+ const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<LoudnessEnhancerContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+RetCode LoudnessEnhancerImpl::releaseContext() {
+ if (mContext) {
+ mContext->disable();
+ mContext->resetBuffer();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status LoudnessEnhancerImpl::effectProcessImpl(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h
new file mode 100644
index 0000000..6402fd2
--- /dev/null
+++ b/media/libeffects/loudness/aidl/EffectLoudnessEnhancer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+#include "LoudnessEnhancerContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class LoudnessEnhancerImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ LoudnessEnhancerImpl() { LOG(DEBUG) << __func__; }
+ ~LoudnessEnhancerImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+    IEffect::Status effectProcessImpl(float* in, float* out, int samples) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<LoudnessEnhancerContext> mContext;
+ ndk::ScopedAStatus getParameterLoudnessEnhancer(const LoudnessEnhancer::Tag& tag,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp
new file mode 100644
index 0000000..033b222
--- /dev/null
+++ b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.cpp
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "LoudnessEnhancerContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+LoudnessEnhancerContext::LoudnessEnhancerContext(int statusDepth, const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ mState = LOUDNESS_ENHANCER_STATE_UNINITIALIZED;
+ mSampleRate = common.input.base.sampleRate;
+ init_params();
+}
+
+LoudnessEnhancerContext::~LoudnessEnhancerContext() {
+ LOG(DEBUG) << __func__;
+ mState = LOUDNESS_ENHANCER_STATE_UNINITIALIZED;
+}
+
+RetCode LoudnessEnhancerContext::enable() {
+ if (mState != LOUDNESS_ENHANCER_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = LOUDNESS_ENHANCER_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode LoudnessEnhancerContext::disable() {
+ if (mState != LOUDNESS_ENHANCER_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = LOUDNESS_ENHANCER_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void LoudnessEnhancerContext::reset() {
+ float targetAmp = pow(10, mGain / 2000.0f); // mB to linear amplification
+ {
+ std::lock_guard lg(mMutex);
+ if (mCompressor != nullptr) {
+ // Get samplingRate from input
+ mCompressor->Initialize(targetAmp, mSampleRate);
+ }
+ }
+}
+
+RetCode LoudnessEnhancerContext::setLeGain(int gainMb) {
+ mGain = gainMb;
+ reset(); // apply parameter update
+ return RetCode::SUCCESS;
+}
+
+IEffect::Status LoudnessEnhancerContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ RETURN_VALUE_IF(getInputFrameSize() != getOutputFrameSize(), status, "FrameSizeMismatch");
+ auto frameSize = getInputFrameSize();
+ RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
+
+ LOG(DEBUG) << __func__ << " start processing";
+ {
+ std::lock_guard lg(mMutex);
+ // PcmType is always expected to be Float 32 bit.
+ constexpr float scale = 1 << 15; // power of 2 is lossless conversion to int16_t range
+ constexpr float inverseScale = 1.f / scale;
+ const float inputAmp = pow(10, mGain / 2000.0f) * scale;
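+        // inputAmp folds the target gain (mB converted to a linear factor) and the int16 scale
+        // into a single multiplier applied before compression.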
+ float leftSample, rightSample;
+ if (mCompressor != nullptr) {
+ for (int inIdx = 0; inIdx < samples; inIdx += 2) {
+ // makeup gain is applied on the input of the compressor
+ leftSample = inputAmp * in[inIdx];
+ rightSample = inputAmp * in[inIdx + 1];
+ mCompressor->Compress(&leftSample, &rightSample);
+ in[inIdx] = leftSample * inverseScale;
+ in[inIdx + 1] = rightSample * inverseScale;
+ }
+ } else {
+ for (int inIdx = 0; inIdx < samples; inIdx += 2) {
+ leftSample = inputAmp * in[inIdx];
+ rightSample = inputAmp * in[inIdx + 1];
+ in[inIdx] = leftSample * inverseScale;
+ in[inIdx + 1] = rightSample * inverseScale;
+ }
+ }
+ bool accumulate = false;
+ if (in != out) {
+ for (int i = 0; i < samples; i++) {
+ if (accumulate) {
+ out[i] += in[i];
+ } else {
+ out[i] = in[i];
+ }
+ }
+ }
+ }
+ return {STATUS_OK, samples, samples};
+}
+
+void LoudnessEnhancerContext::init_params() {
+ mGain = LOUDNESS_ENHANCER_DEFAULT_TARGET_GAIN_MB;
+ float targetAmp = pow(10, mGain / 2000.0f); // mB to linear amplification
+ LOG(DEBUG) << __func__ << "Target gain = " << mGain << "mB <=> factor = " << targetAmp;
+
+ {
+ std::lock_guard lg(mMutex);
+ mCompressor = std::make_unique<le_fx::AdaptiveDynamicRangeCompression>();
+ mCompressor->Initialize(targetAmp, mSampleRate);
+ }
+ mState = LOUDNESS_ENHANCER_STATE_INITIALIZED;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h
new file mode 100644
index 0000000..b478b27
--- /dev/null
+++ b/media/libeffects/loudness/aidl/LoudnessEnhancerContext.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <audio_effects/effect_loudnessenhancer.h>
+
+#include "dsp/core/dynamic_range_compression.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+enum LoudnessEnhancerState {
+ LOUDNESS_ENHANCER_STATE_UNINITIALIZED,
+ LOUDNESS_ENHANCER_STATE_INITIALIZED,
+ LOUDNESS_ENHANCER_STATE_ACTIVE,
+};
+
+class LoudnessEnhancerContext final : public EffectContext {
+ public:
+ LoudnessEnhancerContext(int statusDepth, const Parameter::Common& common);
+ ~LoudnessEnhancerContext();
+
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+ RetCode setLeGain(int gainMb);
+ int getLeGain() const { return mGain; }
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ std::mutex mMutex;
+ LoudnessEnhancerState mState;
+ int mSampleRate;
+ int mGain;
+ // In this implementation, there is no coupling between the compression on the left and right
+ // channels
+ std::unique_ptr<le_fx::AdaptiveDynamicRangeCompression> mCompressor GUARDED_BY(mMutex);
+
+ void init_params();
+};
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
index 2defa4e..3aee721 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -14,17 +14,23 @@
* limitations under the License.
*/
+#include <cstddef>
#define LOG_TAG "BundleContext"
#include <Utils.h>
#include "BundleContext.h"
#include "BundleTypes.h"
+#include "math.h"
namespace aidl::android::hardware::audio::effect {
+using aidl::android::media::audio::common::AudioDeviceDescription;
+using aidl::android::media::audio::common::AudioDeviceType;
+
RetCode BundleContext::init() {
+ std::lock_guard lg(mMutex);
// init with pre-defined preset NORMAL
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
mBandGaindB[i] = lvm::kSoftPresets[0 /* normal */][i];
}
@@ -57,6 +63,7 @@
}
void BundleContext::deInit() {
+ std::lock_guard lg(mMutex);
if (mInstance) {
LVM_DelInstanceHandle(&mInstance);
mInstance = nullptr;
@@ -64,32 +71,280 @@
}
RetCode BundleContext::enable() {
- LVM_ControlParams_t params;
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
- if (mType == lvm::BundleEffectType::EQUALIZER) {
- LOG(DEBUG) << __func__ << " enable bundle EQ";
- params.EQNB_OperatingMode = LVM_EQNB_ON;
+ if (mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;
+    // Bass boost and virtualizer may be temporarily disabled when playing over the device
+    // speaker; see setOutputDevice().
+ bool tempDisabled = false;
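+    // Each case below re-arms its mSamplesToExitCount* with roughly 100 ms worth of samples and
+    // clears the corresponding mEffectInDrain bit; lvmProcess() counts the samples down after the
+    // effect is stopped so the effect tail can drain before it is fully bypassed.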
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle EQ";
+ if (mSamplesToExitCountEq <= 0) mNumberEffectsEnabled++;
+ mSamplesToExitCountEq = (mSamplesPerSecond * 0.1);
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " enable bundle BB";
+ if (mSamplesToExitCountBb <= 0) mNumberEffectsEnabled++;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+ mSamplesToExitCountBb = (mSamplesPerSecond * 0.1);
+ tempDisabled = mBassTempDisabled;
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle VR";
+ if (mSamplesToExitCountVirt <= 0) mNumberEffectsEnabled++;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+ mSamplesToExitCountVirt = (mSamplesPerSecond * 0.1);
+ tempDisabled = mVirtualizerTempDisabled;
+ break;
+ case lvm::BundleEffectType::VOLUME:
+ LOG(DEBUG) << __func__ << " enable bundle VOL";
+ if ((mEffectInDrain & (1 << int(lvm::BundleEffectType::VOLUME))) == 0)
+ mNumberEffectsEnabled++;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VOLUME));
+ break;
}
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
mEnabled = true;
- // LvmEffect_limitLevel(pContext);
- return RetCode::SUCCESS;
+ return (tempDisabled ? RetCode::SUCCESS : enableOperatingMode());
+}
+
+RetCode BundleContext::enableOperatingMode() {
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle EQ";
+ params.EQNB_OperatingMode = LVM_EQNB_ON;
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " enable bundle BB";
+ params.BE_OperatingMode = LVM_BE_ON;
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " enable bundle VR";
+ params.VirtualizerOperatingMode = LVM_MODE_ON;
+ break;
+ case lvm::BundleEffectType::VOLUME:
+ LOG(DEBUG) << __func__ << " enable bundle VOL";
+ break;
+ }
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
+ }
+ return limitLevel();
}
RetCode BundleContext::disable() {
- LVM_ControlParams_t params;
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
- if (mType == lvm::BundleEffectType::EQUALIZER) {
- LOG(DEBUG) << __func__ << " disable bundle EQ";
- params.EQNB_OperatingMode = LVM_EQNB_OFF;
+ if (!mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle EQ";
+ mEffectInDrain |= 1 << int(lvm::BundleEffectType::EQUALIZER);
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " disable bundle BB";
+ mEffectInDrain |= 1 << int(lvm::BundleEffectType::BASS_BOOST);
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle VR";
+ mEffectInDrain |= 1 << int(lvm::BundleEffectType::VIRTUALIZER);
+ break;
+ case lvm::BundleEffectType::VOLUME:
+ LOG(DEBUG) << __func__ << " disable bundle VOL";
+ mEffectInDrain |= 1 << int(lvm::BundleEffectType::VOLUME);
+ break;
}
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
mEnabled = false;
- // LvmEffect_limitLevel(pContext);
+ return disableOperatingMode();
+}
+
+RetCode BundleContext::disableOperatingMode() {
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "failGetControlParams");
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle EQ";
+ params.EQNB_OperatingMode = LVM_EQNB_OFF;
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ LOG(DEBUG) << __func__ << " disable bundle BB";
+ params.BE_OperatingMode = LVM_BE_OFF;
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ LOG(DEBUG) << __func__ << " disable bundle VR";
+ params.VirtualizerOperatingMode = LVM_MODE_OFF;
+ break;
+ case lvm::BundleEffectType::VOLUME:
+ LOG(DEBUG) << __func__ << " disable bundle VOL";
+ break;
+ }
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "failSetControlParams");
+ }
+ mEnabled = false;
+ return limitLevel();
+}
+
+RetCode BundleContext::limitLevel() {
+ int gainCorrection = 0;
+ // Count the energy contribution per band for EQ and BassBoost only if they are active.
+ float energyContribution = 0;
+ float energyCross = 0;
+ float energyBassBoost = 0;
+ float crossCorrection = 0;
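+    // The block below estimates the worst-case level added by the enabled effects (per-band EQ
+    // energy, EQ cross-band terms, bass boost and its interaction with the EQ, virtualizer) and
+    // pulls the volume control level down by that headroom so the combined effects do not clip.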
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ bool eqEnabled = params.EQNB_OperatingMode == LVM_EQNB_ON;
+ bool bbEnabled = params.BE_OperatingMode == LVM_BE_ON;
+ bool viEnabled = params.VirtualizerOperatingMode == LVM_MODE_ON;
+
+ if (eqEnabled) {
+ for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ float bandFactor = mBandGaindB[i] / 15.0;
+ float bandCoefficient = lvm::kBandEnergyCoefficient[i];
+ float bandEnergy = bandFactor * bandCoefficient * bandCoefficient;
+ if (bandEnergy > 0) energyContribution += bandEnergy;
+ }
+
+ // cross EQ coefficients
+ float bandFactorSum = 0;
+ for (int i = 0; i < lvm::MAX_NUM_BANDS - 1; i++) {
+ float bandFactor1 = mBandGaindB[i] / 15.0;
+ float bandFactor2 = mBandGaindB[i + 1] / 15.0;
+
+ if (bandFactor1 > 0 && bandFactor2 > 0) {
+ float crossEnergy =
+ bandFactor1 * bandFactor2 * lvm::kBandEnergyCrossCoefficient[i];
+ bandFactorSum += bandFactor1 * bandFactor2;
+
+ if (crossEnergy > 0) energyCross += crossEnergy;
+ }
+ }
+ bandFactorSum -= 1.0;
+ if (bandFactorSum > 0) crossCorrection = bandFactorSum * 0.7;
+ }
+ // BassBoost contribution
+ if (bbEnabled) {
+ float boostFactor = mBassStrengthSaved / 1000.0;
+ float boostCoefficient = lvm::kBassBoostEnergyCoefficient;
+
+ energyContribution += boostFactor * boostCoefficient * boostCoefficient;
+
+ if (eqEnabled) {
+ for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ float bandFactor = mBandGaindB[i] / 15.0;
+ float bandCrossCoefficient = lvm::kBassBoostEnergyCrossCoefficient[i];
+ float bandEnergy = boostFactor * bandFactor * bandCrossCoefficient;
+ if (bandEnergy > 0) energyBassBoost += bandEnergy;
+ }
+ }
+ }
+ // Virtualizer contribution
+ if (viEnabled) {
+ energyContribution += lvm::kVirtualizerContribution * lvm::kVirtualizerContribution;
+ }
+
+ double totalEnergyEstimation =
+ sqrt(energyContribution + energyCross + energyBassBoost) - crossCorrection;
+ LOG(INFO) << " TOTAL energy estimation: " << totalEnergyEstimation << " dB";
+
+ // roundoff
+ int maxLevelRound = (int)(totalEnergyEstimation + 0.99);
+ if (maxLevelRound + mVolume > 0) {
+ gainCorrection = maxLevelRound + mVolume;
+ }
+
+ params.VC_EffectLevel = mVolume - gainCorrection;
+ if (params.VC_EffectLevel < -96) {
+ params.VC_EffectLevel = -96;
+ }
+ LOG(INFO) << "\tVol: " << mVolume << ", GainCorrection: " << gainCorrection
+ << ", Actual vol: " << params.VC_EffectLevel;
+
+ /* Activate the initial settings */
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+
+ if (mFirstVolume) {
+            RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetVolumeNoSmoothing(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setVolumeNoSmoothingFailed");
+ LOG(INFO) << "\tLVM_VOLUME: Disabling Smoothing for first volume change to remove "
+ "spikes/clicks";
+ mFirstVolume = false;
+ }
+ }
+
+ return RetCode::SUCCESS;
+}
+
+bool BundleContext::isDeviceSupportedBassBoost(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+ return (device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER, ""} ||
+ device == AudioDeviceDescription{AudioDeviceType::OUT_CARKIT,
+ AudioDeviceDescription::CONNECTION_BT_SCO} ||
+ device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER,
+ AudioDeviceDescription::CONNECTION_BT_A2DP});
+}
+
+bool BundleContext::isDeviceSupportedVirtualizer(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+ return (device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_ANALOG} ||
+ device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_ANALOG} ||
+ device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_BT_A2DP} ||
+ device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_USB});
+}
+
+RetCode BundleContext::setOutputDevice(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device) {
+ mOutputDevice = device;
+ switch (mType) {
+ case lvm::BundleEffectType::BASS_BOOST:
+ if (isDeviceSupportedBassBoost(device)) {
+ // If a device doesn't support bass boost, the effect must be temporarily disabled.
+ // The effect must still report its original state as this can only be changed by
+ // the start/stop commands.
+ if (mEnabled) {
+ disableOperatingMode();
+ }
+ mBassTempDisabled = true;
+ } else {
+ // If a device supports bass boost and the effect has been temporarily disabled
+ // previously then re-enable it
+ if (!mEnabled) {
+ enableOperatingMode();
+ }
+ mBassTempDisabled = false;
+ }
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ if (isDeviceSupportedVirtualizer(device)) {
+ if (mEnabled) {
+ disableOperatingMode();
+ }
+ mVirtualizerTempDisabled = true;
+ } else {
+ if (!mEnabled) {
+ enableOperatingMode();
+ }
+ mVirtualizerTempDisabled = false;
+ }
+ break;
+ default:
+ break;
+ }
return RetCode::SUCCESS;
}
@@ -144,31 +399,32 @@
int rightdB = VolToDb(volume.right);
int maxdB = std::max(leftdB, rightdB);
int pandB = rightdB - leftdB;
- // TODO: add volume effect implementation here:
- // android::VolumeSetVolumeLevel(pContext, (int16_t)(maxdB * 100));
+ setVolumeLevel(maxdB * 100);
LOG(DEBUG) << __func__ << " pandB: " << pandB << " maxdB " << maxdB;
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "");
+ {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, ¶ms),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "");
+ params.VC_Balance = pandB;
- params.VC_Balance = pandB;
-
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, "");
-
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, "");
+ }
mVolumeStereo = volume;
return RetCode::SUCCESS;
}
-RetCode BundleContext::setEqualizerPreset(const int presetIdx) {
+RetCode BundleContext::setEqualizerPreset(const std::size_t presetIdx) {
if (presetIdx < 0 || presetIdx >= lvm::MAX_NUM_PRESETS) {
return RetCode::ERROR_ILLEGAL_PARAMETER;
}
std::vector<Equalizer::BandLevel> bandLevels;
bandLevels.reserve(lvm::MAX_NUM_BANDS);
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
- bandLevels.emplace_back(Equalizer::BandLevel{i, lvm::kSoftPresets[presetIdx][i]});
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ bandLevels.emplace_back(
+ Equalizer::BandLevel{static_cast<int32_t>(i), lvm::kSoftPresets[presetIdx][i]});
}
RetCode ret = updateControlParameter(bandLevels);
@@ -197,8 +453,8 @@
std::vector<Equalizer::BandLevel> BundleContext::getEqualizerBandLevels() const {
std::vector<Equalizer::BandLevel> bandLevels;
bandLevels.reserve(lvm::MAX_NUM_BANDS);
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
- bandLevels.emplace_back(Equalizer::BandLevel{i, mBandGaindB[i]});
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ bandLevels.emplace_back(Equalizer::BandLevel{static_cast<int32_t>(i), mBandGaindB[i]});
}
return bandLevels;
}
@@ -221,23 +477,102 @@
}
LVM_ControlParams_t params;
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
- params.pEQNB_BandDefinition[i].Frequency = lvm::kPresetsFrequencies[i];
- params.pEQNB_BandDefinition[i].QFactor = lvm::kPresetsQFactors[i];
- params.pEQNB_BandDefinition[i].Gain = tempLevel[i];
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ params.pEQNB_BandDefinition[i].Frequency = lvm::kPresetsFrequencies[i];
+ params.pEQNB_BandDefinition[i].QFactor = lvm::kPresetsQFactors[i];
+ params.pEQNB_BandDefinition[i].Gain = tempLevel[i];
+ }
+
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
}
-
-    RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
- RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
mBandGaindB = tempLevel;
LOG(INFO) << __func__ << " update bandGain to " << ::android::internal::ToString(mBandGaindB);
return RetCode::SUCCESS;
}
+RetCode BundleContext::setBassBoostStrength(int strength) {
+ if (strength < 0 || strength > lvm::kBassBoostCap.maxStrengthPm) {
+ LOG(ERROR) << __func__ << " invalid strength: " << strength;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.BE_EffectLevel = (LVM_INT16)((15 * strength) / 1000);
+ params.BE_CentreFreq = LVM_BE_CENTRE_90Hz;
+
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mBassStrengthSaved = strength;
+ LOG(INFO) << __func__ << " success with strength " << strength;
+ return limitLevel();
+}
+
+RetCode BundleContext::setVolumeLevel(int level) {
+ if (level < lvm::kVolumeCap.minLevelDb || level > lvm::kVolumeCap.maxLevelDb) {
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ if (mMuteEnabled) {
+ mLevelSaved = level / 100;
+ } else {
+ mVolume = level / 100;
+ }
+ LOG(INFO) << __func__ << " success with level " << level;
+ return limitLevel();
+}
+
+int BundleContext::getVolumeLevel() const {
+ return (mMuteEnabled ? mLevelSaved * 100 : mVolume * 100);
+}
+
+RetCode BundleContext::setVolumeMute(bool mute) {
+ mMuteEnabled = mute;
+ if (mMuteEnabled) {
+ mLevelSaved = mVolume;
+ mVolume = -96;
+ } else {
+ mVolume = mLevelSaved;
+ }
+ return limitLevel();
+}
+
+RetCode BundleContext::setVirtualizerStrength(int strength) {
+ if (strength < 0 || strength > lvm::kVirtualizerCap.maxStrengthPm) {
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.CS_EffectLevel = ((strength * 32767) / 1000);
+
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+
+ mVirtStrengthSaved = strength;
+ LOG(INFO) << __func__ << " success with strength " << strength;
+ return limitLevel();
+}
+
void BundleContext::initControlParameter(LVM_ControlParams_t& params) const {
/* General parameters */
params.OperatingMode = LVM_MODE_ON;
@@ -296,7 +631,7 @@
static LVM_EQNB_BandDef_t* BandDefs = []() {
static LVM_EQNB_BandDef_t tempDefs[lvm::MAX_NUM_BANDS];
/* N-Band Equaliser parameters */
- for (int i = 0; i < lvm::MAX_NUM_BANDS; i++) {
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
tempDefs[i].Frequency = lvm::kPresetsFrequencies[i];
tempDefs[i].QFactor = lvm::kPresetsQFactors[i];
tempDefs[i].Gain = lvm::kSoftPresets[0/* normal */][i];
@@ -323,4 +658,141 @@
return HeadroomBandDef;
}
+IEffect::Status BundleContext::lvmProcess(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ int64_t inputFrameCount = getCommon().input.frameCount;
+ int64_t outputFrameCount = getCommon().output.frameCount;
+ RETURN_VALUE_IF(inputFrameCount != outputFrameCount, status, "FrameCountMismatch");
+ int isDataAvailable = true;
+
+ auto frameSize = getInputFrameSize();
+ RETURN_VALUE_IF(0 == frameSize, status, "zeroFrameSize");
+
+ LOG(DEBUG) << __func__ << " start processing";
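+ // Each bundle member sets its bit in mEffectProcessCalled when its process runs. If
+ // our own bit is already set, a new processing round is starting: any effect still
+ // flagged in mEffectInDrain that was not called last round is drained here so the
+ // enabled-effect count stays consistent.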
+ if ((mEffectProcessCalled & 1 << int(mType)) != 0) {
+ const int undrainedEffects = mEffectInDrain & ~mEffectProcessCalled;
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::EQUALIZER)) != 0) {
+ LOG(DEBUG) << "Draining EQUALIZER";
+ mSamplesToExitCountEq = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));
+ }
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::BASS_BOOST)) != 0) {
+ LOG(DEBUG) << "Draining BASS_BOOST";
+ mSamplesToExitCountBb = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+ }
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::VIRTUALIZER)) != 0) {
+ LOG(DEBUG) << "Draining VIRTUALIZER";
+ mSamplesToExitCountVirt = 0;
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+ }
+ if ((undrainedEffects & 1 << int(lvm::BundleEffectType::VOLUME)) != 0) {
+ LOG(DEBUG) << "Draining VOLUME";
+ --mNumberEffectsEnabled;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VOLUME));
+ }
+ }
+ mEffectProcessCalled |= 1 << int(mType);
+ if (!mEnabled) {
+ switch (mType) {
+ case lvm::BundleEffectType::EQUALIZER:
+ if (mSamplesToExitCountEq > 0) {
+ mSamplesToExitCountEq -= samples;
+ }
+ if (mSamplesToExitCountEq <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::EQUALIZER)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::EQUALIZER));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for EQUALIZER";
+ }
+ break;
+ case lvm::BundleEffectType::BASS_BOOST:
+ if (mSamplesToExitCountBb > 0) {
+ mSamplesToExitCountBb -= samples;
+ }
+ if (mSamplesToExitCountBb <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::BASS_BOOST)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::BASS_BOOST));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for BASS_BOOST";
+ }
+ break;
+ case lvm::BundleEffectType::VIRTUALIZER:
+ if (mSamplesToExitCountVirt > 0) {
+ mSamplesToExitCountVirt -= samples;
+ }
+ if (mSamplesToExitCountVirt <= 0) {
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::VIRTUALIZER)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VIRTUALIZER));
+ }
+ LOG(DEBUG) << "Effect_process() this is the last frame for VIRTUALIZER";
+ }
+ break;
+ case lvm::BundleEffectType::VOLUME:
+ isDataAvailable = false;
+ if ((mEffectInDrain & 1 << int(lvm::BundleEffectType::VOLUME)) != 0) {
+ mNumberEffectsEnabled--;
+ mEffectInDrain &= ~(1 << int(lvm::BundleEffectType::VOLUME));
+ }
+ LOG(DEBUG) << "Effect_process() LVM_VOLUME Effect is not enabled";
+ break;
+ }
+ }
+ if (isDataAvailable) {
+ mNumberEffectsCalled++;
+ }
+ bool accumulate = false;
+ if (mNumberEffectsCalled >= mNumberEffectsEnabled) {
+ // We expect the # effects called to be equal to # effects enabled in sequence (including
+ // draining effects). Warn if this is not the case due to inconsistent calls.
+ ALOGW_IF(mNumberEffectsCalled > mNumberEffectsEnabled,
+ "%s Number of effects called %d is greater than number of effects enabled %d",
+ __func__, mNumberEffectsCalled, mNumberEffectsEnabled);
+ mEffectProcessCalled = 0; // reset our consistency check.
+ if (!isDataAvailable) {
+ LOG(DEBUG) << "Effect_process() processing last frame";
+ }
+ mNumberEffectsCalled = 0;
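+ // samples counts individual float samples across all channels and frameSize is the
+ // input frame size in bytes (channel count * sizeof(float)), so e.g. 480 stereo
+ // samples with an 8-byte frame size become 240 frames for LVM_Process().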
+ LVM_UINT16 frames = samples * sizeof(float) / frameSize;
+ float* outTmp = (accumulate ? getWorkBuffer() : out);
+ /* Process the samples */
+ LVM_ReturnStatus_en lvmStatus;
+ {
+ std::lock_guard lg(mMutex);
+ lvmStatus = LVM_Process(mInstance, in, outTmp, frames, 0);
+ if (lvmStatus != LVM_SUCCESS) {
+ LOG(ERROR) << __func__ << lvmStatus;
+ return {EX_UNSUPPORTED_OPERATION, 0, 0};
+ }
+ if (accumulate) {
+ for (int i = 0; i < samples; i++) {
+ out[i] += outTmp[i];
+ }
+ }
+ }
+ } else {
+ for (int i = 0; i < samples; i++) {
+ if (accumulate) {
+ out[i] += in[i];
+ } else {
+ out[i] = in[i];
+ }
+ }
+ }
+ LOG(DEBUG) << __func__ << " done processing";
+ return {STATUS_OK, samples, samples};
+}
+
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
index 616ab78..be723f7 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
@@ -17,7 +17,9 @@
#pragma once
#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
#include <array>
+#include <cstddef>
#include "BundleTypes.h"
#include "effect-impl/EffectContext.h"
@@ -41,11 +43,11 @@
lvm::BundleEffectType getBundleType() const { return mType; }
RetCode enable();
+ RetCode enableOperatingMode();
RetCode disable();
+ RetCode disableOperatingMode();
- LVM_Handle_t getLvmInstance() const { return mInstance; }
-
- void setSampleRate (const int sampleRate) { mSampleRate = sampleRate; }
+ void setSampleRate(const int sampleRate) { mSampleRate = sampleRate; }
int getSampleRate() const { return mSampleRate; }
void setChannelMask(const aidl::android::media::audio::common::AudioChannelLayout& chMask) {
@@ -54,19 +56,42 @@
aidl::android::media::audio::common::AudioChannelLayout getChannelMask() const {
return mChMask;
}
+ bool isDeviceSupportedBassBoost(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device);
+ bool isDeviceSupportedVirtualizer(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device);
+ RetCode setOutputDevice(
+ const aidl::android::media::audio::common::AudioDeviceDescription& device) override;
- RetCode setEqualizerPreset(const int presetIdx);
+ RetCode setEqualizerPreset(const std::size_t presetIdx);
int getEqualizerPreset() const { return mCurPresetIdx; }
RetCode setEqualizerBandLevels(const std::vector<Equalizer::BandLevel>& bandLevels);
std::vector<Equalizer::BandLevel> getEqualizerBandLevels() const;
+ RetCode setBassBoostStrength(int strength);
+ int getBassBoostStrength() const { return mBassStrengthSaved; }
+
+ RetCode setVolumeLevel(int level);
+ int getVolumeLevel() const;
+
+ RetCode setVolumeMute(bool mute);
+ int getVolumeMute() const { return mMuteEnabled; }
+
+ RetCode setVirtualizerStrength(int strength);
+ int getVirtualizerStrength() const { return mVirtStrengthSaved; }
+
RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override;
- Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; };
+ Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ IEffect::Status processEffect(float* in, float* out, int sampleToProcess);
private:
+ std::mutex mMutex;
const lvm::BundleEffectType mType;
bool mEnabled = false;
- LVM_Handle_t mInstance = nullptr;
+ LVM_Handle_t mInstance GUARDED_BY(mMutex);
aidl::android::media::audio::common::AudioDeviceDescription mVirtualizerForcedDevice;
aidl::android::media::audio::common::AudioChannelLayout mChMask;
@@ -87,7 +112,7 @@
int mEffectProcessCalled = 0;
int mNumberEffectsEnabled = 0;
int mNumberEffectsCalled = 0;
- bool mFirstVolume = false;
+ bool mFirstVolume = true;
// Bass
bool mBassTempDisabled = false;
int mBassStrengthSaved = 0;
@@ -99,10 +124,12 @@
bool mVirtualizerTempDisabled = false;
// Volume
int mLevelSaved = 0; /* for when mute is set, level must be saved */
+ int mVolume = 0;
bool mMuteEnabled = false; /* Must store as mute = -96dB level */
void initControlParameter(LVM_ControlParams_t& params) const;
void initHeadroomParameter(LVM_HeadroomParams_t& params) const;
+ RetCode limitLevel();
int16_t VolToDb(uint32_t vol) const;
LVM_INT16 LVC_ToDB_s32Tos16(LVM_INT32 Lin_fix) const;
RetCode updateControlParameter(const std::vector<Equalizer::BandLevel>& bandLevels);
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
index 1772bd1..1996240 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
@@ -70,21 +70,75 @@
static const Equalizer::Capability kEqCap = {.bandFrequencies = kEqBandFrequency,
.presets = kEqPresets};
+static const std::string kEqualizerEffectName = "EqualizerBundle";
+
static const Descriptor kEqualizerDesc = {
- .common = {.id = {.type = EqualizerTypeUUID,
- .uuid = EqualizerBundleImplUUID,
- .proxy = std::nullopt},
+ .common = {.id = {.type = kEqualizerTypeUUID,
+ .uuid = kEqualizerBundleImplUUID,
+ .proxy = kEqualizerProxyUUID},
.flags = {.type = Flags::Type::INSERT,
.insert = Flags::Insert::FIRST,
.volume = Flags::Volume::CTRL},
- .name = "EqualizerBundle",
+ .name = kEqualizerEffectName,
.implementor = "NXP Software Ltd."},
.capability = Capability::make<Capability::equalizer>(kEqCap)};
-// TODO: add descriptors for other bundle effect types here.
-static const Descriptor kVirtualizerDesc;
-static const Descriptor kBassBoostDesc;
-static const Descriptor kVolumeDesc;
+static const bool mStrengthSupported = true;
+
+static const BassBoost::Capability kBassBoostCap = {.maxStrengthPm = 1000,
+ .strengthSupported = mStrengthSupported};
+
+static const std::string kBassBoostEffectName = "Dynamic Bass Boost";
+
+static const Descriptor kBassBoostDesc = {
+ .common = {.id = {.type = kBassBoostTypeUUID,
+ .uuid = kBassBoostBundleImplUUID,
+ .proxy = kBassBoostProxyUUID},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::FIRST,
+ .volume = Flags::Volume::CTRL,
+ .deviceIndication = true},
+ .cpuLoad = BASS_BOOST_CUP_LOAD_ARM9E,
+ .memoryUsage = BUNDLE_MEM_USAGE,
+ .name = kBassBoostEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::bassBoost>(kBassBoostCap)};
+
+static const Virtualizer::Capability kVirtualizerCap = {.maxStrengthPm = 1000,
+ .strengthSupported = mStrengthSupported};
+
+static const std::string kVirtualizerEffectName = "Virtualizer";
+
+static const Descriptor kVirtualizerDesc = {
+ .common = {.id = {.type = kVirtualizerTypeUUID,
+ .uuid = kVirtualizerBundleImplUUID,
+ .proxy = kVirtualizerProxyUUID},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::LAST,
+ .volume = Flags::Volume::CTRL,
+ .deviceIndication = true},
+ .cpuLoad = VIRTUALIZER_CUP_LOAD_ARM9E,
+ .memoryUsage = BUNDLE_MEM_USAGE,
+ .name = kVirtualizerEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::virtualizer>(kVirtualizerCap)};
+
+static const Volume::Capability kVolumeCap = {.minLevelDb = -9600, .maxLevelDb = 0};
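+// The level range appears to be in hundredths of a dB: -9600 corresponds to the -96 dB
+// floor BundleContext uses when muting.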
+
+static const std::string kVolumeEffectName = "Volume";
+
+static const Descriptor kVolumeDesc = {
+ .common = {.id = {.type = kVolumeTypeUUID,
+ .uuid = kVolumeBundleImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::LAST,
+ .volume = Flags::Volume::CTRL},
+ .cpuLoad = VOLUME_CUP_LOAD_ARM9E,
+ .memoryUsage = BUNDLE_MEM_USAGE,
+ .name = kVolumeEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::volume>(kVolumeCap)};
/* The following tables have been computed using the actual levels measured by the output of
* white noise or pink noise (IEC268-1) for the EQ and BassBoost Effects. These are estimates of
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
index 569356f..81b8aca 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -29,15 +29,24 @@
#include <LVM.h>
#include <limits.h>
+using aidl::android::hardware::audio::effect::Descriptor;
using aidl::android::hardware::audio::effect::EffectBundleAidl;
-using aidl::android::hardware::audio::effect::EqualizerBundleImplUUID;
using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kBassBoostBundleImplUUID;
+using aidl::android::hardware::audio::effect::kEqualizerBundleImplUUID;
+using aidl::android::hardware::audio::effect::kVirtualizerBundleImplUUID;
+using aidl::android::hardware::audio::effect::kVolumeBundleImplUUID;
using aidl::android::hardware::audio::effect::State;
using aidl::android::media::audio::common::AudioUuid;
+bool isUuidSupported(const AudioUuid* uuid) {
+ return (*uuid == kEqualizerBundleImplUUID || *uuid == kBassBoostBundleImplUUID ||
+ *uuid == kVirtualizerBundleImplUUID || *uuid == kVolumeBundleImplUUID);
+}
+
extern "C" binder_exception_t createEffect(const AudioUuid* uuid,
std::shared_ptr<IEffect>* instanceSpp) {
- if (uuid == nullptr || *uuid != EqualizerBundleImplUUID) {
+ if (uuid == nullptr || !isUuidSupported(uuid)) {
LOG(ERROR) << __func__ << "uuid not supported";
return EX_ILLEGAL_ARGUMENT;
}
@@ -51,19 +60,20 @@
}
}
-extern "C" binder_exception_t destroyEffect(const std::shared_ptr<IEffect>& instanceSp) {
- if (!instanceSp) {
- LOG(ERROR) << __func__ << "nullInstance";
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || !isUuidSupported(in_impl_uuid)) {
+ LOG(ERROR) << __func__ << "uuid not supported";
return EX_ILLEGAL_ARGUMENT;
}
- State state;
- ndk::ScopedAStatus status = instanceSp->getState(&state);
- if (!status.isOk() || State::INIT != state) {
- LOG(ERROR) << __func__ << " instance " << instanceSp.get()
- << " in state: " << toString(state) << ", status: " << status.getDescription();
- return EX_ILLEGAL_STATE;
+ if (*in_impl_uuid == kEqualizerBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kEqualizerDesc;
+ } else if (*in_impl_uuid == kBassBoostBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kBassBoostDesc;
+ } else if (*in_impl_uuid == kVirtualizerBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kVirtualizerDesc;
+ } else if (*in_impl_uuid == kVolumeBundleImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kVolumeDesc;
}
- LOG(DEBUG) << __func__ << " instance " << instanceSp.get() << " destroyed";
return EX_NONE;
}
@@ -71,17 +81,29 @@
EffectBundleAidl::EffectBundleAidl(const AudioUuid& uuid) {
LOG(DEBUG) << __func__ << uuid.toString();
- if (uuid == EqualizerBundleImplUUID) {
+ if (uuid == kEqualizerBundleImplUUID) {
mType = lvm::BundleEffectType::EQUALIZER;
mDescriptor = &lvm::kEqualizerDesc;
+ mEffectName = &lvm::kEqualizerEffectName;
+ } else if (uuid == kBassBoostBundleImplUUID) {
+ mType = lvm::BundleEffectType::BASS_BOOST;
+ mDescriptor = &lvm::kBassBoostDesc;
+ mEffectName = &lvm::kBassBoostEffectName;
+ } else if (uuid == kVirtualizerBundleImplUUID) {
+ mType = lvm::BundleEffectType::VIRTUALIZER;
+ mDescriptor = &lvm::kVirtualizerDesc;
+ mEffectName = &lvm::kVirtualizerEffectName;
+ } else if (uuid == kVolumeBundleImplUUID) {
+ mType = lvm::BundleEffectType::VOLUME;
+ mDescriptor = &lvm::kVolumeDesc;
+ mEffectName = &lvm::kVolumeEffectName;
} else {
- // TODO: add other bundle effect types here.
- LOG(ERROR) << __func__ << uuid.toString() << " not supported yet!";
+ LOG(ERROR) << __func__ << uuid.toString() << " not supported!";
}
}
EffectBundleAidl::~EffectBundleAidl() {
- releaseContext();
+ cleanUp();
LOG(DEBUG) << __func__;
}
@@ -93,8 +115,8 @@
}
ndk::ScopedAStatus EffectBundleAidl::setParameterCommon(const Parameter& param) {
- std::lock_guard lg(mMutex);
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
auto tag = param.getTag();
switch (tag) {
case Parameter::common:
@@ -130,53 +152,128 @@
ndk::ScopedAStatus EffectBundleAidl::setParameterSpecific(const Parameter::Specific& specific) {
LOG(DEBUG) << __func__ << " specific " << specific.toString();
- auto tag = specific.getTag();
- RETURN_IF(tag != Parameter::Specific::equalizer, EX_ILLEGAL_ARGUMENT,
- "specificParamNotSupported");
- RETURN_IF(mContext == nullptr, EX_NULL_POINTER , "nullContext");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ auto tag = specific.getTag();
+ switch (tag) {
+ case Parameter::Specific::equalizer:
+ return setParameterEqualizer(specific);
+ case Parameter::Specific::bassBoost:
+ return setParameterBassBoost(specific);
+ case Parameter::Specific::virtualizer:
+ return setParameterVirtualizer(specific);
+ case Parameter::Specific::volume:
+ return setParameterVolume(specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "specificParamNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterEqualizer(const Parameter::Specific& specific) {
auto& eq = specific.get<Parameter::Specific::equalizer>();
auto eqTag = eq.getTag();
switch (eqTag) {
case Equalizer::preset:
RETURN_IF(mContext->setEqualizerPreset(eq.get<Equalizer::preset>()) != RetCode::SUCCESS,
EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
- break;
+ return ndk::ScopedAStatus::ok();
case Equalizer::bandLevels:
RETURN_IF(mContext->setEqualizerBandLevels(eq.get<Equalizer::bandLevels>()) !=
RetCode::SUCCESS,
EX_ILLEGAL_ARGUMENT, "setBandLevelsFailed");
- break;
+ return ndk::ScopedAStatus::ok();
default:
LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
"eqTagNotSupported");
}
- return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterBassBoost(const Parameter::Specific& specific) {
+ auto& bb = specific.get<Parameter::Specific::bassBoost>();
+ auto bbTag = bb.getTag();
+ switch (bbTag) {
+ case BassBoost::strengthPm: {
+ RETURN_IF(mContext->setBassBoostStrength(bb.get<BassBoost::strengthPm>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setStrengthFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default:
+ LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "bbTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterVirtualizer(const Parameter::Specific& specific) {
+ auto& vr = specific.get<Parameter::Specific::virtualizer>();
+ auto vrTag = vr.getTag();
+ switch (vrTag) {
+ case Virtualizer::strengthPm: {
+ RETURN_IF(mContext->setVirtualizerStrength(vr.get<Virtualizer::strengthPm>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setStrengthFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default:
+ LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "vrTagNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectBundleAidl::setParameterVolume(const Parameter::Specific& specific) {
+ auto& vol = specific.get<Parameter::Specific::volume>();
+ auto volTag = vol.getTag();
+ switch (volTag) {
+ case Volume::levelDb: {
+ RETURN_IF(mContext->setVolumeLevel(vol.get<Volume::levelDb>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setLevelFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case Volume::mute:
+ RETURN_IF(mContext->setVolumeMute(vol.get<Volume::mute>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMuteFailed");
+ return ndk::ScopedAStatus::ok();
+ default:
+ LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "volTagNotSupported");
+ }
}
ndk::ScopedAStatus EffectBundleAidl::getParameterSpecific(const Parameter::Id& id,
Parameter::Specific* specific) {
RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
auto tag = id.getTag();
- RETURN_IF(Parameter::Id::equalizerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
- auto eqId = id.get<Parameter::Id::equalizerTag>();
- auto eqIdTag = eqId.getTag();
- switch (eqIdTag) {
- case Equalizer::Id::commonTag:
- return getParameterEqualizer(eqId.get<Equalizer::Id::commonTag>(), specific);
+
+ switch (tag) {
+ case Parameter::Id::equalizerTag:
+ return getParameterEqualizer(id.get<Parameter::Id::equalizerTag>(), specific);
+ case Parameter::Id::bassBoostTag:
+ return getParameterBassBoost(id.get<Parameter::Id::bassBoostTag>(), specific);
+ case Parameter::Id::virtualizerTag:
+ return getParameterVirtualizer(id.get<Parameter::Id::virtualizerTag>(), specific);
+ case Parameter::Id::volumeTag:
+ return getParameterVolume(id.get<Parameter::Id::volumeTag>(), specific);
default:
- LOG(ERROR) << __func__ << " tag " << toString(eqIdTag) << " not supported";
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
- "EqualizerTagNotSupported");
+ "wrongIdTag");
}
}
-ndk::ScopedAStatus EffectBundleAidl::getParameterEqualizer(const Equalizer::Tag& tag,
+ndk::ScopedAStatus EffectBundleAidl::getParameterEqualizer(const Equalizer::Id& id,
Parameter::Specific* specific) {
- std::lock_guard lg(mMutex);
+ RETURN_IF(id.getTag() != Equalizer::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "EqualizerTagNotSupported");
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
Equalizer eqParam;
+
+ auto tag = id.get<Equalizer::Id::commonTag>();
switch (tag) {
case Equalizer::bandLevels: {
eqParam.set<Equalizer::bandLevels>(mContext->getEqualizerBandLevels());
@@ -197,15 +294,96 @@
return ndk::ScopedAStatus::ok();
}
+ndk::ScopedAStatus EffectBundleAidl::getParameterBassBoost(const BassBoost::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != BassBoost::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "BassBoostTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ BassBoost bbParam;
+
+ auto tag = id.get<BassBoost::Id::commonTag>();
+ switch (tag) {
+ case BassBoost::strengthPm: {
+ bbParam.set<BassBoost::strengthPm>(mContext->getBassBoostStrength());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "BassBoostTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::bassBoost>(bbParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getParameterVolume(const Volume::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != Volume::Id::commonTag, EX_ILLEGAL_ARGUMENT, "VolumeTagNotSupported");
+
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ Volume volParam;
+
+ auto tag = id.get<Volume::Id::commonTag>();
+ switch (tag) {
+ case Volume::levelDb: {
+ volParam.set<Volume::levelDb>(mContext->getVolumeLevel());
+ break;
+ }
+ case Volume::mute: {
+ volParam.set<Volume::mute>(mContext->getVolumeMute());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "VolumeTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::volume>(volParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectBundleAidl::getParameterVirtualizer(const Virtualizer::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != Virtualizer::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "VirtualizerTagNotSupported");
+
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ Virtualizer vrParam;
+
+ auto tag = id.get<Virtualizer::Id::commonTag>();
+ switch (tag) {
+ case Virtualizer::strengthPm: {
+ vrParam.set<Virtualizer::strengthPm>(mContext->getVirtualizerStrength());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "VirtualizerTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::virtualizer>(vrParam);
+ return ndk::ScopedAStatus::ok();
+}
+
std::shared_ptr<EffectContext> EffectBundleAidl::createContext(const Parameter::Common& common) {
if (mContext) {
LOG(DEBUG) << __func__ << " context already exist";
- return mContext;
+ } else {
+ // GlobalSession is a singleton
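+ // The session is expected to hand back a shared BundleContext so that the equalizer,
+ // bass boost, virtualizer and volume instances on the same audio session drive a
+ // single LVM engine.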
+ mContext = GlobalSession::getGlobalSession().createSession(mType, 1 /* statusFmqDepth */,
+ common);
}
- // GlobalSession is a singleton
- mContext =
- GlobalSession::getGlobalSession().createSession(mType, 1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+std::shared_ptr<EffectContext> EffectBundleAidl::getContext() {
return mContext;
}
@@ -217,29 +395,32 @@
return RetCode::SUCCESS;
}
+ndk::ScopedAStatus EffectBundleAidl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
// Processing method running in EffectWorker thread.
IEffect::Status EffectBundleAidl::effectProcessImpl(float* in, float* out, int sampleToProcess) {
- LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << sampleToProcess;
- if (!mContext) {
- LOG(ERROR) << __func__ << " nullContext";
- return {EX_NULL_POINTER, 0, 0};
- }
-
- auto frameSize = mContext->getInputFrameSize();
- if (0 == frameSize) {
- LOG(ERROR) << __func__ << " frameSizeIs0";
- return {EX_ILLEGAL_ARGUMENT, 0, 0};
- }
-
- LOG(DEBUG) << __func__ << " start processing";
- LVM_UINT16 frames = sampleToProcess * sizeof(float) / frameSize;
- LVM_ReturnStatus_en lvmStatus = LVM_Process(mContext->getLvmInstance(), in, out, frames, 0);
- if (lvmStatus != LVM_SUCCESS) {
- LOG(ERROR) << __func__ << lvmStatus;
- return {EX_UNSUPPORTED_OPERATION, 0, 0};
- }
- LOG(DEBUG) << __func__ << " done processing";
- return {STATUS_OK, sampleToProcess, sampleToProcess};
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, sampleToProcess);
}
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
index 5fdf301..0330e5a 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.h
@@ -18,7 +18,6 @@
#include <functional>
#include <map>
#include <memory>
-#include <mutex>
#include <aidl/android/hardware/audio/effect/BnEffect.h>
#include <android-base/logging.h>
@@ -42,32 +41,37 @@
ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
Parameter::Specific* specific) override;
- IEffect::Status effectProcessImpl(float *in, float *out, int process) override;
std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ std::shared_ptr<EffectContext> getContext() override;
RetCode releaseContext() override;
- ndk::ScopedAStatus commandStart() override {
- mContext->enable();
- return ndk::ScopedAStatus::ok();
- }
- ndk::ScopedAStatus commandStop() override {
- mContext->disable();
- return ndk::ScopedAStatus::ok();
- }
- ndk::ScopedAStatus commandReset() override {
- mContext->disable();
- return ndk::ScopedAStatus::ok();
- }
+ IEffect::Status effectProcessImpl(float* in, float* out, int samples) override;
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+
+ std::string getEffectName() override { return *mEffectName; }
private:
- const Descriptor* mDescriptor;
- lvm::BundleEffectType mType = lvm::BundleEffectType::EQUALIZER;
std::shared_ptr<BundleContext> mContext;
+ const Descriptor* mDescriptor;
+ const std::string* mEffectName;
+ lvm::BundleEffectType mType = lvm::BundleEffectType::EQUALIZER;
IEffect::Status status(binder_status_t status, size_t consumed, size_t produced);
- ndk::ScopedAStatus getParameterEqualizer(const Equalizer::Tag& tag,
+
+ ndk::ScopedAStatus setParameterBassBoost(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterBassBoost(const BassBoost::Id& id,
Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterEqualizer(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterEqualizer(const Equalizer::Id& id,
+ Parameter::Specific* specific);
+ ndk::ScopedAStatus setParameterVolume(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterVolume(const Volume::Id& id, Parameter::Specific* specific);
+ ndk::ScopedAStatus setParameterVirtualizer(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterVirtualizer(const Virtualizer::Id& id,
+ Parameter::Specific* specific);
};
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h b/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h
index 9226274..d31763b 100644
--- a/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h
+++ b/media/libeffects/lvm/wrapper/Aidl/GlobalSession.h
@@ -21,6 +21,7 @@
#include <unordered_map>
#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
#include "BundleContext.h"
#include "BundleTypes.h"
@@ -40,7 +41,10 @@
return instance;
}
- bool isSessionIdExist(int sessionId) const { return mSessionMap.count(sessionId); }
+ bool isSessionIdExist(int sessionId) {
+ std::lock_guard lg(mMutex);
+ return mSessionMap.count(sessionId);
+ }
static bool findBundleTypeInList(std::vector<std::shared_ptr<BundleContext>>& list,
const lvm::BundleEffectType& type, bool remove = false) {
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index a32188a..aef9295 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -122,6 +122,9 @@
shared_libs: [
"liblog",
],
+ cflags: [
+ "-Wthread-safety",
+ ],
visibility: [
"//hardware/interfaces/audio/aidl/default",
],
diff --git a/media/libeffects/preprocessing/PreProcessing.cpp b/media/libeffects/preprocessing/PreProcessing.cpp
index 61a2bf5..59f1fc3 100644
--- a/media/libeffects/preprocessing/PreProcessing.cpp
+++ b/media/libeffects/preprocessing/PreProcessing.cpp
@@ -102,7 +102,8 @@
uint32_t state; // current state (enum preproc_session_state)
int id; // audio session ID
int io; // handle of input stream this session is on
- webrtc::AudioProcessing* apm; // handle on webRTC audio processing module (APM)
+ rtc::scoped_refptr<webrtc::AudioProcessing>
+ apm; // handle on webRTC audio processing module (APM)
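+ // The scoped_refptr keeps the APM alive while any effect in the session uses it; the
+ // manual delete in the release paths further down is replaced by assigning nullptr.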
// Audio Processing module builder
webrtc::AudioProcessingBuilder ap_builder;
// frameCount represents the size of the buffers used for processing, and must represent 10ms.
@@ -260,9 +261,6 @@
ALOGV("Agc2Init");
effect->session->config = effect->session->apm->GetConfig();
effect->session->config.gain_controller2.fixed_digital.gain_db = 0.f;
- effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- effect->session->config.gain_controller2.kRms;
- effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db = 2.f;
effect->session->apm->ApplyConfig(effect->session->config);
return 0;
}
@@ -332,24 +330,19 @@
ALOGV("Agc2GetParameter() target level %f dB", *(float*)pValue);
break;
case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
- *(uint32_t*)pValue = (uint32_t)(
- effect->session->config.gain_controller2.adaptive_digital.level_estimator);
- ALOGV("Agc2GetParameter() level estimator %d",
- *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
+ // WebRTC only supports the RMS level estimator now
+ *(uint32_t*)pValue = (uint32_t)(0);
+ ALOGV("Agc2GetParameter() level estimator RMS");
break;
case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
- *(float*)pValue = (float)(effect->session->config.gain_controller2.adaptive_digital
- .extra_saturation_margin_db);
+ *(float*)pValue = (float)(2.0);
ALOGV("Agc2GetParameter() extra saturation margin %f dB", *(float*)pValue);
break;
case AGC2_PARAM_PROPERTIES:
pProperties->fixedDigitalGain =
(float)(effect->session->config.gain_controller2.fixed_digital.gain_db);
- pProperties->level_estimator = (uint32_t)(
- effect->session->config.gain_controller2.adaptive_digital.level_estimator);
- pProperties->extraSaturationMargin =
- (float)(effect->session->config.gain_controller2.adaptive_digital
- .extra_saturation_margin_db);
+ pProperties->level_estimator = 0;
+ pProperties->extraSaturationMargin = 2.0;
break;
default:
ALOGW("Agc2GetParameter() unknown param %d", param);
@@ -438,16 +431,19 @@
effect->session->config.gain_controller2.fixed_digital.gain_db = valueFloat;
break;
case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR:
- ALOGV("Agc2SetParameter() level estimator %d",
- *(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
- effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- (*(webrtc::AudioProcessing::Config::GainController2::LevelEstimator*)pValue);
+ ALOGV("Agc2SetParameter() level estimator %d", *(uint32_t*)pValue);
+ if (*(uint32_t*)pValue != 0) {
+ // only RMS is supported
+ status = -EINVAL;
+ }
break;
case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN:
valueFloat = (float)(*(int32_t*)pValue);
ALOGV("Agc2SetParameter() extra saturation margin %f dB", valueFloat);
- effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
- valueFloat;
+ if (valueFloat != 2.0) {
+ // extra_saturation_margin_db is no longer configurable in webrtc
+ status = -EINVAL;
+ }
break;
case AGC2_PARAM_PROPERTIES:
ALOGV("Agc2SetParameter() properties gain %f, level %d margin %f",
@@ -455,11 +451,9 @@
pProperties->extraSaturationMargin);
effect->session->config.gain_controller2.fixed_digital.gain_db =
pProperties->fixedDigitalGain;
- effect->session->config.gain_controller2.adaptive_digital.level_estimator =
- (webrtc::AudioProcessing::Config::GainController2::LevelEstimator)
- pProperties->level_estimator;
- effect->session->config.gain_controller2.adaptive_digital.extra_saturation_margin_db =
- pProperties->extraSaturationMargin;
+ if (pProperties->level_estimator != 0 || pProperties->extraSaturationMargin != 2.0) {
+ status = -EINVAL;
+ }
break;
default:
ALOGW("Agc2SetParameter() unknown param %08x value %08x", param, *(uint32_t*)pValue);
@@ -879,8 +873,8 @@
error:
if (session->createdMsk == 0) {
- delete session->apm;
- session->apm = NULL;
+ // scoped_refptr handles the reference counting here
+ session->apm = nullptr;
}
return status;
}
@@ -889,8 +883,8 @@
ALOGW_IF(Effect_Release(fx) != 0, " Effect_Release() failed for proc ID %d", fx->procId);
session->createdMsk &= ~(1 << fx->procId);
if (session->createdMsk == 0) {
- delete session->apm;
- session->apm = NULL;
+ // scoped_refptr handles the reference counting here
+ session->apm = nullptr;
session->id = 0;
}
diff --git a/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp b/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp
index 07006a1..8fdb864 100644
--- a/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/EffectPreprocessingTest.cpp
@@ -143,10 +143,6 @@
const AGC2Params* agc2Params = &params->agc2Params;
ASSERT_NO_FATAL_FAILURE(
effect.setParam(AGC2_PARAM_FIXED_DIGITAL_GAIN, agc2Params->fixedDigitalGain));
- ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
- agc2Params->adaptDigiLevelEstimator));
- ASSERT_NO_FATAL_FAILURE(effect.setParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
- agc2Params->extraSaturationMargin));
} else if (isAECEffect(uuid)) {
const AECParams* aecParams = &params->aecParams;
ASSERT_NO_FATAL_FAILURE(effect.setParam(AEC_PARAM_ECHO_DELAY, aecParams->echoDelay));
diff --git a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
index 3bd93f8..03400d7 100644
--- a/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
+++ b/media/libeffects/preprocessing/tests/PreProcessingTest.cpp
@@ -400,20 +400,6 @@
ALOGE("Invalid AGC2 Fixed Digital Gain. Error %d\n", status);
return EXIT_FAILURE;
}
- if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR,
- (uint32_t)preProcCfgParams.agc2Level,
- effectHandle[PREPROC_AGC2]);
- status != 0) {
- ALOGE("Invalid AGC2 Level Estimator. Error %d\n", status);
- return EXIT_FAILURE;
- }
- if (int status = preProcSetConfigParam(AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN,
- (float)preProcCfgParams.agc2SaturationMargin,
- effectHandle[PREPROC_AGC2]);
- status != 0) {
- ALOGE("Invalid AGC2 Saturation Margin. Error %d\n", status);
- return EXIT_FAILURE;
- }
}
if (effectEn[PREPROC_NS]) {
if (int status = preProcSetConfigParam(NS_PARAM_LEVEL, (uint32_t)preProcCfgParams.nsLevel,
diff --git a/media/libeffects/visualizer/Android.bp b/media/libeffects/visualizer/Android.bp
index 8dd6789..fb9d7b7 100644
--- a/media/libeffects/visualizer/Android.bp
+++ b/media/libeffects/visualizer/Android.bp
@@ -18,34 +18,59 @@
],
}
-cc_library_shared {
- name: "libvisualizer",
-
+cc_defaults {
+ name: "visualizer_defaults",
vendor: true,
-
- srcs: [
- "EffectVisualizer.cpp",
- ],
-
cflags: [
- "-O2",
- "-fvisibility=hidden",
-
- "-DBUILD_FLOAT",
+ "-DBUILD_FLOAT", // TODO: remove BUILD_FLOAT and SUPPORT_MC in lvm libs
"-DSUPPORT_MC",
-
"-Wall",
"-Werror",
],
-
shared_libs: [
"liblog",
],
-
- relative_install_path: "soundfx",
-
header_libs: [
"libaudioeffects",
"libaudioutils_headers",
],
}
+
+cc_library_shared {
+ name: "libvisualizer",
+ defaults: [
+ "visualizer_defaults",
+ ],
+ srcs: [
+ "EffectVisualizer.cpp",
+ ],
+ relative_install_path: "soundfx",
+ cflags: [
+ "-O2",
+ "-fvisibility=hidden",
+ ],
+}
+
+cc_library_shared {
+ name: "libvisualizeraidl",
+ srcs: [
+ "aidl/Visualizer.cpp",
+ "aidl/VisualizerContext.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioeffectservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ "visualizer_defaults",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ shared_libs: [
+ "libcutils",
+ ],
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/visualizer/aidl/Visualizer.cpp b/media/libeffects/visualizer/aidl/Visualizer.cpp
new file mode 100644
index 0000000..28a7287
--- /dev/null
+++ b/media/libeffects/visualizer/aidl/Visualizer.cpp
@@ -0,0 +1,258 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_VisualizerLibEffects"
+
+#include <android-base/logging.h>
+#include "Visualizer.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::VisualizerImpl;
+using aidl::android::hardware::audio::effect::kVisualizerImplUUID;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kVisualizerImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<VisualizerImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kVisualizerImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = VisualizerImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string VisualizerImpl::kEffectName = "Visualizer";
+const Visualizer::Capability VisualizerImpl::kCapability = {
+ .maxLatencyMs = VisualizerContext::kMaxLatencyMs,
+ .captureSampleRange = {.min = 0, .max = VisualizerContext::kMaxCaptureBufSize}};
+const Descriptor VisualizerImpl::kDescriptor = {
+ .common = {.id = {.type = kVisualizerTypeUUID,
+ .uuid = kVisualizerImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::LAST,
+ .volume = Flags::Volume::CTRL},
+ .name = VisualizerImpl::kEffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = Capability::make<Capability::visualizer>(VisualizerImpl::kCapability)};
+
+ndk::ScopedAStatus VisualizerImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus VisualizerImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus VisualizerImpl::setOnlyParameter(
+ const Visualizer::SetOnlyParameters& param) {
+ auto tag = param.getTag();
+ switch (tag) {
+ case Visualizer::SetOnlyParameters::latencyMs: {
+ RETURN_IF(mContext->setDownstreamLatency(
+ param.get<Visualizer::SetOnlyParameters::latencyMs>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setLatencyFailed");
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "setOnlyParameterTagNotSupported");
+ }
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus VisualizerImpl::setParameterSpecific(const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::visualizer != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& param = specific.get<Parameter::Specific::visualizer>();
+ const auto tag = param.getTag();
+ switch (tag) {
+ case Visualizer::captureSamples: {
+ RETURN_IF(mContext->setCaptureSamples(param.get<Visualizer::captureSamples>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setCaptureSizeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case Visualizer::scalingMode: {
+ RETURN_IF(mContext->setScalingMode(param.get<Visualizer::scalingMode>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setScalingModeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case Visualizer::measurementMode: {
+ RETURN_IF(mContext->setMeasurementMode(param.get<Visualizer::measurementMode>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMeasurementModeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case Visualizer::setOnlyParameters: {
+ return setOnlyParameter(param.get<Visualizer::setOnlyParameters>());
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "VisualizerTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus VisualizerImpl::getOnlyParameter(const Visualizer::GetOnlyParameters::Tag tag,
+ Parameter::Specific* specific) {
+ Visualizer visualizer;
+ Visualizer::GetOnlyParameters param;
+ switch (tag) {
+ case Visualizer::GetOnlyParameters::measurement: {
+ param.set<Visualizer::GetOnlyParameters::measurement>(mContext->getMeasure());
+ break;
+ }
+ case Visualizer::GetOnlyParameters::captureSampleBuffer: {
+ param.set<Visualizer::GetOnlyParameters::captureSampleBuffer>(mContext->capture());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "setOnlyParameterTagNotSupported");
+ }
+ }
+ visualizer.set<Visualizer::getOnlyParameters>(param);
+ specific->set<Parameter::Specific::visualizer>(visualizer);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus VisualizerImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::visualizerTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto specificId = id.get<Parameter::Id::visualizerTag>();
+ auto specificTag = specificId.getTag();
+ switch (specificTag) {
+ case Visualizer::Id::commonTag: {
+ return getParameterVisualizer(specificId.get<Visualizer::Id::commonTag>(), specific);
+ }
+ case Visualizer::Id::getOnlyParamTag: {
+ return getOnlyParameter(specificId.get<Visualizer::Id::getOnlyParamTag>(), specific);
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(specificTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "VisualizerTagNotSupported");
+ }
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus VisualizerImpl::getParameterVisualizer(const Visualizer::Tag& tag,
+ Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ Visualizer param;
+ switch (tag) {
+ case Visualizer::captureSamples: {
+ param.set<Visualizer::captureSamples>(mContext->getCaptureSamples());
+ break;
+ }
+ case Visualizer::scalingMode: {
+ param.set<Visualizer::scalingMode>(mContext->getScalingMode());
+ break;
+ }
+ case Visualizer::measurementMode: {
+ param.set<Visualizer::measurementMode>(mContext->getMeasurementMode());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "VisualizerTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::visualizer>(param);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> VisualizerImpl::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<VisualizerContext>(1 /* statusFmqDepth */, common);
+ mContext->initParams(common);
+ return mContext;
+}
+
+RetCode VisualizerImpl::releaseContext() {
+ if (mContext) {
+ mContext->disable();
+ mContext->resetBuffer();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status VisualizerImpl::effectProcessImpl(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->process(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/visualizer/aidl/Visualizer.h b/media/libeffects/visualizer/aidl/Visualizer.h
new file mode 100644
index 0000000..5908d9a
--- /dev/null
+++ b/media/libeffects/visualizer/aidl/Visualizer.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+
+#include "VisualizerContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class VisualizerImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Visualizer::Capability kCapability;
+ static const Descriptor kDescriptor;
+ VisualizerImpl() { LOG(DEBUG) << __func__; }
+ ~VisualizerImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+ IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<VisualizerContext> mContext;
+ ndk::ScopedAStatus getParameterVisualizer(const Visualizer::Tag& tag,
+ Parameter::Specific* specific);
+ ndk::ScopedAStatus setOnlyParameter(const Visualizer::SetOnlyParameters& param);
+ ndk::ScopedAStatus getOnlyParameter(const Visualizer::GetOnlyParameters::Tag tag,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.cpp b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
new file mode 100644
index 0000000..1965e0e
--- /dev/null
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
@@ -0,0 +1,331 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "VisualizerContext.h"
+
+#include <algorithm>
+#include <android/binder_status.h>
+#include <audio_utils/primitives.h>
+#include <math.h>
+#include <system/audio.h>
+#include <time.h>
+#include <Utils.h>
+
+#ifndef BUILD_FLOAT
+ #error AIDL Visualizer only supports 32-bit float samples; make sure to add the -DBUILD_FLOAT cflag
+#endif
+
+using android::hardware::audio::common::getChannelCount;
+
+namespace aidl::android::hardware::audio::effect {
+
+VisualizerContext::VisualizerContext(int statusDepth, const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+}
+
+VisualizerContext::~VisualizerContext() {
+ std::lock_guard lg(mMutex);
+ LOG(DEBUG) << __func__;
+ mState = State::UNINITIALIZED;
+}
+
+RetCode VisualizerContext::initParams(const Parameter::Common& common) {
+ std::lock_guard lg(mMutex);
+ LOG(DEBUG) << __func__;
+ if (common.input != common.output) {
+ LOG(ERROR) << __func__ << " mismatch input: " << common.input.toString()
+ << " and output: " << common.output.toString();
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ mState = State::INITIALIZED;
+ auto channelCount = getChannelCount(common.input.base.channelMask);
+#ifdef SUPPORT_MC
+ if (channelCount < 1 || channelCount > FCC_LIMIT) return RetCode::ERROR_ILLEGAL_PARAMETER;
+#else
+ if (channelCount != FCC_2) return RetCode::ERROR_ILLEGAL_PARAMETER;
+#endif
+ mChannelCount = channelCount;
+ mCommon = common;
+ return RetCode::SUCCESS;
+}
+
+RetCode VisualizerContext::enable() {
+ std::lock_guard lg(mMutex);
+ if (mState != State::INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = State::ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode VisualizerContext::disable() {
+ std::lock_guard lg(mMutex);
+ if (mState != State::ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = State::INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void VisualizerContext::reset() {
+ std::lock_guard lg(mMutex);
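+ // 0x80 is the midpoint of the unsigned 8-bit capture format, i.e. silence.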
+ std::fill_n(mCaptureBuf.begin(), kMaxCaptureBufSize, 0x80);
+}
+
+RetCode VisualizerContext::setCaptureSamples(int samples) {
+ std::lock_guard lg(mMutex);
+ if (samples < 0 || (unsigned)samples > kMaxCaptureBufSize) {
+ LOG(ERROR) << __func__ << " captureSamples " << samples << " exceed valid range: 0 - "
+ << kMaxCaptureBufSize;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ mCaptureSamples = samples;
+ return RetCode::SUCCESS;
+}
+int VisualizerContext::getCaptureSamples() {
+ std::lock_guard lg(mMutex);
+ return mCaptureSamples;
+}
+
+RetCode VisualizerContext::setMeasurementMode(Visualizer::MeasurementMode mode) {
+ std::lock_guard lg(mMutex);
+ mMeasurementMode = mode;
+ return RetCode::SUCCESS;
+}
+Visualizer::MeasurementMode VisualizerContext::getMeasurementMode() {
+ std::lock_guard lg(mMutex);
+ return mMeasurementMode;
+}
+
+RetCode VisualizerContext::setScalingMode(Visualizer::ScalingMode mode) {
+ std::lock_guard lg(mMutex);
+ mScalingMode = mode;
+ return RetCode::SUCCESS;
+}
+Visualizer::ScalingMode VisualizerContext::getScalingMode() {
+ std::lock_guard lg(mMutex);
+ return mScalingMode;
+}
+
+RetCode VisualizerContext::setDownstreamLatency(int latency) {
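+ // The downstream latency reported via SetOnlyParameters::latencyMs is used in capture()
+ // to back the read position up so the returned waveform roughly lines up with what is
+ // currently audible.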
+ if (latency < 0 || (unsigned)latency > kMaxLatencyMs) {
+ LOG(ERROR) << __func__ << " latency " << latency << " exceed valid range: 0 - "
+ << kMaxLatencyMs;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ std::lock_guard lg(mMutex);
+ mDownstreamLatency = latency;
+ return RetCode::SUCCESS;
+}
+
+uint32_t VisualizerContext::getDeltaTimeMsFromUpdatedTime_l() {
+ uint32_t deltaMs = 0;
+ if (mBufferUpdateTime.tv_sec != 0) {
+ struct timespec ts;
+ if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0) {
+ time_t secs = ts.tv_sec - mBufferUpdateTime.tv_sec;
+ long nsec = ts.tv_nsec - mBufferUpdateTime.tv_nsec;
+ if (nsec < 0) {
+ --secs;
+ nsec += 1000000000;
+ }
+ deltaMs = secs * 1000 + nsec / 1000000;
+ }
+ }
+ return deltaMs;
+}
+
+Visualizer::GetOnlyParameters::Measurement VisualizerContext::getMeasure() {
+ uint16_t peakU16 = 0;
+ float sumRmsSquared = 0.0f;
+ uint8_t nbValidMeasurements = 0;
+
+ {
+ std::lock_guard lg(mMutex);
+ // reset measurements if last measurement was too long ago (which implies stored
+ // measurements aren't relevant anymore and shouldn't bias the new one)
+ const uint32_t delayMs = getDeltaTimeMsFromUpdatedTime_l();
+ if (delayMs > kDiscardMeasurementsTimeMs) {
+ LOG(INFO) << __func__ << " Discarding " << delayMs << " ms old measurements";
+ for (uint32_t i = 0; i < mMeasurementWindowSizeInBuffers; i++) {
+ mPastMeasurements[i].mIsValid = false;
+ mPastMeasurements[i].mPeakU16 = 0;
+ mPastMeasurements[i].mRmsSquared = 0;
+ }
+ mMeasurementBufferIdx = 0;
+ } else {
+ // only use actual measurements, otherwise the first RMS measure, happening before
+ // MEASUREMENT_WINDOW_MAX_SIZE_IN_BUFFERS buffers have been played, would always be
+ // artificially low
+ for (uint32_t i = 0; i < mMeasurementWindowSizeInBuffers; i++) {
+ if (mPastMeasurements[i].mIsValid) {
+ if (mPastMeasurements[i].mPeakU16 > peakU16) {
+ peakU16 = mPastMeasurements[i].mPeakU16;
+ }
+ sumRmsSquared += mPastMeasurements[i].mRmsSquared;
+ nbValidMeasurements++;
+ }
+ }
+ }
+ }
+
+ float rms = nbValidMeasurements == 0 ? 0.0f : sqrtf(sumRmsSquared / nbValidMeasurements);
+ Visualizer::GetOnlyParameters::Measurement measure;
+ // convert from I16 sample values to mB and write results
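+ // 2000 * log10(x / 32767) is 20*log10 (dB relative to int16 full scale) times 100,
+ // i.e. millibels; near-zero values are clamped to the -9600 mB floor.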
+ measure.rms = (rms < 0.000016f) ? -9600 : (int32_t)(2000 * log10(rms / 32767.0f));
+ measure.peak = (peakU16 == 0) ? -9600 : (int32_t)(2000 * log10(peakU16 / 32767.0f));
+ LOG(INFO) << __func__ << " peak " << peakU16 << " (" << measure.peak << "mB), rms " << rms
+ << " (" << measure.rms << "mB)";
+ return measure;
+}
+
+std::vector<uint8_t> VisualizerContext::capture() {
+ std::vector<uint8_t> result;
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(mState != State::ACTIVE, result, "illegalState");
+ const uint32_t deltaMs = getDeltaTimeMsFromUpdatedTime_l();
+
+ // if audio framework has stopped playing audio although the effect is still active we must
+ // clear the capture buffer to return silence
+ if ((mLastCaptureIdx == mCaptureIdx) && (mBufferUpdateTime.tv_sec != 0) &&
+ (deltaMs > kMaxStallTimeMs)) {
+ LOG(INFO) << __func__ << " capture going to idle";
+ mBufferUpdateTime.tv_sec = 0;
+ return result;
+ }
+ int32_t latencyMs = mDownstreamLatency;
+ latencyMs -= deltaMs;
+ if (latencyMs < 0) {
+ latencyMs = 0;
+ }
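+ // Read back far enough to cover the requested capture size plus the samples still queued in
+ // the downstream pipeline (remaining latency converted to samples at the input sample rate).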
+ uint32_t deltaSamples = mCaptureSamples + mCommon.input.base.sampleRate * latencyMs / 1000;
+
+ // A large sample rate, latency, or capture size could cause overflow.
+ // Do not offset by more than the size of the buffer.
+ if (deltaSamples > kMaxCaptureBufSize) {
+ android_errorWriteLog(0x534e4554, "31781965");
+ deltaSamples = kMaxCaptureBufSize;
+ }
+
+ int32_t capturePoint;
+ //capturePoint = (int32_t)mCaptureIdx - deltaSamples;
+ __builtin_sub_overflow((int32_t) mCaptureIdx, deltaSamples, &capturePoint);
+ // a negative capturePoint means we wrap the buffer.
+ if (capturePoint < 0) {
+ uint32_t size = -capturePoint;
+ if (size > mCaptureSamples) {
+ size = mCaptureSamples;
+ }
+ result.insert(result.end(), &mCaptureBuf[kMaxCaptureBufSize + capturePoint],
+ &mCaptureBuf[kMaxCaptureBufSize + capturePoint + size]);
+ mCaptureSamples -= size;
+ capturePoint = 0;
+ }
+ result.insert(result.end(), &mCaptureBuf[capturePoint],
+ &mCaptureBuf[capturePoint + mCaptureSamples]);
+ mLastCaptureIdx = mCaptureIdx;
+ return result;
+}
+
+IEffect::Status VisualizerContext::process(float* in, float* out, int samples) {
+ IEffect::Status result = {STATUS_NOT_ENOUGH_DATA, 0, 0};
+ RETURN_VALUE_IF(in == nullptr || out == nullptr || samples == 0, result, "dataBufferError");
+
+ std::lock_guard lg(mMutex);
+ result.status = STATUS_INVALID_OPERATION;
+ RETURN_VALUE_IF(mState != State::ACTIVE, result, "stateNotActive");
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+ // perform measurements if needed
+ if (mMeasurementMode == Visualizer::MeasurementMode::PEAK_RMS) {
+ // find the peak and RMS squared for the new buffer
+ float rmsSqAcc = 0;
+ float maxSample = 0.f;
+ for (size_t inIdx = 0; inIdx < (unsigned)samples; ++inIdx) {
+ maxSample = fmax(maxSample, fabs(in[inIdx]));
+ rmsSqAcc += in[inIdx] * in[inIdx];
+ }
+ maxSample *= 1 << 15; // scale to int16_t, with 1 << 15 representing the positive full scale.
+ rmsSqAcc *= 1 << 30; // scale to int16_t squared, i.e. (1 << 15) * (1 << 15)
+ mPastMeasurements[mMeasurementBufferIdx] = {
+ .mPeakU16 = (uint16_t)maxSample,
+ .mRmsSquared = rmsSqAcc / samples,
+ .mIsValid = true };
+ if (++mMeasurementBufferIdx >= mMeasurementWindowSizeInBuffers) {
+ mMeasurementBufferIdx = 0;
+ }
+ }
+
+ float fscale; // multiplicative scale
+ if (mScalingMode == Visualizer::ScalingMode::NORMALIZED) {
+ // derive capture scaling factor from peak value in current buffer
+ // this gives more interesting captures for display.
+ float maxSample = 0.f;
+ for (size_t inIdx = 0; inIdx < (unsigned)samples; ) {
+ // we reconstruct the actual summed value to ensure proper normalization
+ // for multichannel outputs (channels > 2 may often be 0).
+ float smp = 0.f;
+ for (int i = 0; i < mChannelCount; ++i) {
+ smp += in[inIdx++];
+ }
+ maxSample = fmax(maxSample, fabs(smp));
+ }
+ if (maxSample > 0.f) {
+ fscale = 0.99f / maxSample;
+ int exp; // unused
+ const float significand = frexp(fscale, &exp);
+ if (significand == 0.5f) {
+ fscale *= 255.f / 256.f; // avoid returning unaltered PCM signal
+ }
+ } else {
+ // scale doesn't matter, the values are all 0.
+ fscale = 1.f;
+ }
+ } else {
+ assert(mScalingMode == Visualizer::ScalingMode::AS_PLAYED);
+ // Note: if channels are uncorrelated, 1/sqrt(N) could be used at the risk of clipping.
+ fscale = 1.f / mChannelCount; // account for summing all the channels together.
+ }
+
+ uint32_t captIdx;
+ uint32_t inIdx;
+ for (inIdx = 0, captIdx = mCaptureIdx; inIdx < (unsigned)samples; captIdx++) {
+ // wrap
+ if (captIdx >= kMaxCaptureBufSize) {
+ captIdx = 0;
+ }
+
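+ // Sum all channels into a single mono sample, apply the capture scale, and store it as
+ // 8-bit PCM in the circular capture buffer.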
+ float smp = 0.f;
+ for (uint32_t i = 0; i < mChannelCount; ++i) {
+ smp += in[inIdx++];
+ }
+ mCaptureBuf[captIdx] = clamp8_from_float(smp * fscale);
+ }
+
+ // the following two should really be atomic, though it probably doesn't
+ // matter much for visualization purposes
+ mCaptureIdx = captIdx;
+ // update last buffer update time stamp
+ if (clock_gettime(CLOCK_MONOTONIC, &mBufferUpdateTime) < 0) {
+ mBufferUpdateTime.tv_sec = 0;
+ }
+
+ // TODO: handle access_mode
+ memcpy(out, in, samples * sizeof(float));
+ return {STATUS_OK, samples, samples};
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.h b/media/libeffects/visualizer/aidl/VisualizerContext.h
new file mode 100644
index 0000000..bfda0b9
--- /dev/null
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <audio_effects/effect_dynamicsprocessing.h>
+
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class VisualizerContext final : public EffectContext {
+ public:
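+ // Size of the circular capture buffer in samples; also the upper bound for the capture size.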
+ static const uint32_t kMaxCaptureBufSize = 65536;
+ static const uint32_t kMaxLatencyMs = 3000; // 3 seconds of latency for audio pipeline
+
+ VisualizerContext(int statusDepth, const Parameter::Common& common);
+ ~VisualizerContext();
+
+ RetCode initParams(const Parameter::Common& common);
+
+ RetCode enable();
+ RetCode disable();
+ // keep all parameters and reset buffer.
+ void reset();
+
+ RetCode setCaptureSamples(int captureSize);
+ int getCaptureSamples();
+ RetCode setMeasurementMode(Visualizer::MeasurementMode mode);
+ Visualizer::MeasurementMode getMeasurementMode();
+ RetCode setScalingMode(Visualizer::ScalingMode mode);
+ Visualizer::ScalingMode getScalingMode();
+ RetCode setDownstreamLatency(int latency);
+
+ IEffect::Status process(float* in, float* out, int samples);
+ // Gets the current measurements, measured by process() and consumed by getParameter()
+ Visualizer::GetOnlyParameters::Measurement getMeasure();
+ // Gets the latest PCM capture, data captured by process() and consumed by getParameter()
+ std::vector<uint8_t> capture();
+
+ struct BufferStats {
+ bool mIsValid;
+ uint16_t mPeakU16; // the positive peak of the absolute value of the samples in a buffer
+ float mRmsSquared; // the average square of the samples in a buffer
+ };
+
+ enum State {
+ UNINITIALIZED,
+ INITIALIZED,
+ ACTIVE,
+ };
+
+ private:
+ // maximum time since last capture buffer update before resetting capture buffer. This means
+ // that the framework has stopped playing audio and we must start returning silence
+ static const uint32_t kMaxStallTimeMs = 1000;
+ // discard measurements older than this number of ms
+ static const uint32_t kDiscardMeasurementsTimeMs = 2000;
+ // maximum number of buffers for which we keep track of the measurements
+ // note: buffer index is stored in uint8_t
+ static const uint32_t kMeasurementWindowMaxSizeInBuffers = 25;
+
+ // serialize process() and parameter setting
+ std::mutex mMutex;
+ Parameter::Common mCommon GUARDED_BY(mMutex);
+ State mState GUARDED_BY(mMutex) = State::UNINITIALIZED;
+ uint32_t mCaptureIdx GUARDED_BY(mMutex) = 0;
+ uint32_t mLastCaptureIdx GUARDED_BY(mMutex) = 0;
+ Visualizer::ScalingMode mScalingMode GUARDED_BY(mMutex) = Visualizer::ScalingMode::NORMALIZED;
+ struct timespec mBufferUpdateTime GUARDED_BY(mMutex);
+ // capture buffer holding 8-bit PCM samples
+ std::array<uint8_t, kMaxCaptureBufSize> mCaptureBuf GUARDED_BY(mMutex);
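+ // downstream pipeline latency reported via setDownstreamLatency(), in milliseconds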
+ uint32_t mDownstreamLatency GUARDED_BY(mMutex) = 0;
+ uint32_t mCaptureSamples GUARDED_BY(mMutex) = kMaxCaptureBufSize;
+
+ // to avoid recomputing it every time a buffer is processed
+ uint8_t mChannelCount GUARDED_BY(mMutex) = 0;
+ Visualizer::MeasurementMode mMeasurementMode GUARDED_BY(mMutex) =
+ Visualizer::MeasurementMode::NONE;
+ uint8_t mMeasurementWindowSizeInBuffers = kMeasurementWindowMaxSizeInBuffers;
+ uint8_t mMeasurementBufferIdx GUARDED_BY(mMutex) = 0;
+ std::array<BufferStats, kMeasurementWindowMaxSizeInBuffers> mPastMeasurements;
+ void init_params();
+
+ uint32_t getDeltaTimeMsFromUpdatedTime_l() REQUIRES(mMutex);
+};
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 2fc9ff9..101b825 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -88,10 +88,12 @@
}
void calculate(int64_t timestamp) override {
- // Handle the screen first, since it might trigger a recentering of the head.
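+ // The screen is considered stable by default when no screen pose has been received yet.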
+ bool screenStable = true;
+
+ // Handle the screen first, since it might trigger a recentering of the head.
if (mWorldToScreenTimestamp.has_value()) {
const Pose3f worldToLogicalScreen = mScreenPoseBias.getOutput();
- bool screenStable = mScreenStillnessDetector.calculate(timestamp);
+ screenStable = mScreenStillnessDetector.calculate(timestamp);
mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
// Whenever the screen is unstable, recenter the head pose.
if (!screenStable) {
@@ -105,7 +107,8 @@
if (mWorldToHeadTimestamp.has_value()) {
Pose3f worldToHead = mHeadPoseBias.getOutput();
// Auto-recenter.
- if (mHeadStillnessDetector.calculate(timestamp)) {
+ bool headStable = mHeadStillnessDetector.calculate(timestamp);
+ if (headStable || !screenStable) {
recenter(true, false);
worldToHead = mHeadPoseBias.getOutput();
}
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index bd8af04..31d469c 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -42,6 +42,7 @@
// Identifier to use for our event queue on the loop.
// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
+// Note: the SensorEventQueue's fd could be used here instead of a fixed number.
constexpr int kIdent = 19;
static inline Looper* ALooper_to_Looper(ALooper* alooper) {
@@ -60,7 +61,8 @@
EventQueueGuard(const sp<SensorEventQueue>& queue, Looper* looper) : mQueue(queue) {
mQueue->looper = Looper_to_ALooper(looper);
mQueue->requestAdditionalInfo = false;
- looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT, nullptr, nullptr);
+ looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT,
+ nullptr /* callback */, nullptr /* data */);
}
~EventQueueGuard() {
@@ -75,7 +77,7 @@
[[nodiscard]] SensorEventQueue* get() const { return mQueue.get(); }
private:
- sp<SensorEventQueue> mQueue;
+ const sp<SensorEventQueue> mQueue;
};
/**
@@ -95,10 +97,7 @@
}
}
- SensorEnableGuard(const SensorEnableGuard&) = delete;
- SensorEnableGuard& operator=(const SensorEnableGuard&) = delete;
-
- // Enable moving.
+ // Enable move; declaring the move operations implicitly deletes the copy-ctor/copy-assignment.
SensorEnableGuard(SensorEnableGuard&& other) : mQueue(other.mQueue), mSensor(other.mSensor) {
other.mSensor = SensorPoseProvider::INVALID_HANDLE;
}
@@ -122,6 +121,7 @@
~SensorPoseProviderImpl() override {
// Disable all active sensors.
mEnabledSensors.clear();
+ mQuit = true;
mLooper->wake();
mThread.join();
}
@@ -130,7 +130,7 @@
// Figure out the sensor's data format.
DataFormat format = getSensorFormat(sensor);
if (format == DataFormat::kUnknown) {
- ALOGE("Unknown format for sensor %" PRId32, sensor);
+ ALOGE("%s: Unknown format for sensor %" PRId32, __func__, sensor);
return false;
}
@@ -144,17 +144,19 @@
// Enable the sensor.
if (mQueue->enableSensor(sensor, samplingPeriod.count(), 0, 0)) {
- ALOGE("Failed to enable sensor");
+ ALOGE("%s: Failed to enable sensor %" PRId32, __func__, sensor);
std::lock_guard lock(mMutex);
mEnabledSensorsExtra.erase(sensor);
return false;
}
- mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue.get(), sensor));
+ mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue, sensor));
+ ALOGD("%s: Sensor %" PRId32 " started", __func__, sensor);
return true;
}
void stopSensor(int handle) override {
+ ALOGD("%s: Sensor %" PRId32 " stopped", __func__, handle);
mEnabledSensors.erase(handle);
std::lock_guard lock(mMutex);
mEnabledSensorsExtra.erase(handle);
@@ -217,13 +219,14 @@
std::optional<int32_t> discontinuityCount;
};
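+ // Requests the worker thread to exit its event loop; set by the destructor or when an
+ // unexpected poll result is encountered.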
+ bool mQuit = false;
sp<Looper> mLooper;
Listener* const mListener;
SensorManager* const mSensorManager;
std::timed_mutex mMutex;
+ sp<SensorEventQueue> mQueue;
std::map<int32_t, SensorEnableGuard> mEnabledSensors;
std::map<int32_t, SensorExtra> mEnabledSensorsExtra GUARDED_BY(mMutex);
- sp<SensorEventQueue> mQueue;
// We must do some of the initialization operations on the worker thread, because the API relies
// on the thread-local looper. In addition, as a matter of convenience, we store some of the
@@ -244,7 +247,13 @@
bool waitInitFinished() { return mInitPromise.get_future().get(); }
void threadFunc() {
- // Obtain looper.
+ // Name our std::thread to help identification. As is, canCallJava == false.
+ androidSetThreadName("SensorPoseProvider-looper");
+
+ // Run at the highest non-realtime priority.
+ androidSetThreadPriority(gettid(), PRIORITY_URGENT_AUDIO);
+
+ // The looper is started on the created std::thread.
mLooper = Looper::prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
// Create event queue.
@@ -260,20 +269,28 @@
initFinished(true);
- while (true) {
- int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
+ while (!mQuit) {
+ const int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr /* outFd */,
+ nullptr /* outEvents */, nullptr /* outData */);
switch (ret) {
case ALOOPER_POLL_WAKE:
- // Normal way to exit.
- return;
+ // Continue to see if mQuit flag is set.
+ // This wake can be spurious (e.g. due to a bugreport being taken).
+ continue;
case kIdent:
// Possible events on our queue.
break;
default:
- ALOGE("Unexpected status out of Looper::pollOnce: %d", ret);
+ // Besides WAKE and kIdent, there should be no timeouts, callbacks,
+ // ALOOPER_POLL_ERROR, or other events.
+ // Exit now to avoid high frequency log spam on error,
+ // e.g. if the fd becomes invalid (b/31093485).
+ ALOGE("%s: Unexpected status out of Looper::pollOnce: %d", __func__, ret);
+ mQuit = true;
+ continue;
}
// Process an event.
@@ -285,7 +302,8 @@
ssize_t size = mQueue->filterEvents(&event, actual);
if (size < 0 || size > 1) {
- ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+ ALOGE("%s: Unexpected return value from SensorEventQueue::filterEvents: %zd",
+ __func__, size);
break;
}
if (size == 0) {
@@ -295,6 +313,7 @@
handleEvent(event);
}
+ ALOGD("%s: Exiting sensor event loop", __func__);
}
void handleEvent(const ASensorEvent& event) {
diff --git a/media/libheadtracking/StillnessDetector.cpp b/media/libheadtracking/StillnessDetector.cpp
index be7c893..5232084 100644
--- a/media/libheadtracking/StillnessDetector.cpp
+++ b/media/libheadtracking/StillnessDetector.cpp
@@ -26,6 +26,9 @@
mFifo.clear();
mWindowFull = false;
mSuppressionDeadline.reset();
+ // A "true" state indicates stillness is detected (default = true)
+ mCurrentState = true;
+ mPreviousState = true;
}
void StillnessDetector::setInput(int64_t timestamp, const Pose3f& input) {
@@ -33,7 +36,15 @@
discardOld(timestamp);
}
+bool StillnessDetector::getPreviousState() const {
+ return mPreviousState;
+}
+
bool StillnessDetector::calculate(int64_t timestamp) {
+ // Move the current stillness state to the previous state.
+ // This allows us to detect transitions into and out of stillness.
+ mPreviousState = mCurrentState;
+
discardOld(timestamp);
// Check whether all the poses in the queue are in the proximity of the new one. We want to do
@@ -60,15 +71,17 @@
// If the window has not been full, return the default value.
if (!mWindowFull) {
- return mOptions.defaultValue;
+ mCurrentState = mOptions.defaultValue;
}
-
// Force "in motion" while the suppression deadline is active.
- if (mSuppressionDeadline.has_value()) {
- return false;
+ else if (mSuppressionDeadline.has_value()) {
+ mCurrentState = false;
+ }
+ else {
+ mCurrentState = !moved;
}
- return !moved;
+ return mCurrentState;
}
void StillnessDetector::discardOld(int64_t timestamp) {
diff --git a/media/libheadtracking/StillnessDetector.h b/media/libheadtracking/StillnessDetector.h
index ee4b2d8..abd7cc4 100644
--- a/media/libheadtracking/StillnessDetector.h
+++ b/media/libheadtracking/StillnessDetector.h
@@ -80,7 +80,8 @@
void setInput(int64_t timestamp, const Pose3f& input);
/** Calculate whether the stream is still at the given timestamp. */
bool calculate(int64_t timestamp);
-
+ /** Return the stillness state from the previous call to calculate() */
+ bool getPreviousState() const;
private:
struct TimestampedPose {
int64_t timestamp;
@@ -92,6 +93,8 @@
const float mCosHalfRotationalThreshold;
std::deque<TimestampedPose> mFifo;
bool mWindowFull = false;
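+ // Stillness result of the latest calculate() call and of the call before it, used to expose
+ // transitions via getPreviousState().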
+ bool mCurrentState = true;
+ bool mPreviousState = true;
// As soon as motion is detected, this will be set for the time of detection + window duration,
// and during this time we will always consider ourselves in motion without checking. This is
// used for hysteresis purposes, since because of the approximate method we use for determining
diff --git a/media/libmedia/tests/codeclist/CodecListTest.cpp b/media/libmedia/tests/codeclist/CodecListTest.cpp
index bd2adf7..75adcca 100644
--- a/media/libmedia/tests/codeclist/CodecListTest.cpp
+++ b/media/libmedia/tests/codeclist/CodecListTest.cpp
@@ -18,6 +18,8 @@
#define LOG_TAG "CodecListTest"
#include <utils/Log.h>
+#include <memory>
+
#include <gtest/gtest.h>
#include <binder/Parcel.h>
@@ -194,16 +196,15 @@
}
}
- Parcel *codecInfoParcel = new Parcel();
+ std::unique_ptr<Parcel> codecInfoParcel(new Parcel());
ASSERT_NE(codecInfoParcel, nullptr) << "Unable to create parcel";
- status_t status = info->writeToParcel(codecInfoParcel);
+ status_t status = info->writeToParcel(codecInfoParcel.get());
ASSERT_EQ(status, OK) << "Writing to parcel failed";
codecInfoParcel->setDataPosition(0);
sp<MediaCodecInfo> parcelCodecInfo = info->FromParcel(*codecInfoParcel);
ASSERT_NE(parcelCodecInfo, nullptr) << "CodecInfo from parcel is null";
- delete codecInfoParcel;
EXPECT_STREQ(info->getCodecName(), parcelCodecInfo->getCodecName())
<< "Returned codec name in info doesn't match";
diff --git a/media/libmediaplayerservice/StagefrightRecorder.cpp b/media/libmediaplayerservice/StagefrightRecorder.cpp
index ea1fdf4..a0bc8ca 100644
--- a/media/libmediaplayerservice/StagefrightRecorder.cpp
+++ b/media/libmediaplayerservice/StagefrightRecorder.cpp
@@ -1966,6 +1966,10 @@
format->setString("mime", MEDIA_MIMETYPE_VIDEO_HEVC);
break;
+ case VIDEO_ENCODER_DOLBY_VISION:
+ format->setString("mime", MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
+ break;
+
default:
CHECK(!"Should not be here, unsupported video encoding.");
break;
diff --git a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
index c3bd207..25a8ae4 100644
--- a/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
+++ b/media/libmediaplayerservice/nuplayer/AWakeLock.cpp
@@ -59,11 +59,10 @@
if (mPowerManager != NULL) {
sp<IBinder> binder = new BBinder();
int64_t token = IPCThreadState::self()->clearCallingIdentity();
- binder::Status status = mPowerManager->acquireWakeLock(
+ binder::Status status = mPowerManager->acquireWakeLockAsync(
binder, POWERMANAGER_PARTIAL_WAKE_LOCK,
String16("AWakeLock"), String16("media"),
- {} /* workSource */, {} /* historyTag */, -1 /* displayId */,
- nullptr /* callback */);
+ {} /* workSource */, {} /* historyTag */);
IPCThreadState::self()->restoreCallingIdentity(token);
if (status.isOk()) {
mWakeLockToken = binder;
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 6788b56..5e29b3f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -191,8 +191,8 @@
uint8_t key[kBlockSize],
uint8_t iv[kBlockSize],
CryptoPlugin::Mode mode,
- size_t *clearbytes,
- size_t *encryptedbytes)
+ uint32_t *clearbytes,
+ uint32_t *encryptedbytes)
{
// size needed to store all the crypto data
size_t cryptosize;
@@ -236,7 +236,7 @@
if (!meta.findData(kKeyEncryptedSizes, &type, &crypteddata, &cryptedsize)) {
return NULL;
}
- size_t numSubSamples = cryptedsize / sizeof(size_t);
+ size_t numSubSamples = cryptedsize / sizeof(uint32_t);
if (numSubSamples <= 0) {
ALOGE("getSampleCryptoInfo INVALID numSubSamples: %zu", numSubSamples);
@@ -285,8 +285,8 @@
(uint8_t*) key,
(uint8_t*) iv,
(CryptoPlugin::Mode)mode,
- (size_t*) cleardata,
- (size_t*) crypteddata);
+ (uint32_t*) cleardata,
+ (uint32_t*) crypteddata);
}
} // namespace android
diff --git a/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
index 4360656..232638c 100644
--- a/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
+++ b/media/libmediaplayerservice/nuplayer/include/nuplayer/NuPlayerDrm.h
@@ -106,8 +106,8 @@
uint8_t key[kBlockSize],
uint8_t iv[kBlockSize],
CryptoPlugin::Mode mode,
- size_t *clearbytes,
- size_t *encryptedbytes);
+ uint32_t *clearbytes,
+ uint32_t *encryptedbytes);
static CryptoInfo *getSampleCryptoInfo(MetaDataBase &meta);
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index ce4b4e6..42815b3 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -86,11 +86,14 @@
displayHeight = height;
}
- if (allocRotated && (rotationAngle == 90 || rotationAngle == 270)) {
- int32_t tmp;
- tmp = width; width = height; height = tmp;
- tmp = displayWidth; displayWidth = displayHeight; displayHeight = tmp;
- tmp = tileWidth; tileWidth = tileHeight; tileHeight = tmp;
+ if (allocRotated) {
+ if (rotationAngle == 90 || rotationAngle == 270) {
+ // swap width and height for 90 & 270 degrees rotation
+ std::swap(width, height);
+ std::swap(displayWidth, displayHeight);
+ std::swap(tileWidth, tileHeight);
+ }
+ // Rotation is already applied.
rotationAngle = 0;
}
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 5f9c20e..60df162 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -102,10 +102,11 @@
static bool findParam(uint32_t key, T *param,
KeyedVector<uint32_t, uint64_t> ¶ms) {
CHECK(param);
- if (params.indexOfKey(key) < 0) {
+ ssize_t index = params.indexOfKey(key);
+ if (index < 0) {
return false;
}
- *param = (T) params[key];
+ *param = (T) params[index];
return true;
}
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 52bef62..e995931 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -3534,6 +3534,17 @@
}
setState(STARTED);
postPendingRepliesAndDeferredMessages("kWhatStartCompleted");
+
+ // Now that the codec has started, configure the peek behavior to be undefined by default,
+ // for backwards compatibility with older releases. Later, if an app explicitly enables or
+ // disables peek, the parameter is turned off and the legacy undefined behavior is
+ // disallowed.
+ // See updateTunnelPeek called in onSetParameters for more details.
+ if (mTunneled && mTunnelPeekState == TunnelPeekState::kLegacyMode) {
+ sp<AMessage> params = new AMessage;
+ params->setInt32("android._tunnel-peek-set-legacy", 1);
+ mCodec->signalSetParameters(params);
+ }
break;
}
@@ -3973,14 +3984,6 @@
mTunneled = false;
}
- // If mTunnelPeekState is still in kLegacyMode at this point,
- // configure the codec in legacy mode
- if (mTunneled && (mTunnelPeekState == TunnelPeekState::kLegacyMode)) {
- sp<AMessage> params = new AMessage;
- params->setInt32("android._tunnel-peek-set-legacy", 1);
- onSetParameters(params);
- }
-
int32_t background = 0;
if (format->findInt32("android._background-mode", &background) && background) {
androidSetThreadPriority(gettid(), ANDROID_PRIORITY_BACKGROUND);
@@ -4697,7 +4700,7 @@
// rendering.
int32_t dataSpace;
if (mOutputFormat->findInt32("android._dataspace", &dataSpace)) {
- ALOGD("[%s] setting dataspace on output surface to #%x",
+ ALOGD("[%s] setting dataspace on output surface to %#x",
mComponentName.c_str(), dataSpace);
int err = native_window_set_buffers_data_space(
mSurface.get(), (android_dataspace)dataSpace);
diff --git a/media/libstagefright/NuMediaExtractor.cpp b/media/libstagefright/NuMediaExtractor.cpp
index b4baf03..0536f2a 100644
--- a/media/libstagefright/NuMediaExtractor.cpp
+++ b/media/libstagefright/NuMediaExtractor.cpp
@@ -72,6 +72,37 @@
}
}
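+// Shared initialization used by the setDataSource() overloads: creates the extractor, applies
+// the CAS token if present, caches the extractor name, and updates duration/bitrate.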
+status_t NuMediaExtractor::initMediaExtractor(const sp<DataSource>& dataSource) {
+ status_t err = OK;
+
+ mImpl = MediaExtractorFactory::Create(dataSource);
+ if (mImpl == NULL) {
+ ALOGE("%s: failed to create MediaExtractor", __FUNCTION__);
+ return ERROR_UNSUPPORTED;
+ }
+
+ setEntryPointToRemoteMediaExtractor();
+
+ if (!mCasToken.empty()) {
+ err = mImpl->setMediaCas(mCasToken);
+ if (err != OK) {
+ ALOGE("%s: failed to setMediaCas (%d)", __FUNCTION__, err);
+ return err;
+ }
+ }
+
+ // Get the name of the implementation.
+ mName = mImpl->name();
+
+ // Update the duration and bitrate
+ err = updateDurationAndBitrate();
+ if (err == OK) {
+ mDataSource = dataSource;
+ }
+
+ return OK;
+}
+
status_t NuMediaExtractor::setDataSource(
const sp<MediaHTTPService> &httpService,
const char *path,
@@ -89,28 +120,8 @@
return -ENOENT;
}
- mImpl = MediaExtractorFactory::Create(dataSource);
-
- if (mImpl == NULL) {
- return ERROR_UNSUPPORTED;
- }
- setEntryPointToRemoteMediaExtractor();
-
- status_t err = OK;
- if (!mCasToken.empty()) {
- err = mImpl->setMediaCas(mCasToken);
- if (err != OK) {
- ALOGE("%s: failed to setMediaCas (%d)", __FUNCTION__, err);
- return err;
- }
- }
-
- err = updateDurationAndBitrate();
- if (err == OK) {
- mDataSource = dataSource;
- }
-
- return OK;
+ // Initialize MediaExtractor using the data source
+ return initMediaExtractor(dataSource);
}
status_t NuMediaExtractor::setDataSource(int fd, off64_t offset, off64_t size) {
@@ -131,27 +142,8 @@
return err;
}
- mImpl = MediaExtractorFactory::Create(fileSource);
-
- if (mImpl == NULL) {
- return ERROR_UNSUPPORTED;
- }
- setEntryPointToRemoteMediaExtractor();
-
- if (!mCasToken.empty()) {
- err = mImpl->setMediaCas(mCasToken);
- if (err != OK) {
- ALOGE("%s: failed to setMediaCas (%d)", __FUNCTION__, err);
- return err;
- }
- }
-
- err = updateDurationAndBitrate();
- if (err == OK) {
- mDataSource = fileSource;
- }
-
- return OK;
+ // Initialize MediaExtractor using the file source
+ return initMediaExtractor(fileSource);
}
status_t NuMediaExtractor::setDataSource(const sp<DataSource> &source) {
@@ -166,32 +158,13 @@
return err;
}
- mImpl = MediaExtractorFactory::Create(source);
-
- if (mImpl == NULL) {
- return ERROR_UNSUPPORTED;
- }
- setEntryPointToRemoteMediaExtractor();
-
- if (!mCasToken.empty()) {
- err = mImpl->setMediaCas(mCasToken);
- if (err != OK) {
- ALOGE("%s: failed to setMediaCas (%d)", __FUNCTION__, err);
- return err;
- }
- }
-
- err = updateDurationAndBitrate();
- if (err == OK) {
- mDataSource = source;
- }
-
- return err;
+ // Initialize MediaExtractor using the given data source
+ return initMediaExtractor(source);
}
const char* NuMediaExtractor::getName() const {
Mutex::Autolock autoLock(mLock);
- return mImpl == nullptr ? nullptr : mImpl->name().string();
+ return mImpl == nullptr ? nullptr : mName.string();
}
static String8 arrayToString(const std::vector<uint8_t> &array) {
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index 0f5e95e..cff37a3 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -96,6 +96,7 @@
return ERROR_UNSUPPORTED;
}
+ // NOLINTNEXTLINE(clang-analyzer-unix.MallocSizeof)
mOs = (OggStreamState*) malloc(sizeof(ogg_stream_state));
if (ogg_stream_init((ogg_stream_state*)mOs, rand()) == -1) {
ALOGE("ogg stream init failed");
@@ -242,13 +243,15 @@
mDone = true;
- void* dummy;
- pthread_join(mThread, &dummy);
+ status_t status = mSource->stop();
- status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(dummy));
+ void *pthread_res;
+ pthread_join(mThread, &pthread_res);
+
+ status_t err = static_cast<status_t>(reinterpret_cast<uintptr_t>(pthread_res));
{
- status_t status = mSource->stop();
- if (err == OK && (status != OK && status != ERROR_END_OF_STREAM)) {
+ if (err == OK &&
+ (status != OK && status != ERROR_END_OF_STREAM)) {
err = status;
}
}
diff --git a/media/libstagefright/colorconversion/SoftwareRenderer.cpp b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
index 4711315..2409315 100644
--- a/media/libstagefright/colorconversion/SoftwareRenderer.cpp
+++ b/media/libstagefright/colorconversion/SoftwareRenderer.cpp
@@ -441,7 +441,7 @@
((dataSpace & ~HAL_DATASPACE_RANGE_MASK) | HAL_DATASPACE_RANGE_FULL);
}
- ALOGD("setting dataspace on output surface to #%x", dataSpace);
+ ALOGD("setting dataspace on output surface to %#x", dataSpace);
if ((err = native_window_set_buffers_data_space(mNativeWindow.get(), dataSpace))) {
ALOGW("failed to set dataspace on surface (%d)", err);
}
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index ec75e79..b29c3b6 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -27,66 +27,77 @@
<Limit name="channel-count" max="2" />
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-320000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.amrnb.decoder" type="audio/3gpp">
<Alias name="OMX.google.amrnb.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.amrwb.decoder" type="audio/amr-wb">
<Alias name="OMX.google.amrwb.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.aac.decoder" type="audio/mp4a-latm">
<Alias name="OMX.google.aac.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="7350,8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<Limit name="bitrate" range="8000-960000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.g711.alaw.decoder" type="audio/g711-alaw">
<Alias name="OMX.google.g711.alaw.decoder" />
<Limit name="channel-count" max="6" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.g711.mlaw.decoder" type="audio/g711-mlaw">
<Alias name="OMX.google.g711.mlaw.decoder" />
<Limit name="channel-count" max="6" />
<Limit name="sample-rate" ranges="8000-48000" />
<Limit name="bitrate" range="64000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.vorbis.decoder" type="audio/vorbis">
<Alias name="OMX.google.vorbis.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-96000" />
<Limit name="bitrate" range="32000-500000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.opus.decoder" type="audio/opus">
<Alias name="OMX.google.opus.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000,12000,16000,24000,48000" />
<Limit name="bitrate" range="6000-510000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.raw.decoder" type="audio/raw">
<Alias name="OMX.google.raw.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="8000-192000" />
<Limit name="bitrate" range="1-10000000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.flac.decoder" type="audio/flac">
<Alias name="OMX.google.flac.decoder" />
<Limit name="channel-count" max="8" />
<Limit name="sample-rate" ranges="1-655350" />
<Limit name="bitrate" range="1-21000000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.gsm.decoder" type="audio/gsm" domain="telephony">
<Alias name="OMX.google.gsm.decoder" />
<Limit name="channel-count" max="1" />
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="13000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
<Alias name="OMX.google.mpeg4.decoder" />
@@ -97,6 +108,7 @@
<Limit name="blocks-per-second" range="1-432000" />
<Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
<Alias name="OMX.google.h263.decoder" />
@@ -106,6 +118,7 @@
<Limit name="alignment" value="2x2" />
<Limit name="bitrate" range="1-384000" />
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.avc.decoder" type="video/avc" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.h264.decoder" />
@@ -126,6 +139,7 @@
<Limit name="bitrate" range="1-40000000" />
</Variant>
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.hevc.decoder" type="video/hevc" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.hevc.decoder" />
@@ -146,6 +160,7 @@
<Limit name="bitrate" range="1-5000000" />
</Variant>
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.vp8.decoder" />
@@ -163,6 +178,7 @@
<Limit name="bitrate" range="1-40000000" />
</Variant>
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.vp9.decoder" />
@@ -181,6 +197,7 @@
<Limit name="bitrate" range="1-5000000" />
</Variant>
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.av1.decoder" type="video/av01" variant="!slow-cpu">
<Limit name="size" min="2x2" max="2048x2048" />
@@ -190,6 +207,7 @@
<Limit name="blocks-per-second" range="1-2073600" />
<Limit name="bitrate" range="1-120000000" />
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.mpeg2.decoder" type="video/mpeg2" domain="tv">
<Alias name="OMX.google.mpeg2.decoder" />
@@ -200,6 +218,7 @@
<Limit name="blocks-per-second" range="1-244800" />
<Limit name="bitrate" range="1-20000000" />
<Feature name="adaptive-playback" />
+ <Attribute name="software-codec" />
</MediaCodec>
</Decoders>
<Encoders>
@@ -209,6 +228,7 @@
<Limit name="sample-rate" ranges="8000,11025,12000,16000,22050,24000,32000,44100,48000" />
<!-- also may support 64000, 88200 and 96000 Hz -->
<Limit name="bitrate" range="8000-960000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.amrnb.encoder" type="audio/3gpp">
<Alias name="OMX.google.amrnb.encoder" />
@@ -216,6 +236,7 @@
<Limit name="sample-rate" ranges="8000" />
<Limit name="bitrate" range="4750-12200" />
<Feature name="bitrate-modes" value="CBR" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.amrwb.encoder" type="audio/amr-wb">
<Alias name="OMX.google.amrwb.encoder" />
@@ -223,6 +244,7 @@
<Limit name="sample-rate" ranges="16000" />
<Limit name="bitrate" range="6600-23850" />
<Feature name="bitrate-modes" value="CBR" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.flac.encoder" type="audio/flac">
<Alias name="OMX.google.flac.encoder" />
@@ -231,6 +253,7 @@
<Limit name="bitrate" range="1-21000000" />
<Limit name="complexity" range="0-8" default="5" />
<Feature name="bitrate-modes" value="CQ" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.opus.encoder" type="audio/opus">
<Limit name="channel-count" max="2" />
@@ -238,6 +261,7 @@
<Limit name="bitrate" range="500-512000" />
<Limit name="complexity" range="0-10" default="5" />
<Feature name="bitrate-modes" value="CBR,VBR" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.h263.encoder" type="video/3gpp">
<Alias name="OMX.google.h263.encoder" />
@@ -245,6 +269,7 @@
<Limit name="size" min="176x144" max="176x144" />
<Limit name="alignment" value="16x16" />
<Limit name="bitrate" range="1-128000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.mpeg4.encoder" type="video/mp4v-es">
<Alias name="OMX.google.mpeg4.encoder" />
@@ -254,6 +279,7 @@
<Limit name="block-size" value="16x16" />
<Limit name="blocks-per-second" range="12-1485" />
<Limit name="bitrate" range="1-64000" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.avc.encoder" type="video/avc" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.h264.encoder" />
@@ -277,6 +303,7 @@
<!-- Video Quality control -->
<!-- supports QP bounding with standard keys -->
<Feature name="qp-bounds" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.vp8.encoder" type="video/x-vnd.on2.vp8" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.vp8.encoder" />
@@ -296,6 +323,7 @@
<Limit name="bitrate" range="1-20000000" />
</Variant>
<Feature name="bitrate-modes" value="VBR,CBR" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.hevc.encoder" type="video/hevc" variant="!slow-cpu">
<!-- profiles and levels: ProfileMain : MainTierLevel51 -->
@@ -309,6 +337,7 @@
<Limit name="complexity" range="0-10" default="0" />
<Limit name="quality" range="0-100" default="80" />
<Feature name="bitrate-modes" value="VBR,CBR,CQ" />
+ <Attribute name="software-codec" />
</MediaCodec>
<MediaCodec name="c2.android.vp9.encoder" type="video/x-vnd.on2.vp9" variant="!slow-cpu">
<Alias name="OMX.google.vp9.encoder" />
@@ -320,6 +349,7 @@
<Limit name="block-count" range="1-3600" /> <!-- max 1280x720 -->
<Limit name="bitrate" range="1-40000000" />
<Feature name="bitrate-modes" value="VBR,CBR" />
+ <Attribute name="software-codec" />
</MediaCodec>
</Encoders>
</MediaCodecs>
diff --git a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
index d17a480..52ea28b 100644
--- a/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
+++ b/media/libstagefright/include/media/stagefright/NuMediaExtractor.h
@@ -146,6 +146,7 @@
Vector<TrackInfo> mSelectedTracks;
int64_t mTotalBitrate; // in bits/sec
int64_t mDurationUs;
+ String8 mName;
void setEntryPointToRemoteMediaExtractor();
@@ -165,6 +166,7 @@
bool getTotalBitrate(int64_t *bitRate) const;
status_t updateDurationAndBitrate();
status_t appendVorbisNumPageSamples(MediaBufferBase *mbuf, const sp<ABuffer> &buffer);
+ status_t initMediaExtractor(const sp<DataSource>& dataSource);
DISALLOW_EVIL_CONSTRUCTORS(NuMediaExtractor);
};
diff --git a/media/libstagefright/renderfright/gl/ProgramCache.cpp b/media/libstagefright/renderfright/gl/ProgramCache.cpp
index 1a3b4e7..af55172 100644
--- a/media/libstagefright/renderfright/gl/ProgramCache.cpp
+++ b/media/libstagefright/renderfright/gl/ProgramCache.cpp
@@ -299,8 +299,8 @@
highp vec3 ScaleLuminance(highp vec3 color) {
// The formula is:
// alpha * pow(Y, gamma - 1.0) * color + beta;
- // where alpha is 1000.0, gamma is 1.2, beta is 0.0.
- return color * 1000.0 * pow(color.y, 0.2);
+ // where alpha is displayMaxLuminance, gamma is 1.2, beta is 0.0.
+ return color * displayMaxLuminance * pow(color.y, 0.2);
}
)__SHADER__";
break;
@@ -316,7 +316,6 @@
// Tone map absolute light to display luminance range.
switch (needs.getInputTF()) {
case Key::INPUT_TF_ST2084:
- case Key::INPUT_TF_HLG:
switch (needs.getOutputTF()) {
case Key::OUTPUT_TF_HLG:
// Right now when mixed PQ and HLG contents are presented,
@@ -400,6 +399,14 @@
break;
}
break;
+ case Key::INPUT_TF_HLG:
+ // HLG OOTF is already applied as part of ScaleLuminance
+ fs << R"__SHADER__(
+ highp vec3 ToneMap(highp vec3 color) {
+ return color;
+ }
+ )__SHADER__";
+ break;
default:
// inverse tone map; the output luminance can be up to maxOutLumi.
fs << R"__SHADER__(
diff --git a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
index c43e1f8..19c8577 100644
--- a/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
+++ b/media/libstagefright/tests/HEVC/HEVCUtilsUnitTest.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <fstream>
+#include <memory>
#include <media/stagefright/foundation/ABitReader.h>
#include <HevcUtils.h>
@@ -88,10 +89,10 @@
stringLine >> type >> chunkLength;
ASSERT_GT(chunkLength, 0) << "Length of data chunk must be greater than 0";
- char *data = (char *)malloc(chunkLength);
+ std::unique_ptr<char[]> data(new char[chunkLength]);
ASSERT_NE(data, nullptr) << "Failed to allocate data buffer of size: " << chunkLength;
- mMediaFileStream.read(data, chunkLength);
+ mMediaFileStream.read(data.get(), chunkLength);
ASSERT_EQ(mMediaFileStream.gcount(), chunkLength)
<< "Failed to read complete file, bytes read: " << mMediaFileStream.gcount();
@@ -105,7 +106,7 @@
offset += 3;
ASSERT_LE(offset, chunkLength) << "NAL unit offset must not exceed the chunk length";
- uint8_t *nalUnit = (uint8_t *)(data + offset);
+ uint8_t *nalUnit = (uint8_t *)(data.get() + offset);
size_t nalUnitLength = chunkLength - offset;
// Add NAL units only if they're of type: VPS/SPS/PPS/SEI
@@ -118,20 +119,18 @@
size_t sizeNalUnit = hevcParams.getSize(index);
ASSERT_EQ(sizeNalUnit, nalUnitLength) << "Invalid size returned for NAL: " << type;
- uint8_t *destination = (uint8_t *)malloc(nalUnitLength);
+ std::unique_ptr<uint8_t[]> destination(new uint8_t[nalUnitLength]);
ASSERT_NE(destination, nullptr)
<< "Failed to allocate buffer of size: " << nalUnitLength;
- bool status = hevcParams.write(index, destination, nalUnitLength);
+ bool status = hevcParams.write(index, destination.get(), nalUnitLength);
ASSERT_TRUE(status) << "Unable to write NAL Unit data";
- free(destination);
index++;
} else {
err = hevcParams.addNalUnit(nalUnit, nalUnitLength);
ASSERT_NE(err, (status_t)OK) << "Invalid NAL Unit added, type: " << type;
}
- free(data);
}
size_t numNalUnits = hevcParams.getNumNalUnitsOfType(kVPSCode);
@@ -166,10 +165,10 @@
<< "Expected NAL type: 34(PPS), found: " << typeNalUnit;
size_t hvccBoxSize = kHvccBoxMaxSize;
- uint8_t *hvcc = (uint8_t *)malloc(kHvccBoxMaxSize);
+ std::unique_ptr<uint8_t[]> hvcc(new uint8_t[kHvccBoxMaxSize]);
ASSERT_NE(hvcc, nullptr) << "Failed to allocate a hvcc buffer of size: " << kHvccBoxMaxSize;
- err = hevcParams.makeHvcc(hvcc, &hvccBoxSize, kNALSizeLength);
+ err = hevcParams.makeHvcc(hvcc.get(), &hvccBoxSize, kNALSizeLength);
ASSERT_EQ(err, (status_t)OK) << "Unable to create hvcc box";
ASSERT_GT(hvccBoxSize, kHvccBoxMinSize)
@@ -179,8 +178,6 @@
if (frameRate != mFrameRate)
cout << "[ WARN ] Expected frame rate: " << mFrameRate << " Found: " << frameRate
<< endl;
-
- free(hvcc);
}
// Info File contains the type and length for each chunk/frame
diff --git a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
index f934b54..b2044d3 100644
--- a/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
+++ b/media/libstagefright/timedtext/test/TimedTextUnitTest.cpp
@@ -22,6 +22,7 @@
#include <string.h>
#include <sys/stat.h>
#include <fstream>
+#include <memory>
#include <binder/Parcel.h>
#include <media/stagefright/foundation/AString.h>
@@ -240,10 +241,10 @@
if (remaining < tempFontNameLength) break;
const uint8_t *tmpFont = tmpData;
- char *tmpFontName = strndup((const char *)tmpFont, tempFontNameLength);
+ std::unique_ptr<char[]> tmpFontName(new char[tempFontNameLength]);
+ strncpy(tmpFontName.get(), (const char *)tmpFont, tempFontNameLength);
ASSERT_NE(tmpFontName, nullptr) << "Font Name is null";
- ALOGI("FontName = %s", tmpFontName);
- free(tmpFontName);
+ ALOGI("FontName = %s", tmpFontName.get());
tmpData += tempFontNameLength;
remaining -= tempFontNameLength;
fontRecordEntries.push_back({tempFontID, tempFontNameLength, tmpFont});
diff --git a/media/module/codecs/amrwb/dec/test/AmrwbDecoderTest.cpp b/media/module/codecs/amrwb/dec/test/AmrwbDecoderTest.cpp
index 7221b92..2cc88ce 100644
--- a/media/module/codecs/amrwb/dec/test/AmrwbDecoderTest.cpp
+++ b/media/module/codecs/amrwb/dec/test/AmrwbDecoderTest.cpp
@@ -21,6 +21,7 @@
#include <utils/Log.h>
#include <audio_utils/sndfile.h>
+#include <memory>
#include <stdio.h>
#include "pvamrwbdecoder.h"
@@ -121,7 +122,7 @@
TEST_F(AmrwbDecoderTest, MultiCreateAmrwbDecoderTest) {
uint32_t memRequirements = pvDecoder_AmrWbMemRequirements();
- void *decoderBuf = malloc(memRequirements);
+ std::unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
@@ -129,25 +130,21 @@
void *amrHandle = nullptr;
int16_t *decoderCookie;
for (int count = 0; count < kMaxCount; count++) {
- pvDecoder_AmrWb_Init(&amrHandle, decoderBuf, &decoderCookie);
+ pvDecoder_AmrWb_Init(&amrHandle, decoderBuf.get(), &decoderCookie);
ASSERT_NE(amrHandle, nullptr) << "Failed to initialize decoder";
ALOGV("Decoder created successfully");
}
- if (decoderBuf) {
- free(decoderBuf);
- decoderBuf = nullptr;
- }
}
TEST_P(AmrwbDecoderTest, DecodeTest) {
uint32_t memRequirements = pvDecoder_AmrWbMemRequirements();
- void *decoderBuf = malloc(memRequirements);
+ std::unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
void *amrHandle = nullptr;
int16_t *decoderCookie;
- pvDecoder_AmrWb_Init(&amrHandle, decoderBuf, &decoderCookie);
+ pvDecoder_AmrWb_Init(&amrHandle, decoderBuf.get(), &decoderCookie);
ASSERT_NE(amrHandle, nullptr) << "Failed to initialize decoder";
string inputFile = gEnv->getRes() + GetParam();
@@ -159,25 +156,21 @@
SNDFILE *outFileHandle = openOutputFile(&sfInfo);
ASSERT_NE(outFileHandle, nullptr) << "Error opening output file for writing decoded output";
- int32_t decoderErr = DecodeFrames(decoderCookie, decoderBuf, outFileHandle);
+ int32_t decoderErr = DecodeFrames(decoderCookie, decoderBuf.get(), outFileHandle);
ASSERT_EQ(decoderErr, 0) << "DecodeFrames returned error";
sf_close(outFileHandle);
- if (decoderBuf) {
- free(decoderBuf);
- decoderBuf = nullptr;
- }
}
TEST_P(AmrwbDecoderTest, ResetDecoderTest) {
uint32_t memRequirements = pvDecoder_AmrWbMemRequirements();
- void *decoderBuf = malloc(memRequirements);
+ std::unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
void *amrHandle = nullptr;
int16_t *decoderCookie;
- pvDecoder_AmrWb_Init(&amrHandle, decoderBuf, &decoderCookie);
+ pvDecoder_AmrWb_Init(&amrHandle, decoderBuf.get(), &decoderCookie);
ASSERT_NE(amrHandle, nullptr) << "Failed to initialize decoder";
string inputFile = gEnv->getRes() + GetParam();
@@ -190,20 +183,18 @@
ASSERT_NE(outFileHandle, nullptr) << "Error opening output file for writing decoded output";
// Decode 150 frames first
- int32_t decoderErr = DecodeFrames(decoderCookie, decoderBuf, outFileHandle, kNumFrameReset);
+ int32_t decoderErr =
+ DecodeFrames(decoderCookie, decoderBuf.get(), outFileHandle, kNumFrameReset);
ASSERT_EQ(decoderErr, 0) << "DecodeFrames returned error";
// Reset Decoder
- pvDecoder_AmrWb_Reset(decoderBuf, 1);
+ pvDecoder_AmrWb_Reset(decoderBuf.get(), 1);
// Start decoding again
- decoderErr = DecodeFrames(decoderCookie, decoderBuf, outFileHandle);
+ decoderErr = DecodeFrames(decoderCookie, decoderBuf.get(), outFileHandle);
ASSERT_EQ(decoderErr, 0) << "DecodeFrames returned error";
sf_close(outFileHandle);
- if (decoderBuf) {
- free(decoderBuf);
- }
}
INSTANTIATE_TEST_SUITE_P(AmrwbDecoderTestAll, AmrwbDecoderTest,
diff --git a/media/module/codecs/mp3dec/test/Mp3DecoderTest.cpp b/media/module/codecs/mp3dec/test/Mp3DecoderTest.cpp
index 91326a8..88e9eae 100644
--- a/media/module/codecs/mp3dec/test/Mp3DecoderTest.cpp
+++ b/media/module/codecs/mp3dec/test/Mp3DecoderTest.cpp
@@ -20,6 +20,7 @@
#include <utils/Log.h>
#include <audio_utils/sndfile.h>
+#include <memory>
#include <stdio.h>
#include "mp3reader.h"
@@ -99,27 +100,23 @@
TEST_F(Mp3DecoderTest, MultiCreateMp3DecoderTest) {
size_t memRequirements = pvmp3_decoderMemRequirements();
ASSERT_NE(memRequirements, 0) << "Failed to get the memory requirement size";
- void *decoderBuf = malloc(memRequirements);
+ unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
for (int count = 0; count < kMaxCount; count++) {
- pvmp3_InitDecoder(mConfig, decoderBuf);
+ pvmp3_InitDecoder(mConfig, (void*)decoderBuf.get());
ALOGV("Decoder created successfully");
}
- if (decoderBuf) {
- free(decoderBuf);
- decoderBuf = nullptr;
- }
}
TEST_P(Mp3DecoderTest, DecodeTest) {
size_t memRequirements = pvmp3_decoderMemRequirements();
ASSERT_NE(memRequirements, 0) << "Failed to get the memory requirement size";
- void *decoderBuf = malloc(memRequirements);
+ unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
- pvmp3_InitDecoder(mConfig, decoderBuf);
+ pvmp3_InitDecoder(mConfig, (void*)decoderBuf.get());
ALOGV("Decoder created successfully");
string inputFile = gEnv->getRes() + GetParam();
bool status = mMp3Reader.init(inputFile.c_str());
@@ -130,27 +127,23 @@
SNDFILE *outFileHandle = openOutputFile(&sfInfo);
ASSERT_NE(outFileHandle, nullptr) << "Error opening output file for writing decoded output";
- ERROR_CODE decoderErr = DecodeFrames(decoderBuf, outFileHandle, sfInfo);
+ ERROR_CODE decoderErr = DecodeFrames((void*)decoderBuf.get(), outFileHandle, sfInfo);
ASSERT_EQ(decoderErr, NO_DECODING_ERROR) << "Failed to decode the frames";
ASSERT_EQ(sfInfo.channels, mConfig->num_channels) << "Number of channels does not match";
ASSERT_EQ(sfInfo.samplerate, mConfig->samplingRate) << "Sample rate does not match";
mMp3Reader.close();
sf_close(outFileHandle);
- if (decoderBuf) {
- free(decoderBuf);
- decoderBuf = nullptr;
- }
}
TEST_P(Mp3DecoderTest, ResetDecoderTest) {
size_t memRequirements = pvmp3_decoderMemRequirements();
ASSERT_NE(memRequirements, 0) << "Failed to get the memory requirement size";
- void *decoderBuf = malloc(memRequirements);
+ unique_ptr<char[]> decoderBuf(new char[memRequirements]);
ASSERT_NE(decoderBuf, nullptr)
<< "Failed to allocate decoder memory of size " << memRequirements;
- pvmp3_InitDecoder(mConfig, decoderBuf);
+ pvmp3_InitDecoder(mConfig, (void*)decoderBuf.get());
ALOGV("Decoder created successfully.");
string inputFile = gEnv->getRes() + GetParam();
bool status = mMp3Reader.init(inputFile.c_str());
@@ -162,24 +155,20 @@
ASSERT_NE(outFileHandle, nullptr) << "Error opening output file for writing decoded output";
ERROR_CODE decoderErr;
- decoderErr = DecodeFrames(decoderBuf, outFileHandle, sfInfo, kNumFrameReset);
+ decoderErr = DecodeFrames((void*)decoderBuf.get(), outFileHandle, sfInfo, kNumFrameReset);
ASSERT_EQ(decoderErr, NO_DECODING_ERROR) << "Failed to decode the frames";
ASSERT_EQ(sfInfo.channels, mConfig->num_channels) << "Number of channels does not match";
ASSERT_EQ(sfInfo.samplerate, mConfig->samplingRate) << "Sample rate does not match";
- pvmp3_resetDecoder(decoderBuf);
+ pvmp3_resetDecoder((void*)decoderBuf.get());
// Decode the same file.
- decoderErr = DecodeFrames(decoderBuf, outFileHandle, sfInfo);
+ decoderErr = DecodeFrames((void*)decoderBuf.get(), outFileHandle, sfInfo);
ASSERT_EQ(decoderErr, NO_DECODING_ERROR) << "Failed to decode the frames";
ASSERT_EQ(sfInfo.channels, mConfig->num_channels) << "Number of channels does not match";
ASSERT_EQ(sfInfo.samplerate, mConfig->samplingRate) << "Sample rate does not match";
mMp3Reader.close();
sf_close(outFileHandle);
- if (decoderBuf) {
- free(decoderBuf);
- decoderBuf = nullptr;
- }
}
INSTANTIATE_TEST_SUITE_P(Mp3DecoderTestAll, Mp3DecoderTest,
diff --git a/media/module/esds/TEST_MAPPING b/media/module/esds/TEST_MAPPING
new file mode 100644
index 0000000..9368b6d
--- /dev/null
+++ b/media/module/esds/TEST_MAPPING
@@ -0,0 +1,9 @@
+// mappings for frameworks/av/media/module/esds
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "ESDSTest" }
+ ]
+}
diff --git a/media/module/esds/tests/ESDSTest.cpp b/media/module/esds/tests/ESDSTest.cpp
index ea9a888..33bdcac 100644
--- a/media/module/esds/tests/ESDSTest.cpp
+++ b/media/module/esds/tests/ESDSTest.cpp
@@ -21,6 +21,7 @@
#include <stdio.h>
#include <string.h>
#include <fstream>
+#include <memory>
#include <media/esds/ESDS.h>
#include <binder/ProcessState.h>
@@ -121,18 +122,16 @@
};
TEST_P(ESDSUnitTest, InvalidDataTest) {
- void *invalidData = calloc(mESDSSize, 1);
+ std::unique_ptr<char[]> invalidData(new char[mESDSSize]());
ASSERT_NE(invalidData, nullptr) << "Unable to allocate memory";
- ESDS esds(invalidData, mESDSSize);
- free(invalidData);
+ ESDS esds((void*)invalidData.get(), mESDSSize);
ASSERT_NE(esds.InitCheck(), OK) << "invalid ESDS data accepted";
}
TEST(ESDSSanityUnitTest, ConstructorSanityTest) {
- void *invalidData = malloc(1);
+ std::unique_ptr<char[]> invalidData(new char[1]());
ASSERT_NE(invalidData, nullptr) << "Unable to allocate memory";
- ESDS esds_zero(invalidData, 0);
- free(invalidData);
+ ESDS esds_zero((void*)invalidData.get(), 0);
ASSERT_NE(esds_zero.InitCheck(), OK) << "invalid ESDS data accepted";
ESDS esds_null(NULL, 0);
diff --git a/media/module/extractors/mp4/MPEG4Extractor.cpp b/media/module/extractors/mp4/MPEG4Extractor.cpp
index 374f8d3..3a5a869 100644
--- a/media/module/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/module/extractors/mp4/MPEG4Extractor.cpp
@@ -394,6 +394,15 @@
return MEDIA_MIMETYPE_AUDIO_MPEGH_MHA1;
case FOURCC("mhm1"):
return MEDIA_MIMETYPE_AUDIO_MPEGH_MHM1;
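+ // DTS family sample entries (DTS core, DTS-HD, DTS-HD MA, DTS UHD Profile 2)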
+ case FOURCC("dtsc"):
+ return MEDIA_MIMETYPE_AUDIO_DTS;
+ case FOURCC("dtse"):
+ case FOURCC("dtsh"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_HD;
+ case FOURCC("dtsl"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_HD_MA;
+ case FOURCC("dtsx"):
+ return MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2;
default:
ALOGW("Unknown fourcc: %c%c%c%c",
(fourcc >> 24) & 0xff,
@@ -1804,6 +1813,11 @@
case 0x6D730055: // "ms U" mp3 audio
case FOURCC("mha1"):
case FOURCC("mhm1"):
+ case FOURCC("dtsc"):
+ case FOURCC("dtse"):
+ case FOURCC("dtsh"):
+ case FOURCC("dtsl"):
+ case FOURCC("dtsx"):
{
if (mIsQT && depth >= 1 && mPath[depth - 1] == FOURCC("wave")) {
@@ -5908,12 +5922,18 @@
return -EINVAL;
}
- int32_t dataOffsetDelta;
- if (!mDataSource->getUInt32(offset, (uint32_t*)&dataOffsetDelta)) {
+ uint32_t dataOffsetDelta;
+ if (!mDataSource->getUInt32(offset, &dataOffsetDelta)) {
return ERROR_MALFORMED;
}
- dataOffset = mTrackFragmentHeaderInfo.mBaseDataOffset + dataOffsetDelta;
+ if (__builtin_add_overflow(
+ mTrackFragmentHeaderInfo.mBaseDataOffset, dataOffsetDelta, &dataOffset)) {
+ ALOGW("b/232242894 mBaseDataOffset(%" PRIu64 ") + dataOffsetDelta(%u) overflows uint64",
+ mTrackFragmentHeaderInfo.mBaseDataOffset, dataOffsetDelta);
+ android_errorWriteLog(0x534e4554, "232242894");
+ return ERROR_MALFORMED;
+ }
offset += 4;
size -= 4;
@@ -6047,7 +6067,12 @@
return NO_MEMORY;
}
- dataOffset += sampleSize;
+ if (__builtin_add_overflow(dataOffset, sampleSize, &dataOffset)) {
+ ALOGW("b/232242894 dataOffset(%" PRIu64 ") + sampleSize(%u) overflows uint64",
+ dataOffset, sampleSize);
+ android_errorWriteLog(0x534e4554, "232242894");
+ return ERROR_MALFORMED;
+ }
}
mTrackFragmentHeaderInfo.mDataOffset = dataOffset;
@@ -6712,7 +6737,7 @@
const Sample *smpl = &mCurrentSamples[mCurrentSampleIndex];
offset = smpl->offset;
size = smpl->size;
- cts = mCurrentTime + smpl->compositionOffset;
+ cts = (int64_t)mCurrentTime + (int64_t)smpl->compositionOffset;
if (mElstInitialEmptyEditTicks > 0) {
cts += mElstInitialEmptyEditTicks;
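The two overflow checks added above follow the usual __builtin_add_overflow pattern (a GCC/Clang builtin). A small standalone sketch with hypothetical values in place of the extractor's fields:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t base = UINT64_MAX - 2;   // stand-in for mBaseDataOffset
        uint32_t delta = 16;              // stand-in for dataOffsetDelta
        uint64_t offset = 0;
        // The builtin returns true when the sum does not fit in 'offset', so the
        // caller can reject the fragment as malformed instead of wrapping around.
        if (__builtin_add_overflow(base, delta, &offset)) {
            std::printf("rejecting malformed offset: %llu + %u overflows\n",
                        (unsigned long long)base, delta);
            return 1;
        }
        std::printf("offset = %llu\n", (unsigned long long)offset);
        return 0;
    }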
diff --git a/media/module/foundation/MediaDefs.cpp b/media/module/foundation/MediaDefs.cpp
index 5c4ec17..4a75f90 100644
--- a/media/module/foundation/MediaDefs.cpp
+++ b/media/module/foundation/MediaDefs.cpp
@@ -71,7 +71,9 @@
const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM = "audio/x-adpcm-dvi-ima";
const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
-const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_HD_MA = "audio/vnd.dts.hd;profile=dtsma";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P1 = "audio/vnd.dts.uhd;profile=p1";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2 = "audio/vnd.dts.uhd;profile=p2";
const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
const char *MEDIA_MIMETYPE_AUDIO_EVRCB = "audio/evrcb";
const char *MEDIA_MIMETYPE_AUDIO_EVRCWB = "audio/evrcwb";
diff --git a/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
index fb8c299..740336a 100644
--- a/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -73,7 +73,9 @@
extern const char *MEDIA_MIMETYPE_AUDIO_DVI_IMA_ADPCM;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
-extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD_MA;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P1;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2;
extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
extern const char *MEDIA_MIMETYPE_AUDIO_EVRCB;
extern const char *MEDIA_MIMETYPE_AUDIO_EVRCWB;
diff --git a/media/module/foundation/tests/AVCUtils/AVCUtilsUnitTest.cpp b/media/module/foundation/tests/AVCUtils/AVCUtilsUnitTest.cpp
index 77a8599..57ac822 100644
--- a/media/module/foundation/tests/AVCUtils/AVCUtilsUnitTest.cpp
+++ b/media/module/foundation/tests/AVCUtils/AVCUtilsUnitTest.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <fstream>
+#include <memory>
#include "media/stagefright/foundation/ABitReader.h"
#include "media/stagefright/foundation/avc_utils.h"
@@ -151,10 +152,10 @@
size_t fileSize = buf.st_size;
ASSERT_NE(fileSize, 0) << "Invalid file size found";
- const uint8_t *volBuffer = new uint8_t[fileSize];
+ std::unique_ptr<uint8_t[]> volBuffer(new uint8_t[fileSize]);
ASSERT_NE(volBuffer, nullptr) << "Failed to allocate VOL buffer of size: " << fileSize;
- inputFileStream.read((char *)(volBuffer), fileSize);
+ inputFileStream.read((char *)(volBuffer.get()), fileSize);
ASSERT_EQ(inputFileStream.gcount(), fileSize)
<< "Failed to read complete file, bytes read: " << inputFileStream.gcount();
@@ -163,15 +164,13 @@
int32_t volWidth = -1;
int32_t volHeight = -1;
- bool status = ExtractDimensionsFromVOLHeader(volBuffer, fileSize, &volWidth, &volHeight);
+ bool status = ExtractDimensionsFromVOLHeader(volBuffer.get(), fileSize, &volWidth, &volHeight);
ASSERT_TRUE(status)
<< "Failed to get VOL dimensions from function: ExtractDimensionsFromVOLHeader()";
ASSERT_EQ(volWidth, width) << "Expected width: " << width << "Found: " << volWidth;
ASSERT_EQ(volHeight, height) << "Expected height: " << height << "Found: " << volHeight;
-
- delete[] volBuffer;
}
TEST_P(AVCDimensionTest, DimensionTest) {
@@ -186,7 +185,8 @@
stringLine >> type >> chunkLength;
ASSERT_GT(chunkLength, 0) << "Length of the data chunk must be greater than zero";
- const uint8_t *data = new uint8_t[chunkLength];
+ std::unique_ptr<uint8_t[]> dataArray(new uint8_t[chunkLength]);
+ const uint8_t *data = dataArray.get();
ASSERT_NE(data, nullptr) << "Failed to create a data buffer of size: " << chunkLength;
const uint8_t *nalStart;
@@ -197,9 +197,11 @@
<< "Failed to read complete file, bytes read: " << mInputFileStream.gcount();
size_t smallBufferSize = kSmallBufferSize;
- const uint8_t *sanityData = new uint8_t[smallBufferSize];
+ uint8_t sanityDataBuffer[smallBufferSize];
+ const uint8_t *sanityData = sanityDataBuffer;
memcpy((void *)sanityData, (void *)data, smallBufferSize);
+            // getNextNALUnit() may advance sanityData, but sanityDataBuffer still owns the
+            // stack storage, so no explicit cleanup is needed.
status_t result = getNextNALUnit(&sanityData, &smallBufferSize, &nalStart, &nalSize, true);
ASSERT_EQ(result, -EAGAIN) << "Invalid result found when wrong NAL unit passed";
@@ -221,7 +223,6 @@
ASSERT_EQ(avcHeight, mFrameHeight)
<< "Expected height: " << mFrameHeight << "Found: " << avcHeight;
}
- delete[] data;
}
if (mNalUnitsExpected < 0) {
ASSERT_GT(numNalUnits, 0) << "Failed to find an NAL Unit";
@@ -251,7 +252,8 @@
accessUnitLength += chunkLength;
if (!type.compare("SPS")) {
- const uint8_t *data = new uint8_t[chunkLength];
+ std::unique_ptr<uint8_t[]> dataArray(new uint8_t[chunkLength]);
+ const uint8_t *data = dataArray.get();
ASSERT_NE(data, nullptr) << "Failed to create a data buffer of size: " << chunkLength;
const uint8_t *nalStart;
@@ -271,14 +273,13 @@
profile = nalStart[1];
level = nalStart[3];
}
- delete[] data;
}
}
- const uint8_t *accessUnitData = new uint8_t[accessUnitLength];
+ std::unique_ptr<uint8_t[]> accessUnitData(new uint8_t[accessUnitLength]);
ASSERT_NE(accessUnitData, nullptr) << "Failed to create a buffer of size: " << accessUnitLength;
mInputFileStream.seekg(0, ios::beg);
- mInputFileStream.read((char *)accessUnitData, accessUnitLength);
+ mInputFileStream.read((char *)accessUnitData.get(), accessUnitLength);
ASSERT_EQ(mInputFileStream.gcount(), accessUnitLength)
<< "Failed to read complete file, bytes read: " << mInputFileStream.gcount();
@@ -286,7 +287,7 @@
ASSERT_NE(accessUnit, nullptr)
<< "Failed to create an android data buffer of size: " << accessUnitLength;
- memcpy(accessUnit->data(), accessUnitData, accessUnitLength);
+ memcpy(accessUnit->data(), accessUnitData.get(), accessUnitLength);
sp<ABuffer> csdDataBuffer = MakeAVCCodecSpecificData(accessUnit, &avcWidth, &avcHeight);
ASSERT_NE(csdDataBuffer, nullptr) << "No data returned from MakeAVCCodecSpecificData()";
@@ -306,7 +307,6 @@
ASSERT_EQ(*(csdData + 3), level)
<< "Expected AVC level: " << level << " found: " << *(csdData + 3);
csdDataBuffer.clear();
- delete[] accessUnitData;
accessUnit.clear();
}
@@ -321,32 +321,31 @@
stringLine >> type >> chunkLength >> frameLayerID;
ASSERT_GT(chunkLength, 0) << "Length of the data chunk must be greater than zero";
- char *data = new char[chunkLength];
+ std::unique_ptr<char[]> data(new char[chunkLength]);
ASSERT_NE(data, nullptr) << "Failed to allocation data buffer of size: " << chunkLength;
- mInputFileStream.read(data, chunkLength);
+ mInputFileStream.read(data.get(), chunkLength);
ASSERT_EQ(mInputFileStream.gcount(), chunkLength)
<< "Failed to read complete file, bytes read: " << mInputFileStream.gcount();
if (!type.compare("IDR")) {
- bool isIDR = IsIDR((uint8_t *)data, chunkLength);
+ bool isIDR = IsIDR((uint8_t *)data.get(), chunkLength);
ASSERT_TRUE(isIDR);
- layerID = FindAVCLayerId((uint8_t *)data, chunkLength);
+ layerID = FindAVCLayerId((uint8_t *)data.get(), chunkLength);
ASSERT_EQ(layerID, frameLayerID) << "Wrong layer ID found";
} else if (!type.compare("P") || !type.compare("B")) {
sp<ABuffer> accessUnit = new ABuffer(chunkLength);
ASSERT_NE(accessUnit, nullptr) << "Unable to create access Unit";
- memcpy(accessUnit->data(), data, chunkLength);
+ memcpy(accessUnit->data(), data.get(), chunkLength);
bool isReferenceFrame = IsAVCReferenceFrame(accessUnit);
ASSERT_TRUE(isReferenceFrame);
accessUnit.clear();
- layerID = FindAVCLayerId((uint8_t *)data, chunkLength);
+ layerID = FindAVCLayerId((uint8_t *)data.get(), chunkLength);
ASSERT_EQ(layerID, frameLayerID) << "Wrong layer ID found";
}
- delete[] data;
}
}
diff --git a/media/module/foundation/tests/MetaDataBaseUnitTest.cpp b/media/module/foundation/tests/MetaDataBaseUnitTest.cpp
index 0aed4d2..b562e07 100644
--- a/media/module/foundation/tests/MetaDataBaseUnitTest.cpp
+++ b/media/module/foundation/tests/MetaDataBaseUnitTest.cpp
@@ -19,6 +19,7 @@
#include <string.h>
#include <sys/stat.h>
#include <fstream>
+#include <memory>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaDataBase.h>
@@ -48,18 +49,16 @@
class MetaDataBaseUnitTest : public ::testing::Test {};
TEST_F(MetaDataBaseUnitTest, CreateMetaDataBaseTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
// Testing copy constructor
- MetaDataBase *metaDataCopy = metaData;
+ MetaDataBase *metaDataCopy = metaData.get();
ASSERT_NE(metaDataCopy, nullptr) << "Failed to create meta data copy";
-
- delete metaData;
}
TEST_F(MetaDataBaseUnitTest, SetAndFindDataTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
// Setting the different key-value pair type for first time, overwrite
@@ -142,12 +141,10 @@
int32_t angle;
status = metaData->findInt32(kKeyRotation, &angle);
ASSERT_FALSE(status) << "Value for an invalid key is returned when the key is not set";
-
- delete (metaData);
}
TEST_F(MetaDataBaseUnitTest, OverWriteFunctionalityTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
// set/set/read to check first overwrite operation
@@ -186,12 +183,10 @@
status = metaData->findInt32(kKeyHeight, &height);
ASSERT_TRUE(status) << "kKeyHeight key does not exists in metadata";
ASSERT_EQ(height, kHeight3) << "Value of height is not overwritten";
-
- delete (metaData);
}
TEST_F(MetaDataBaseUnitTest, RemoveKeyTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
bool status = metaData->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_AVC);
@@ -246,12 +241,10 @@
metaData->setCString(kKeyMIMEType, MEDIA_MIMETYPE_VIDEO_HEVC);
ASSERT_FALSE(status) << "Overwrite should be false since the metadata was cleared";
-
- delete (metaData);
}
TEST_F(MetaDataBaseUnitTest, ConvertToStringTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
String8 info = metaData->toString();
@@ -281,8 +274,6 @@
info = metaData->toString();
ASSERT_EQ(info.length(), 0) << "MetaData length is non-zero after clearing it: "
<< info.length();
-
- delete (metaData);
}
} // namespace android
diff --git a/media/module/metadatautils/TEST_MAPPING b/media/module/metadatautils/TEST_MAPPING
new file mode 100644
index 0000000..21836a5
--- /dev/null
+++ b/media/module/metadatautils/TEST_MAPPING
@@ -0,0 +1,9 @@
+// mappings for frameworks/av/media/module/metadatautils
+{
+ // tests which require dynamic content
+ // invoke with: atest -- --enable-module-dynamic-download=true
+ // TODO(b/148094059): unit tests not allowed to download content
+ "dynamic-presubmit": [
+ { "name": "MetaDataUtilsTest" }
+ ]
+}
diff --git a/media/module/metadatautils/test/MetaDataUtilsTest.cpp b/media/module/metadatautils/test/MetaDataUtilsTest.cpp
index 08c9284..7c35249 100644
--- a/media/module/metadatautils/test/MetaDataUtilsTest.cpp
+++ b/media/module/metadatautils/test/MetaDataUtilsTest.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include <fstream>
+#include <memory>
#include <string>
#include <media/esds/ESDS.h>
@@ -228,7 +229,7 @@
ASSERT_TRUE(status) << "Failed to get the mime type";
ASSERT_STREQ(mimeType, MEDIA_MIMETYPE_VIDEO_AVC);
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create MetaData Base";
status = MakeAVCCodecSpecificData(*metaData, mInputBuffer, mInputBufferSize);
@@ -264,7 +265,6 @@
int32_t result = memcmp(csdAMediaFormatBuffer, csdMetaDataBaseBuffer, csdAMediaFormatSize);
ASSERT_EQ(result, 0) << "CSD from AMediaFormat and MetaDataBase do not match";
- delete metaData;
AMediaFormat_delete(csdData);
}
@@ -275,7 +275,7 @@
bool status = MakeAVCCodecSpecificData(csdData, mInputBuffer, mInputBufferSize);
ASSERT_FALSE(status) << "MakeAVCCodecSpecificData with AMediaFormat succeeds with invalid data";
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create MetaData Base";
status = MakeAVCCodecSpecificData(*metaData, mInputBuffer, mInputBufferSize);
@@ -307,7 +307,7 @@
ASSERT_TRUE(status) << "Failed to get the mime type";
ASSERT_STREQ(mimeType, MEDIA_MIMETYPE_AUDIO_AAC);
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create MetaData Base";
status = MakeAACCodecSpecificData(*metaData, mAacProfile, mAacSamplingFreqIndex,
@@ -356,11 +356,10 @@
ASSERT_EQ(memcmpResult, 0) << "AMediaFormat and MetaDataBase CSDs do not match";
AMediaFormat_delete(csdData);
- delete metaData;
}
TEST_P(AacADTSTest, AacADTSValidationTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
bool status = MakeAACCodecSpecificData(*metaData, mInputBuffer, kAdtsCsdSize);
@@ -380,12 +379,10 @@
status = metaData->findCString(kKeyMIMEType, &mimeType);
ASSERT_TRUE(status) << "Failed to get mime type";
ASSERT_STREQ(mimeType, MEDIA_MIMETYPE_AUDIO_AAC);
-
- delete metaData;
}
TEST_P(AacCSDValidateTest, AacInvalidInputTest) {
- MetaDataBase *metaData = new MetaDataBase();
+ std::unique_ptr<MetaDataBase> metaData(new MetaDataBase());
ASSERT_NE(metaData, nullptr) << "Failed to create meta data";
bool status = MakeAACCodecSpecificData(*metaData, mInputBuffer, kAdtsCsdSize);
@@ -412,14 +409,14 @@
istringstream dataStringLine(dataLine);
dataStringLine >> comment;
- char *buffer = strndup(comment.c_str(), commentLength);
+ std::unique_ptr<char[]> buffer(new char[commentLength]);
ASSERT_NE(buffer, nullptr) << "Failed to allocate buffer of size: " << commentLength;
+ strncpy(buffer.get(), comment.c_str(), commentLength);
AMediaFormat *fileMeta = AMediaFormat_new();
ASSERT_NE(fileMeta, nullptr) << "Failed to create AMedia format";
- parseVorbisComment(fileMeta, buffer, commentLength);
- free(buffer);
+ parseVorbisComment(fileMeta, buffer.get(), commentLength);
if (!strncasecmp(tag.c_str(), "ANDROID_HAPTIC", sizeof(tag))) {
int32_t numChannelExpected = stoi(value);
diff --git a/media/mtp/tests/MtpFuzzer/mtp_property_fuzzer.cpp b/media/mtp/tests/MtpFuzzer/mtp_property_fuzzer.cpp
index 8577e62..b4e659c 100644
--- a/media/mtp/tests/MtpFuzzer/mtp_property_fuzzer.cpp
+++ b/media/mtp/tests/MtpFuzzer/mtp_property_fuzzer.cpp
@@ -33,7 +33,11 @@
class MtpPropertyFuzzer : MtpPacketFuzzerUtils {
public:
- MtpPropertyFuzzer(const uint8_t* data, size_t size) : mFdp(data, size){};
+ MtpPropertyFuzzer(const uint8_t* data, size_t size) : mFdp(data, size) {
+ mUsbDevFsUrb = (struct usbdevfs_urb*)malloc(sizeof(struct usbdevfs_urb) +
+ sizeof(struct usbdevfs_iso_packet_desc));
+ };
+ ~MtpPropertyFuzzer() { free(mUsbDevFsUrb); };
void process();
private:
@@ -41,7 +45,7 @@
};
void MtpPropertyFuzzer::process() {
- MtpProperty* mtpProperty;
+ MtpProperty* mtpProperty = nullptr;
if (mFdp.ConsumeBool()) {
mtpProperty = new MtpProperty();
} else {
@@ -75,6 +79,7 @@
fillFilePath(&mFdp);
int32_t fd = memfd_create(mPath.c_str(), MFD_ALLOW_SEALING);
fillUsbRequest(fd, &mFdp);
+ mUsbRequest.dev = usb_device_new(mPath.c_str(), fd);
mtpDataPacket.write(&mUsbRequest,
mFdp.PickValueInArray<UrbPacketDivisionMode>(
kUrbPacketDivisionModes),
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 12a0d53..c46a692 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -247,6 +247,13 @@
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
*pixelStride = (planeIdx == 0) ? 1 : 2;
return AMEDIA_OK;
+ case HAL_PIXEL_FORMAT_YCBCR_P010:
+ if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+ *pixelStride = (planeIdx == 0) ? 2 : mLockedBuffer->chromaStep;
+ } else {
+ *pixelStride = (planeIdx == 0) ? 2 : 4;
+ }
+ return AMEDIA_OK;
case HAL_PIXEL_FORMAT_Y8:
*pixelStride = 1;
return AMEDIA_OK;
@@ -316,6 +323,13 @@
*rowStride = (planeIdx == 0) ? mLockedBuffer->stride
: ALIGN(mLockedBuffer->stride / 2, 16);
return AMEDIA_OK;
+ case HAL_PIXEL_FORMAT_YCBCR_P010:
+ if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+ *rowStride = (planeIdx == 0) ? mLockedBuffer->stride : mLockedBuffer->chromaStride;
+ } else {
+ *rowStride = mLockedBuffer->stride * 2;
+ }
+ return AMEDIA_OK;
case HAL_PIXEL_FORMAT_RAW10:
case HAL_PIXEL_FORMAT_RAW12:
// RAW10 and RAW12 are used for 10-bit and 12-bit raw data, they are single plane
@@ -473,6 +487,47 @@
: (planeIdx == 1) ? cb : cr;
dataSize = (planeIdx == 0) ? ySize : cSize;
break;
+ case HAL_PIXEL_FORMAT_YCBCR_P010:
+ if (mLockedBuffer->height % 2 != 0) {
+ ALOGE("YCBCR_P010: height (%d) should be a multiple of 2", mLockedBuffer->height);
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ if (mLockedBuffer->width <= 0) {
+ ALOGE("YCBCR_P010: width (%d) should be a > 0", mLockedBuffer->width);
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ if (mLockedBuffer->height <= 0) {
+ ALOGE("YCBCR_P010: height (%d) should be a > 0", mLockedBuffer->height);
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
+ if (mLockedBuffer->dataCb && mLockedBuffer->dataCr) {
+ pData = (planeIdx == 0) ? mLockedBuffer->data :
+ (planeIdx == 1) ? mLockedBuffer->dataCb : mLockedBuffer->dataCr;
+ // only map until last pixel
+ if (planeIdx == 0) {
+ cStride = mLockedBuffer->stride;
+ dataSize = cStride * (mLockedBuffer->height - 1) + mLockedBuffer->width * 2;
+ } else {
+ bytesPerPixel = mLockedBuffer->chromaStep;
+ cStride = mLockedBuffer->chromaStride;
+ dataSize = cStride * (mLockedBuffer->height / 2 - 1) +
+ bytesPerPixel * (mLockedBuffer->width / 2);
+ }
+ break;
+ }
+
+ cStride = mLockedBuffer->stride * 2;
+ ySize = cStride * mLockedBuffer->height;
+ cSize = ySize / 2;
+ cb = mLockedBuffer->data + ySize;
+ cr = cb + 2;
+
+ pData = (planeIdx == 0) ? mLockedBuffer->data : (planeIdx == 1) ? cb : cr;
+ dataSize = (planeIdx == 0) ? ySize : cSize;
+ break;
case HAL_PIXEL_FORMAT_Y8:
// Single plane, 8bpp.
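To make the interleaved-chroma branch above concrete, here is a worked sketch of the P010 layout arithmetic for a hypothetical 1080p buffer whose stride equals its width; real strides come from the gralloc allocation, so the numbers are illustrative only:

    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t height = 1080, stride = 1920;   // stride assumed equal to width

        // P010 stores 16 bits per sample, so one row of Y occupies stride * 2 bytes.
        const uint32_t rowStride = stride * 2;        // 3840 bytes
        const uint32_t ySize = rowStride * height;    // 4147200 bytes
        const uint32_t cSize = ySize / 2;             // interleaved CbCr plane, half the Y size

        // Without separate dataCb/dataCr pointers, chroma follows Y and is interleaved:
        // Cb and Cr share the Y row stride, Cr starts 2 bytes after Cb, and the
        // chroma pixel stride is 4 bytes (2 bytes Cb + 2 bytes Cr).
        const uint32_t yPixelStride = 2;
        const uint32_t cPixelStride = 4;

        std::printf("Y:    rowStride=%u size=%u pixelStride=%u\n", rowStride, ySize, yPixelStride);
        std::printf("CbCr: rowStride=%u size=%u pixelStride=%u\n", rowStride, cSize, cPixelStride);
        return 0;
    }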
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 1067e24..9270499 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -73,6 +73,7 @@
case AIMAGE_FORMAT_HEIC:
case AIMAGE_FORMAT_DEPTH_JPEG:
case AIMAGE_FORMAT_RAW_DEPTH10:
+ case HAL_PIXEL_FORMAT_YCBCR_P010:
return true;
case AIMAGE_FORMAT_PRIVATE:
// For private format, cpu usage is prohibited.
@@ -86,6 +87,7 @@
AImageReader::getNumPlanesForFormat(int32_t format) {
switch (format) {
case AIMAGE_FORMAT_YUV_420_888:
+ case HAL_PIXEL_FORMAT_YCBCR_P010:
return 3;
case AIMAGE_FORMAT_RGBA_8888:
case AIMAGE_FORMAT_RGBX_8888:
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 38e422d..ed31c02 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <charconv>
#include <inttypes.h>
#include <mutex>
#include <set>
@@ -324,29 +325,61 @@
}
AMessage::Type type;
- int64_t mediaTimeUs, systemNano;
- size_t index = 0;
+ size_t n = data->countEntries();
- // TODO. This code has dependency with MediaCodec::CreateFramesRenderedMessage.
- for (size_t ix = 0; ix < data->countEntries(); ix++) {
- AString name = data->getEntryNameAt(ix, &type);
- if (name.startsWith(AStringPrintf("%zu-media-time-us", index).c_str())) {
- AMessage::ItemData data = msg->getEntryAt(index);
- data.find(&mediaTimeUs);
- } else if (name.startsWith(AStringPrintf("%zu-system-nano", index).c_str())) {
- AMessage::ItemData data = msg->getEntryAt(index);
- data.find(&systemNano);
+ thread_local std::vector<std::optional<int64_t>> mediaTimesInUs;
+ thread_local std::vector<std::optional<int64_t>> systemTimesInNs;
+ mediaTimesInUs.resize(n);
+ systemTimesInNs.resize(n);
+ std::fill_n(mediaTimesInUs.begin(), n, std::nullopt);
+ std::fill_n(systemTimesInNs.begin(), n, std::nullopt);
+ for (size_t i = 0; i < n; i++) {
+ AString name = data->getEntryNameAt(i, &type);
+ if (name.endsWith("-media-time-us")) {
+ int64_t mediaTimeUs;
+ AMessage::ItemData itemData = data->getEntryAt(i);
+ itemData.find(&mediaTimeUs);
- Mutex::Autolock _l(mCodec->mFrameRenderedCallbackLock);
- if (mCodec->mFrameRenderedCallback != NULL) {
+ int index = -1;
+ std::from_chars_result result = std::from_chars(
+ name.c_str(), name.c_str() + name.find("-"), index);
+ if (result.ec == std::errc() && 0 <= index && index < n) {
+ mediaTimesInUs[index] = mediaTimeUs;
+ } else {
+ std::error_code ec = std::make_error_code(result.ec);
+ ALOGE("Unexpected media time index: #%d with value %lldus (err=%d %s)",
+ index, (long long)mediaTimeUs, ec.value(), ec.message().c_str());
+ }
+ } else if (name.endsWith("-system-nano")) {
+ int64_t systemNano;
+ AMessage::ItemData itemData = data->getEntryAt(i);
+ itemData.find(&systemNano);
+
+ int index = -1;
+ std::from_chars_result result = std::from_chars(
+ name.c_str(), name.c_str() + name.find("-"), index);
+ if (result.ec == std::errc() && 0 <= index && index < n) {
+ systemTimesInNs[index] = systemNano;
+ } else {
+ std::error_code ec = std::make_error_code(result.ec);
+ ALOGE("Unexpected system time index: #%d with value %lldns (err=%d %s)",
+ index, (long long)systemNano, ec.value(), ec.message().c_str());
+ }
+ }
+ }
+
+ Mutex::Autolock _l(mCodec->mFrameRenderedCallbackLock);
+ if (mCodec->mFrameRenderedCallback != NULL) {
+ for (size_t i = 0; i < n; ++i) {
+ if (mediaTimesInUs[i] && systemTimesInNs[i]) {
mCodec->mFrameRenderedCallback(
mCodec,
mCodec->mFrameRenderedCallbackUserData,
- mediaTimeUs,
- systemNano);
+ mediaTimesInUs[i].value(),
+ systemTimesInNs[i].value());
+ } else {
+ break;
}
-
- index++;
}
}
break;
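The index extraction above relies on std::from_chars stopping at the supplied end pointer and reporting success through an error code rather than by throwing. A standalone sketch of parsing the numeric prefix from an entry name such as "3-media-time-us" (the entry name is illustrative):

    #include <charconv>
    #include <cstdio>
    #include <string>
    #include <system_error>

    int main() {
        const std::string name = "3-media-time-us";   // hypothetical AMessage entry name
        const size_t dash = name.find('-');
        if (dash == std::string::npos) return 1;

        int index = -1;
        // Parse only the characters before the first '-'; failure is reported via result.ec.
        std::from_chars_result result =
                std::from_chars(name.c_str(), name.c_str() + dash, index);
        if (result.ec == std::errc()) {
            std::printf("parsed frame index %d\n", index);   // prints 3
        } else {
            std::printf("malformed entry name\n");
        }
        return 0;
    }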
@@ -529,22 +562,31 @@
AMediaCodecOnAsyncNotifyCallback callback,
void *userdata) {
- Mutex::Autolock _l(mData->mAsyncCallbackLock);
-
- if (mData->mAsyncNotify == NULL) {
- mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ {
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ if (mData->mAsyncNotify == NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ }
+        // Set the callback ahead of time so that we are ready to receive
+        // callbacks as soon as the setCallback() call below succeeds.
+ mData->mAsyncCallback = callback;
+ mData->mAsyncCallbackUserData = userdata;
}
// always call, codec may have been reset/re-configured since last call.
status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
if (err != OK) {
+ {
+            // The setup went wrong; clean up the pointers.
+ Mutex::Autolock _l(mData->mAsyncCallbackLock);
+ mData->mAsyncCallback = {};
+ mData->mAsyncCallbackUserData = nullptr;
+ }
ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
return translate_error(err);
}
- mData->mAsyncCallback = callback;
- mData->mAsyncCallbackUserData = userdata;
-
return AMEDIA_OK;
}
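The async-callback change above follows a set-ahead, roll-back-on-failure locking pattern: the callback is published under the lock before the codec is armed, and cleared again under the same lock if arming fails. A simplified standalone sketch of the idea, using a hypothetical Registry class rather than the NDK types:

    #include <functional>
    #include <mutex>

    class Registry {                       // hypothetical stand-in for the codec wrapper
    public:
        bool setCallback(std::function<void()> cb) {
            {
                // Publish the callback first so an early notification already sees valid state.
                std::lock_guard<std::mutex> lock(mLock);
                mCallback = std::move(cb);
            }
            if (!armNotifier()) {          // the call that may fail, like setCallback() above
                // Roll back under the same lock if the setup went wrong.
                std::lock_guard<std::mutex> lock(mLock);
                mCallback = nullptr;
                return false;
            }
            return true;
        }

    private:
        bool armNotifier() { return true; }   // placeholder
        std::mutex mLock;
        std::function<void()> mCallback;
    };

    int main() {
        Registry r;
        return r.setCallback([] {}) ? 0 : 1;
    }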
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
index 043bc9e..a0628fa 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeDecoder.cpp
@@ -19,6 +19,7 @@
#include <jni.h>
#include <fstream>
+#include <memory>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
@@ -42,7 +43,7 @@
return -1;
}
- Decoder *decoder = new Decoder();
+ std::unique_ptr<Decoder> decoder(new (std::nothrow) Decoder());
Extractor *extractor = decoder->getExtractor();
if (!extractor) {
ALOGE("Extractor creation failed");
@@ -125,6 +126,5 @@
inputFp = nullptr;
}
extractor->deInitExtractor();
- delete decoder;
return 0;
}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeMuxer.cpp b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeMuxer.cpp
index a5ef5b8..a297526 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeMuxer.cpp
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/cpp/NativeMuxer.cpp
@@ -19,6 +19,7 @@
#include <jni.h>
#include <fstream>
+#include <memory>
#include <string>
#include <sys/stat.h>
@@ -47,7 +48,7 @@
return MUXER_OUTPUT_FORMAT_INVALID;
}
- Muxer *muxerObj = new Muxer();
+ std::unique_ptr<Muxer> muxerObj(new (std::nothrow) Muxer());
Extractor *extractor = muxerObj->getExtractor();
if (!extractor) {
ALOGE("Extractor creation failed");
@@ -159,7 +160,6 @@
}
env->ReleaseStringUTFChars(jFormat, fmt);
extractor->deInitExtractor();
- delete muxerObj;
return 0;
}
diff --git a/media/tests/benchmark/src/native/encoder/C2Encoder.cpp b/media/tests/benchmark/src/native/encoder/C2Encoder.cpp
index ca79881..5b7a471 100644
--- a/media/tests/benchmark/src/native/encoder/C2Encoder.cpp
+++ b/media/tests/benchmark/src/native/encoder/C2Encoder.cpp
@@ -17,11 +17,13 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "C2Encoder"
+#include <memory>
+
#include "C2Encoder.h"
int32_t C2Encoder::createCodec2Component(string compName, AMediaFormat *format) {
ALOGV("In %s", __func__);
- mListener.reset(new CodecListener(
+ mListener.reset(new (std::nothrow) CodecListener(
[this](std::list<std::unique_ptr<C2Work>> &workItems) { handleWorkDone(workItems); }));
if (!mListener) return -1;
@@ -125,7 +127,7 @@
}
int32_t numFrames = (inputBufferSize + frameSize - 1) / frameSize;
// Temporary buffer to read data from the input file
- char *data = (char *)malloc(frameSize);
+ std::unique_ptr<char[]> data(new (std::nothrow) char[frameSize]);
if (!data) {
ALOGE("Insufficient memory to read from input file");
return -1;
@@ -169,7 +171,7 @@
if (inputBufferSize - offset < frameSize) {
frameSize = inputBufferSize - offset;
}
- eleStream.read(data, frameSize);
+ eleStream.read(data.get(), frameSize);
if (eleStream.gcount() != frameSize) {
ALOGE("read() from file failed. Incorrect bytes read");
return -1;
@@ -191,8 +193,8 @@
return view.error();
}
- memcpy(view.base(), data, frameSize);
- work->input.buffers.emplace_back(new LinearBuffer(block));
+ memcpy(view.base(), data.get(), frameSize);
+ work->input.buffers.emplace_back(new (std::nothrow) LinearBuffer(block));
} else {
std::shared_ptr<C2GraphicBlock> block;
status = mGraphicPool->fetchGraphicBlock(
@@ -211,16 +213,16 @@
uint8_t *pY = view.data()[C2PlanarLayout::PLANE_Y];
uint8_t *pU = view.data()[C2PlanarLayout::PLANE_U];
uint8_t *pV = view.data()[C2PlanarLayout::PLANE_V];
- memcpy(pY, data, mWidth * mHeight);
- memcpy(pU, data + mWidth * mHeight, (mWidth * mHeight >> 2));
- memcpy(pV, data + (mWidth * mHeight * 5 >> 2), mWidth * mHeight >> 2);
- work->input.buffers.emplace_back(new GraphicBuffer(block));
+ memcpy(pY, data.get(), mWidth * mHeight);
+ memcpy(pU, data.get() + mWidth * mHeight, (mWidth * mHeight >> 2));
+ memcpy(pV, data.get() + (mWidth * mHeight * 5 >> 2), mWidth * mHeight >> 2);
+ work->input.buffers.emplace_back(new (std::nothrow) GraphicBuffer(block));
}
mStats->addFrameSize(frameSize);
}
work->worklets.clear();
- work->worklets.emplace_back(new C2Worklet);
+ work->worklets.emplace_back(new (std::nothrow) C2Worklet);
std::list<std::unique_ptr<C2Work>> items;
items.push_back(std::move(work));
@@ -234,7 +236,6 @@
numFrames--;
mNumInputFrame++;
}
- free(data);
return status;
}
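Several of the benchmark and test changes above pair operator new(std::nothrow) with std::unique_ptr, so an allocation failure shows up as a null pointer (as with the old malloc()) while the delete[] is automatic. A minimal sketch of the pattern with an illustrative buffer size:

    #include <cstddef>
    #include <cstdio>
    #include <memory>
    #include <new>

    int main() {
        constexpr size_t kFrameSize = 4096;   // illustrative frame size
        // new (std::nothrow) returns nullptr on failure instead of throwing, so the
        // result can be checked like malloc() while unique_ptr owns the delete[].
        std::unique_ptr<char[]> data(new (std::nothrow) char[kFrameSize]);
        if (!data) {
            std::printf("Insufficient memory to read from input file\n");
            return -1;
        }
        data[0] = 0;   // use data.get() wherever a raw pointer is still required
        return 0;
    }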
diff --git a/media/tests/benchmark/tests/C2DecoderTest.cpp b/media/tests/benchmark/tests/C2DecoderTest.cpp
index 85dcbc1..6e4d9e1 100644
--- a/media/tests/benchmark/tests/C2DecoderTest.cpp
+++ b/media/tests/benchmark/tests/C2DecoderTest.cpp
@@ -20,6 +20,7 @@
#include <fstream>
#include <iostream>
#include <limits>
+#include <memory>
#include "BenchmarkTestEnvironment.h"
#include "C2Decoder.h"
@@ -50,7 +51,7 @@
};
void C2DecoderTest::setupC2DecoderTest() {
- mDecoder = new C2Decoder();
+ mDecoder = new (std::nothrow) C2Decoder();
ASSERT_NE(mDecoder, nullptr) << "C2Decoder creation failed";
int32_t status = mDecoder->setupCodec2();
@@ -66,7 +67,7 @@
FILE *inputFp = fopen(inputFile.c_str(), "rb");
ASSERT_NE(inputFp, nullptr) << "Unable to open " << inputFile << " file for reading";
- Extractor *extractor = new Extractor();
+ std::unique_ptr<Extractor> extractor(new (std::nothrow) Extractor());
ASSERT_NE(extractor, nullptr) << "Extractor creation failed";
// Read file properties
@@ -85,7 +86,7 @@
int32_t status = extractor->setupTrackFormat(curTrack);
ASSERT_EQ(status, 0) << "Track Format invalid";
- uint8_t *inputBuffer = (uint8_t *)malloc(fileSize);
+ std::unique_ptr<uint8_t[]> inputBuffer(new (std::nothrow) uint8_t[fileSize]);
ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
vector<AMediaCodecBufferInfo> frameInfo;
@@ -100,7 +101,7 @@
// copy the meta data and buffer to be passed to decoder
ASSERT_LE(inputBufferOffset + info.size, fileSize) << "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, csdBuffer, info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, csdBuffer, info.size);
frameInfo.push_back(info);
inputBufferOffset += info.size;
idx++;
@@ -113,7 +114,7 @@
// copy the meta data and buffer to be passed to decoder
ASSERT_LE(inputBufferOffset + info.size, fileSize) << "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, extractor->getFrameBuf(), info.size);
frameInfo.push_back(info);
inputBufferOffset += info.size;
}
@@ -127,7 +128,7 @@
ASSERT_EQ(status, 0) << "Create component failed for " << codecName;
// Send the inputs to C2 Decoder and wait till all buffers are returned.
- status = mDecoder->decodeFrames(inputBuffer, frameInfo);
+ status = mDecoder->decodeFrames(inputBuffer.get(), frameInfo);
ASSERT_EQ(status, 0) << "Decoder failed for " << codecName;
mDecoder->waitOnInputConsumption();
@@ -141,10 +142,8 @@
mDecoder->resetDecoder();
}
}
- free(inputBuffer);
fclose(inputFp);
extractor->deInitExtractor();
- delete extractor;
delete mDecoder;
mDecoder = nullptr;
}
@@ -174,7 +173,7 @@
make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "hevc")));
int main(int argc, char **argv) {
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
diff --git a/media/tests/benchmark/tests/C2EncoderTest.cpp b/media/tests/benchmark/tests/C2EncoderTest.cpp
index b18d856..dfbe496 100644
--- a/media/tests/benchmark/tests/C2EncoderTest.cpp
+++ b/media/tests/benchmark/tests/C2EncoderTest.cpp
@@ -20,6 +20,7 @@
#include <fstream>
#include <iostream>
#include <limits>
+#include <memory>
#include "BenchmarkTestEnvironment.h"
#include "C2Encoder.h"
@@ -50,7 +51,7 @@
};
void C2EncoderTest::setupC2EncoderTest() {
- mEncoder = new C2Encoder();
+ mEncoder = new (std::nothrow) C2Encoder();
ASSERT_NE(mEncoder, nullptr) << "C2Encoder creation failed";
int32_t status = mEncoder->setupCodec2();
@@ -66,7 +67,7 @@
FILE *inputFp = fopen(inputFile.c_str(), "rb");
ASSERT_NE(inputFp, nullptr) << "Unable to open input file for reading";
- Decoder *decoder = new Decoder();
+ std::unique_ptr<Decoder> decoder(new (std::nothrow) Decoder());
ASSERT_NE(decoder, nullptr) << "Decoder creation failed";
Extractor *extractor = decoder->getExtractor();
@@ -88,7 +89,7 @@
int32_t status = extractor->setupTrackFormat(curTrack);
ASSERT_EQ(status, 0) << "Track Format invalid";
- uint8_t *inputBuffer = (uint8_t *)malloc(fileSize);
+ std::unique_ptr<uint8_t[]> inputBuffer(new (std::nothrow) uint8_t[fileSize]);
ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
vector<AMediaCodecBufferInfo> frameInfo;
@@ -102,7 +103,7 @@
// copy the meta data and buffer to be passed to decoder
ASSERT_LE(inputBufferOffset + info.size, fileSize) << "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, extractor->getFrameBuf(), info.size);
frameInfo.push_back(info);
inputBufferOffset += info.size;
}
@@ -114,7 +115,8 @@
<< " for dumping decoder's output";
decoder->setupDecoder();
- status = decoder->decode(inputBuffer, frameInfo, decName, false /*asyncMode */, outFp);
+ status = decoder->decode(inputBuffer.get(), frameInfo, decName,
+ false /*asyncMode */, outFp);
ASSERT_EQ(status, AMEDIA_OK) << "Decode returned error : " << status;
// Encode the given input stream for all C2 codecs supported by device
@@ -149,11 +151,9 @@
// Destroy the decoder for the given input
decoder->deInitCodec();
decoder->resetDecoder();
- free(inputBuffer);
}
fclose(inputFp);
extractor->deInitExtractor();
- delete decoder;
delete mEncoder;
mEncoder = nullptr;
}
@@ -176,7 +176,7 @@
make_pair("crowd_1920x1080_25fps_4000kbps_h265.mkv", "hevc")));
int main(int argc, char **argv) {
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
diff --git a/media/tests/benchmark/tests/DecoderTest.cpp b/media/tests/benchmark/tests/DecoderTest.cpp
index 3666724..b363df3 100644
--- a/media/tests/benchmark/tests/DecoderTest.cpp
+++ b/media/tests/benchmark/tests/DecoderTest.cpp
@@ -20,6 +20,7 @@
#include <fstream>
#include <iostream>
#include <limits>
+#include <memory>
#include <android/binder_process.h>
@@ -38,7 +39,7 @@
FILE *inputFp = fopen(inputFile.c_str(), "rb");
ASSERT_NE(inputFp, nullptr) << "Unable to open " << inputFile << " file for reading";
- Decoder *decoder = new Decoder();
+ std::unique_ptr<Decoder> decoder(new (std::nothrow) Decoder());
ASSERT_NE(decoder, nullptr) << "Decoder creation failed";
Extractor *extractor = decoder->getExtractor();
@@ -57,7 +58,7 @@
int32_t status = extractor->setupTrackFormat(curTrack);
ASSERT_EQ(status, 0) << "Track Format invalid";
- uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ std::unique_ptr<uint8_t[]> inputBuffer(new (std::nothrow) uint8_t[kMaxBufferSize]);
ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
vector<AMediaCodecBufferInfo> frameInfo;
@@ -72,7 +73,7 @@
ASSERT_LE(inputBufferOffset + info.size, kMaxBufferSize)
<< "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, extractor->getFrameBuf(), info.size);
frameInfo.push_back(info);
inputBufferOffset += info.size;
}
@@ -80,7 +81,7 @@
string codecName = get<1>(params);
bool asyncMode = get<2>(params);
decoder->setupDecoder();
- status = decoder->decode(inputBuffer, frameInfo, codecName, asyncMode);
+ status = decoder->decode(inputBuffer.get(), frameInfo, codecName, asyncMode);
ASSERT_EQ(status, AMEDIA_OK) << "Decoder failed for " << codecName;
decoder->deInitCodec();
@@ -88,12 +89,10 @@
string inputReference = get<0>(params);
decoder->dumpStatistics(inputReference, codecName, (asyncMode ? "async" : "sync"),
gEnv->getStatsFile());
- free(inputBuffer);
decoder->resetDecoder();
}
fclose(inputFp);
extractor->deInitExtractor();
- delete decoder;
}
// TODO: (b/140549596)
@@ -178,7 +177,7 @@
int main(int argc, char **argv) {
ABinderProcess_startThreadPool();
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
@@ -190,4 +189,4 @@
ALOGV("Decoder Test result = %d\n", status);
}
return status;
-}
\ No newline at end of file
+}
diff --git a/media/tests/benchmark/tests/EncoderTest.cpp b/media/tests/benchmark/tests/EncoderTest.cpp
index 7e1681d..6703b99 100644
--- a/media/tests/benchmark/tests/EncoderTest.cpp
+++ b/media/tests/benchmark/tests/EncoderTest.cpp
@@ -18,6 +18,7 @@
#define LOG_TAG "encoderTest"
#include <fstream>
+#include <memory>
#include "BenchmarkTestEnvironment.h"
#include "Decoder.h"
@@ -39,7 +40,7 @@
FILE *inputFp = fopen(inputFile.c_str(), "rb");
ASSERT_NE(inputFp, nullptr) << "Unable to open " << inputFile << " file for reading";
- Decoder *decoder = new Decoder();
+ std::unique_ptr<Decoder> decoder(new (std::nothrow) Decoder());
ASSERT_NE(decoder, nullptr) << "Decoder creation failed";
Extractor *extractor = decoder->getExtractor();
@@ -54,14 +55,14 @@
int32_t trackCount = extractor->initExtractor(fd, fileSize);
ASSERT_GT(trackCount, 0) << "initExtractor failed";
- Encoder *encoder = new Encoder();
+ std::unique_ptr<Encoder> encoder(new (std::nothrow) Encoder());
ASSERT_NE(encoder, nullptr) << "Decoder creation failed";
for (int curTrack = 0; curTrack < trackCount; curTrack++) {
int32_t status = extractor->setupTrackFormat(curTrack);
ASSERT_EQ(status, 0) << "Track Format invalid";
- uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ std::unique_ptr<uint8_t[]> inputBuffer(new (std::nothrow) uint8_t[kMaxBufferSize]);
ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
vector<AMediaCodecBufferInfo> frameInfo;
@@ -76,7 +77,7 @@
ASSERT_LE(inputBufferOffset + info.size, kMaxBufferSize)
<< "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, extractor->getFrameBuf(), info.size);
frameInfo.push_back(info);
inputBufferOffset += info.size;
}
@@ -88,7 +89,7 @@
<< " for dumping decoder's output";
decoder->setupDecoder();
- status = decoder->decode(inputBuffer, frameInfo, decName, false /*asyncMode */, outFp);
+ status = decoder->decode(inputBuffer.get(), frameInfo, decName, false /*asyncMode */, outFp);
ASSERT_EQ(status, AMEDIA_OK) << "Decode returned error : " << status;
AMediaFormat *decoderFormat = decoder->getFormat();
@@ -154,13 +155,10 @@
}
encoder->resetEncoder();
decoder->deInitCodec();
- free(inputBuffer);
decoder->resetDecoder();
}
- delete encoder;
fclose(inputFp);
extractor->deInitExtractor();
- delete decoder;
}
INSTANTIATE_TEST_SUITE_P(
@@ -220,7 +218,7 @@
"c2.android.hevc.encoder", true)));
int main(int argc, char **argv) {
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
diff --git a/media/tests/benchmark/tests/ExtractorTest.cpp b/media/tests/benchmark/tests/ExtractorTest.cpp
index 27ee9ba..35fb465 100644
--- a/media/tests/benchmark/tests/ExtractorTest.cpp
+++ b/media/tests/benchmark/tests/ExtractorTest.cpp
@@ -17,6 +17,8 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "extractorTest"
+#include <memory>
+
#include <gtest/gtest.h>
#include <android/binder_process.h>
@@ -29,7 +31,7 @@
class ExtractorTest : public ::testing::TestWithParam<pair<string, int32_t>> {};
TEST_P(ExtractorTest, Extract) {
- Extractor *extractObj = new Extractor();
+ std::unique_ptr<Extractor> extractObj(new (std::nothrow) Extractor());
ASSERT_NE(extractObj, nullptr) << "Extractor creation failed";
string inputFile = gEnv->getRes() + GetParam().first;
@@ -53,7 +55,6 @@
extractObj->dumpStatistics(GetParam().first, "", gEnv->getStatsFile());
fclose(inputFp);
- delete extractObj;
}
INSTANTIATE_TEST_SUITE_P(ExtractorTestAll, ExtractorTest,
@@ -76,7 +77,7 @@
int main(int argc, char **argv) {
ABinderProcess_startThreadPool();
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
diff --git a/media/tests/benchmark/tests/MuxerTest.cpp b/media/tests/benchmark/tests/MuxerTest.cpp
index 991644b..65a3df4 100644
--- a/media/tests/benchmark/tests/MuxerTest.cpp
+++ b/media/tests/benchmark/tests/MuxerTest.cpp
@@ -20,6 +20,7 @@
#include <fstream>
#include <iostream>
+#include <memory>
#include "BenchmarkTestEnvironment.h"
#include "Muxer.h"
@@ -59,7 +60,7 @@
MUXER_OUTPUT_T outputFormat = getMuxerOutFormat(fmt);
ASSERT_NE(outputFormat, MUXER_OUTPUT_FORMAT_INVALID) << "Invalid muxer output format";
- Muxer *muxerObj = new Muxer();
+ std::unique_ptr<Muxer> muxerObj(new (std::nothrow) Muxer());
ASSERT_NE(muxerObj, nullptr) << "Muxer creation failed";
Extractor *extractor = muxerObj->getExtractor();
@@ -78,7 +79,7 @@
int32_t status = extractor->setupTrackFormat(curTrack);
ASSERT_EQ(status, 0) << "Track Format invalid";
- uint8_t *inputBuffer = (uint8_t *)malloc(kMaxBufferSize);
+ std::unique_ptr<uint8_t[]> inputBuffer(new (std::nothrow) uint8_t[kMaxBufferSize]);
ASSERT_NE(inputBuffer, nullptr) << "Insufficient memory";
// AMediaCodecBufferInfo : <size of frame> <flags> <presentationTimeUs> <offset>
@@ -94,7 +95,7 @@
ASSERT_LE(inputBufferOffset + info.size, kMaxBufferSize)
<< "Memory allocated not sufficient";
- memcpy(inputBuffer + inputBufferOffset, extractor->getFrameBuf(), info.size);
+ memcpy(inputBuffer.get() + inputBufferOffset, extractor->getFrameBuf(), info.size);
info.offset = inputBufferOffset;
frameInfos.push_back(info);
inputBufferOffset += info.size;
@@ -109,18 +110,16 @@
status = muxerObj->initMuxer(fd, outputFormat);
ASSERT_EQ(status, 0) << "initMuxer failed";
- status = muxerObj->mux(inputBuffer, frameInfos);
+ status = muxerObj->mux(inputBuffer.get(), frameInfos);
ASSERT_EQ(status, 0) << "Mux failed";
muxerObj->deInitMuxer();
muxerObj->dumpStatistics(GetParam().first + "." + fmt.c_str(), fmt, gEnv->getStatsFile());
- free(inputBuffer);
fclose(outputFp);
muxerObj->resetMuxer();
}
fclose(inputFp);
extractor->deInitExtractor();
- delete muxerObj;
}
INSTANTIATE_TEST_SUITE_P(
@@ -146,7 +145,7 @@
make_pair("bbb_16000hz_1ch_9kbps_amrwb_5mins.3gp", "3gpp")));
int main(int argc, char **argv) {
- gEnv = new BenchmarkTestEnvironment();
+ gEnv = new (std::nothrow) BenchmarkTestEnvironment();
::testing::AddGlobalTestEnvironment(gEnv);
::testing::InitGoogleTest(&argc, argv);
int status = gEnv->initFromOptions(argc, argv);
diff --git a/media/utils/MethodStatistics.cpp b/media/utils/MethodStatistics.cpp
index b179b20..086757b 100644
--- a/media/utils/MethodStatistics.cpp
+++ b/media/utils/MethodStatistics.cpp
@@ -22,7 +22,8 @@
std::shared_ptr<std::vector<std::string>>
getStatisticsClassesForModule(std::string_view moduleName) {
- static const std::map<std::string, std::shared_ptr<std::vector<std::string>>> m {
+ static const std::map<std::string, std::shared_ptr<std::vector<std::string>>,
+ std::less<> /* transparent comparator */> m {
{
METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL,
std::shared_ptr<std::vector<std::string>>(
@@ -34,13 +35,14 @@
})
},
};
- auto it = m.find({moduleName.begin(), moduleName.end()});
+ auto it = m.find(moduleName);
if (it == m.end()) return {};
return it->second;
}
static void addClassesToMap(const std::shared_ptr<std::vector<std::string>> &classNames,
- std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>> &map) {
+ std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>,
+ std::less<> /* transparent comparator */> &map) {
if (classNames) {
for (const auto& className : *classNames) {
map.emplace(className, std::make_shared<MethodStatistics<std::string>>());
@@ -51,17 +53,18 @@
// singleton statistics for DeviceHalHidl StreamOutHalHidl StreamInHalHidl
std::shared_ptr<MethodStatistics<std::string>>
getStatisticsForClass(std::string_view className) {
- static const std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>> m =
+ static const std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>,
+ std::less<> /* transparent comparator */> m =
// copy elided initialization of map m.
[](){
- std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>> m;
+ std::map<std::string, std::shared_ptr<MethodStatistics<std::string>>, std::less<>> m;
addClassesToMap(
getStatisticsClassesForModule(METHOD_STATISTICS_MODULE_NAME_AUDIO_HIDL),
m);
return m;
}();
- auto it = m.find({className.begin(), className.end()});
+ auto it = m.find(className);
if (it == m.end()) return {};
return it->second;
}
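The std::less<> comparator added above is transparent, which enables heterogeneous lookup: a std::string_view can be passed straight to find() without first materializing a std::string key. A small standalone sketch:

    #include <cstdio>
    #include <map>
    #include <string>
    #include <string_view>

    int main() {
        // std::less<> defines is_transparent, so find() accepts any type comparable
        // with std::string; the string_view below is never copied into a temporary.
        static const std::map<std::string, int, std::less<>> m{
            {"DeviceHalHidl", 1},
            {"StreamOutHalHidl", 2},
        };

        std::string_view className = "DeviceHalHidl";
        auto it = m.find(className);   // no temporary std::string allocated
        std::printf("%s\n", it != m.end() ? "found" : "not found");
        return 0;
    }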
diff --git a/media/utils/SchedulingPolicyService.cpp b/media/utils/SchedulingPolicyService.cpp
index ad38862..6e515ff 100644
--- a/media/utils/SchedulingPolicyService.cpp
+++ b/media/utils/SchedulingPolicyService.cpp
@@ -18,6 +18,7 @@
//#define LOG_NDEBUG 0
#include <binder/IServiceManager.h>
+#include <cutils/properties.h>
#include <utils/Mutex.h>
#include "ISchedulingPolicyService.h"
#include "mediautils/SchedulingPolicyService.h"
@@ -86,4 +87,25 @@
return ret;
}
+int requestSpatializerPriority(pid_t pid, pid_t tid) {
+ if (pid == -1 || tid == -1) return BAD_VALUE;
+
+ // update priority to RT if specified.
+ constexpr int32_t kRTPriorityMin = 1;
+ constexpr int32_t kRTPriorityMax = 3;
+ const int32_t priorityBoost =
+ property_get_int32("audio.spatializer.priority", kRTPriorityMin);
+ if (priorityBoost >= kRTPriorityMin && priorityBoost <= kRTPriorityMax) {
+ const status_t status = requestPriority(
+ pid, tid, priorityBoost, false /* isForApp */, true /*asynchronous*/);
+ if (status != OK) {
+ ALOGW("%s: Cannot request spatializer priority boost %d, status:%d",
+ __func__, priorityBoost, status);
+ return status < 0 ? status : UNKNOWN_ERROR;
+ }
+ return priorityBoost;
+ }
+ return 0; // no boost requested
+}
+
} // namespace android
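requestSpatializerPriority() above returns the applied boost (1 to 3) on success, 0 when the audio.spatializer.priority property requests no boost, and a negative status when requestPriority() fails. A hedged, self-contained sketch of that return convention, using a stub in place of the real property read and binder call:

    #include <cstdio>

    // Hypothetical stand-in that mirrors the return convention described above:
    //   > 0  the RT priority boost that was applied (1..3)
    //   == 0 no boost requested via the property
    //   < 0  an error from requestPriority() (not modeled here)
    int requestSpatializerPriorityStub(int propertyValue) {
        constexpr int kRTPriorityMin = 1;
        constexpr int kRTPriorityMax = 3;
        if (propertyValue >= kRTPriorityMin && propertyValue <= kRTPriorityMax) {
            return propertyValue;   // pretend requestPriority() succeeded
        }
        return 0;                   // out-of-range values request no boost
    }

    int main() {
        const int values[] = {0, 2, 7};
        for (int v : values) {
            std::printf("property=%d -> boost=%d\n", v, requestSpatializerPriorityStub(v));
        }
        return 0;
    }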
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 0848eb3..6823f4f 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -21,6 +21,7 @@
#include <android-base/logging.h>
#include <audio_utils/clock.h>
#include <mediautils/EventLog.h>
+#include <mediautils/FixedString.h>
#include <mediautils/MethodStatistics.h>
#include <mediautils/TimeCheck.h>
#include <utils/Log.h>
@@ -138,22 +139,24 @@
return getTimeCheckThread().toString();
}
-TimeCheck::TimeCheck(std::string tag, OnTimerFunc&& onTimer, uint32_t timeoutMs,
- bool crashOnTimeout)
- : mTimeCheckHandler(new TimeCheckHandler{
- std::move(tag), std::move(onTimer), crashOnTimeout,
- std::chrono::system_clock::now(), gettid()})
- , mTimerHandle(timeoutMs == 0
+TimeCheck::TimeCheck(std::string_view tag, OnTimerFunc&& onTimer, Duration requestedTimeoutDuration,
+ Duration secondChanceDuration, bool crashOnTimeout)
+ : mTimeCheckHandler{ std::make_shared<TimeCheckHandler>(
+ tag, std::move(onTimer), crashOnTimeout, requestedTimeoutDuration,
+ secondChanceDuration, std::chrono::system_clock::now(), gettid()) }
+ , mTimerHandle(requestedTimeoutDuration.count() == 0
+ /* for TimeCheck we don't consider a non-zero secondChanceDuration here */
? getTimeCheckThread().trackTask(mTimeCheckHandler->tag)
: getTimeCheckThread().scheduleTask(
mTimeCheckHandler->tag,
// Pass in all the arguments by value to this task for safety.
// The thread could call the callback before the constructor is finished.
// The destructor is not blocked on callback.
- [ timeCheckHandler = mTimeCheckHandler ] {
- timeCheckHandler->onTimeout();
+ [ timeCheckHandler = mTimeCheckHandler ](TimerThread::Handle timerHandle) {
+ timeCheckHandler->onTimeout(timerHandle);
},
- std::chrono::milliseconds(timeoutMs))) {}
+ requestedTimeoutDuration,
+ secondChanceDuration)) {}
TimeCheck::~TimeCheck() {
if (mTimeCheckHandler) {
@@ -161,23 +164,77 @@
}
}
+/* static */
+std::string TimeCheck::analyzeTimeouts(
+ float requestedTimeoutMs, float elapsedSteadyMs, float elapsedSystemMs) {
+ // Track any OS clock issues with suspend.
+    // It is possible that elapsedSystemMs is much greater than elapsedSteadyMs if
+    // a suspend occurs; however, we expect the timeout ms to be slightly less than
+    // the elapsed steady ms regardless of whether a suspend occurs or not.
+
+ std::string s("Timeout ms ");
+ s.append(std::to_string(requestedTimeoutMs))
+ .append(" elapsed steady ms ").append(std::to_string(elapsedSteadyMs))
+ .append(" elapsed system ms ").append(std::to_string(elapsedSystemMs));
+
+ // Is there something unusual?
+ static constexpr float TOLERANCE_CONTEXT_SWITCH_MS = 200.f;
+
+ if (requestedTimeoutMs > elapsedSteadyMs || requestedTimeoutMs > elapsedSystemMs) {
+ s.append("\nError: early expiration - "
+ "requestedTimeoutMs should be less than elapsed time");
+ }
+
+ if (elapsedSteadyMs > elapsedSystemMs + TOLERANCE_CONTEXT_SWITCH_MS) {
+ s.append("\nWarning: steady time should not advance faster than system time");
+ }
+
+ // This has been found in suspend stress testing.
+ if (elapsedSteadyMs > requestedTimeoutMs + TOLERANCE_CONTEXT_SWITCH_MS) {
+ s.append("\nWarning: steady time significantly exceeds timeout "
+ "- possible thread stall or aborted suspend");
+ }
+
+ // This has been found in suspend stress testing.
+ if (elapsedSystemMs > requestedTimeoutMs + TOLERANCE_CONTEXT_SWITCH_MS) {
+ s.append("\nInformation: system time significantly exceeds timeout "
+ "- possible suspend");
+ }
+ return s;
+}
+
+// To avoid any potential race conditions, the timer handle
+// (expiration = clock steady start + timeout) is passed into the callback.
void TimeCheck::TimeCheckHandler::onCancel(TimerThread::Handle timerHandle) const
{
if (TimeCheck::getTimeCheckThread().cancelTask(timerHandle) && onTimer) {
- const std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
- onTimer(false /* timeout */,
- std::chrono::duration_cast<std::chrono::duration<float, std::milli>>(
- endTime - startTime).count());
+ const std::chrono::steady_clock::time_point endSteadyTime =
+ std::chrono::steady_clock::now();
+ const float elapsedSteadyMs = std::chrono::duration_cast<FloatMs>(
+ endSteadyTime - timerHandle + timeoutDuration).count();
+ // send the elapsed steady time for statistics.
+ onTimer(false /* timeout */, elapsedSteadyMs);
}
}
-void TimeCheck::TimeCheckHandler::onTimeout() const
+// To avoid any potential race conditions, the timer handle
+// (expiration = clock steady start + timeout) is passed into the callback.
+void TimeCheck::TimeCheckHandler::onTimeout(TimerThread::Handle timerHandle) const
{
- const std::chrono::system_clock::time_point endTime = std::chrono::system_clock::now();
+ const std::chrono::steady_clock::time_point endSteadyTime = std::chrono::steady_clock::now();
+ const std::chrono::system_clock::time_point endSystemTime = std::chrono::system_clock::now();
+ // timerHandle incorporates the timeout
+ const float elapsedSteadyMs = std::chrono::duration_cast<FloatMs>(
+ endSteadyTime - (timerHandle - timeoutDuration)).count();
+ const float elapsedSystemMs = std::chrono::duration_cast<FloatMs>(
+ endSystemTime - startSystemTime).count();
+ const float requestedTimeoutMs = std::chrono::duration_cast<FloatMs>(
+ timeoutDuration).count();
+ const float secondChanceMs = std::chrono::duration_cast<FloatMs>(
+ secondChanceDuration).count();
+
if (onTimer) {
- onTimer(true /* timeout */,
- std::chrono::duration_cast<std::chrono::duration<float, std::milli>>(
- endTime - startTime).count());
+ onTimer(true /* timeout */, elapsedSteadyMs);
}
if (!crashOnTimeout) return;
@@ -207,8 +264,10 @@
// Create abort message string - caution: this can be very large.
const std::string abortMessage = std::string("TimeCheck timeout for ")
.append(tag)
- .append(" scheduled ").append(formatTime(startTime))
+ .append(" scheduled ").append(formatTime(startSystemTime))
.append(" on thread ").append(std::to_string(tid)).append("\n")
+ .append(analyzeTimeouts(requestedTimeoutMs + secondChanceMs,
+ elapsedSteadyMs, elapsedSystemMs)).append("\n")
.append(halPids).append("\n")
.append(summary);
@@ -231,16 +290,16 @@
mediautils::getStatisticsForClass(className);
if (!statistics) return {}; // empty TimeCheck.
return mediautils::TimeCheck(
- std::string(className).append("::").append(methodName),
- [ clazz = std::string(className), method = std::string(methodName),
+ FixedString62(className).append("::").append(methodName),
+ [ safeMethodName = FixedString30(methodName),
stats = std::move(statistics) ]
(bool timeout, float elapsedMs) {
if (timeout) {
; // ignored, there is no timeout value.
} else {
- stats->event(method, elapsedMs);
+ stats->event(safeMethodName.asStringView(), elapsedMs);
}
- }, 0 /* timeoutMs */);
+ }, {} /* timeoutDuration */, {} /* secondChanceDuration */, false /* crashOnTimeout */);
}
} // namespace android::mediautils
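The analyzeTimeouts() heuristics above can be summarized with a few hypothetical numbers: with a 3000 ms requested timeout, an elapsed steady time of about 3050 ms is normal; a steady time well past the timeout suggests a thread stall or aborted suspend; and a system time far beyond both indicates a suspend, since only the system clock advances across suspend. A simplified standalone restatement (single classification instead of the appended warnings, with a 200 ms tolerance mirroring TOLERANCE_CONTEXT_SWITCH_MS):

    #include <cstdio>
    #include <string>

    std::string classify(float requestedMs, float steadyMs, float systemMs) {
        constexpr float kToleranceMs = 200.f;   // mirrors TOLERANCE_CONTEXT_SWITCH_MS
        if (requestedMs > steadyMs || requestedMs > systemMs) return "early expiration";
        if (steadyMs > systemMs + kToleranceMs) return "steady clock ahead of system clock";
        if (steadyMs > requestedMs + kToleranceMs) return "thread stall or aborted suspend";
        if (systemMs > requestedMs + kToleranceMs) return "possible suspend";
        return "normal";
    }

    int main() {
        std::printf("%s\n", classify(3000.f, 3050.f, 3060.f).c_str());   // normal
        std::printf("%s\n", classify(3000.f, 3050.f, 9000.f).c_str());   // possible suspend
        std::printf("%s\n", classify(3000.f, 3500.f, 3600.f).c_str());   // thread stall or aborted suspend
        return 0;
    }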
diff --git a/media/utils/TimerThread-test.cpp b/media/utils/TimerThread-test.cpp
index 93cd64c..9452c07 100644
--- a/media/utils/TimerThread-test.cpp
+++ b/media/utils/TimerThread-test.cpp
@@ -33,10 +33,28 @@
return std::count(s.begin(), s.end(), c);
}
-TEST(TimerThread, Basic) {
+
+// Split the given msec between the timeout and the second chance time.
+// This tests expiration times weighted between the timeout and the second chance time.
+#define DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(msec, frac) \
+ std::chrono::milliseconds(int((msec) * (frac)) + 1), \
+ std::chrono::milliseconds(int((msec) * (1.f - (frac))))
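As a quick arithmetic check of the macro above, a 100 ms budget split with frac = 0.25 yields a 26 ms first timeout and a 75 ms second chance, so the pair still covers roughly the original deadline. A standalone sketch of the same computation with illustrative values:

    #include <chrono>
    #include <cstdio>

    int main() {
        const int msec = 100;      // illustrative total budget
        const float frac = 0.25f;  // illustrative split fraction
        // Mirrors DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(msec, frac).
        const std::chrono::milliseconds timeout(int(msec * frac) + 1);           // 26 ms
        const std::chrono::milliseconds secondChance(int(msec * (1.f - frac)));  // 75 ms
        std::printf("timeout=%lld ms, secondChance=%lld ms\n",
                    (long long)timeout.count(), (long long)secondChance.count());
        return 0;
    }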
+
+// The TimerThreadTest is parameterized on a fraction between 0.f and 1.f that determines
+// how the total timeout time is split between the first timeout and the second chance time.
+//
+class TimerThreadTest : public ::testing::TestWithParam<float> {
+protected:
+
+static void testBasic() {
+ const auto frac = GetParam();
+
std::atomic<bool> taskRan = false;
TimerThread thread;
- thread.scheduleTask("Basic", [&taskRan] { taskRan = true; }, 100ms);
+ TimerThread::Handle handle =
+ thread.scheduleTask("Basic", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(100, frac));
+ ASSERT_TRUE(TimerThread::isTimeoutHandle(handle));
std::this_thread::sleep_for(100ms - kJitter);
ASSERT_FALSE(taskRan);
std::this_thread::sleep_for(2 * kJitter);
@@ -44,11 +62,15 @@
ASSERT_EQ(1, countChars(thread.retiredToString(), REQUEST_START));
}
-TEST(TimerThread, Cancel) {
+static void testCancel() {
+ const auto frac = GetParam();
+
std::atomic<bool> taskRan = false;
TimerThread thread;
TimerThread::Handle handle =
- thread.scheduleTask("Cancel", [&taskRan] { taskRan = true; }, 100ms);
+ thread.scheduleTask("Cancel", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(100, frac));
+ ASSERT_TRUE(TimerThread::isTimeoutHandle(handle));
std::this_thread::sleep_for(100ms - kJitter);
ASSERT_FALSE(taskRan);
ASSERT_TRUE(thread.cancelTask(handle));
@@ -57,88 +79,87 @@
ASSERT_EQ(1, countChars(thread.retiredToString(), REQUEST_START));
}
-TEST(TimerThread, CancelAfterRun) {
+static void testCancelAfterRun() {
+ const auto frac = GetParam();
+
std::atomic<bool> taskRan = false;
TimerThread thread;
TimerThread::Handle handle =
- thread.scheduleTask("CancelAfterRun", [&taskRan] { taskRan = true; }, 100ms);
+ thread.scheduleTask("CancelAfterRun",
+ [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan = true; },
+ DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(100, frac));
+ ASSERT_TRUE(TimerThread::isTimeoutHandle(handle));
std::this_thread::sleep_for(100ms + kJitter);
ASSERT_TRUE(taskRan);
ASSERT_FALSE(thread.cancelTask(handle));
ASSERT_EQ(1, countChars(thread.retiredToString(), REQUEST_START));
}
-TEST(TimerThread, MultipleTasks) {
+static void testMultipleTasks() {
+ const auto frac = GetParam();
+
std::array<std::atomic<bool>, 6> taskRan{};
TimerThread thread;
auto startTime = std::chrono::steady_clock::now();
- thread.scheduleTask("0", [&taskRan] { taskRan[0] = true; }, 300ms);
- thread.scheduleTask("1", [&taskRan] { taskRan[1] = true; }, 100ms);
- thread.scheduleTask("2", [&taskRan] { taskRan[2] = true; }, 200ms);
- thread.scheduleTask("3", [&taskRan] { taskRan[3] = true; }, 400ms);
- auto handle4 = thread.scheduleTask("4", [&taskRan] { taskRan[4] = true; }, 200ms);
- thread.scheduleTask("5", [&taskRan] { taskRan[5] = true; }, 200ms);
+ thread.scheduleTask("0", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[0] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(300, frac));
+ thread.scheduleTask("1", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[1] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(100, frac));
+ thread.scheduleTask("2", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[2] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(200, frac));
+ thread.scheduleTask("3", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[3] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(400, frac));
+ auto handle4 = thread.scheduleTask("4", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[4] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(200, frac));
+ thread.scheduleTask("5", [&taskRan](TimerThread::Handle handle __unused) {
+ taskRan[5] = true; }, DISTRIBUTE_TIMEOUT_SECONDCHANCE_MS_FRAC(200, frac));
// 6 tasks pending
ASSERT_EQ(6, countChars(thread.pendingToString(), REQUEST_START));
// 0 tasks completed
ASSERT_EQ(0, countChars(thread.retiredToString(), REQUEST_START));
+ // None of the tasks are expected to have finished at the start.
+ std::array<std::atomic<bool>, 6> expected{};
+
// Task 1 should trigger around 100ms.
std::this_thread::sleep_until(startTime + 100ms - kJitter);
- ASSERT_FALSE(taskRan[0]);
- ASSERT_FALSE(taskRan[1]);
- ASSERT_FALSE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_FALSE(taskRan[5]);
+
+ ASSERT_EQ(expected, taskRan);
+
std::this_thread::sleep_until(startTime + 100ms + kJitter);
- ASSERT_FALSE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_FALSE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_FALSE(taskRan[5]);
+
+ expected[1] = true;
+ ASSERT_EQ(expected, taskRan);
// Cancel task 4 before it gets a chance to run.
thread.cancelTask(handle4);
// Tasks 2 and 5 should trigger around 200ms.
std::this_thread::sleep_until(startTime + 200ms - kJitter);
- ASSERT_FALSE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_FALSE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_FALSE(taskRan[5]);
+
+ ASSERT_EQ(expected, taskRan);
+
std::this_thread::sleep_until(startTime + 200ms + kJitter);
- ASSERT_FALSE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_TRUE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_TRUE(taskRan[5]);
+
+ expected[2] = true;
+ expected[5] = true;
+ ASSERT_EQ(expected, taskRan);
// Task 0 should trigger around 300ms.
std::this_thread::sleep_until(startTime + 300ms - kJitter);
- ASSERT_FALSE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_TRUE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_TRUE(taskRan[5]);
+
+ ASSERT_EQ(expected, taskRan);
std::this_thread::sleep_until(startTime + 300ms + kJitter);
- ASSERT_TRUE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_TRUE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_TRUE(taskRan[5]);
+
+ expected[0] = true;
+ ASSERT_EQ(expected, taskRan);
// 1 task pending
ASSERT_EQ(1, countChars(thread.pendingToString(), REQUEST_START));
@@ -147,23 +168,16 @@
// Task 3 should trigger around 400ms.
std::this_thread::sleep_until(startTime + 400ms - kJitter);
- ASSERT_TRUE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_TRUE(taskRan[2]);
- ASSERT_FALSE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_TRUE(taskRan[5]);
+
+ ASSERT_EQ(expected, taskRan);
// 4 tasks ran and 1 cancelled
ASSERT_EQ(4 + 1, countChars(thread.retiredToString(), REQUEST_START));
std::this_thread::sleep_until(startTime + 400ms + kJitter);
- ASSERT_TRUE(taskRan[0]);
- ASSERT_TRUE(taskRan[1]);
- ASSERT_TRUE(taskRan[2]);
- ASSERT_TRUE(taskRan[3]);
- ASSERT_FALSE(taskRan[4]);
- ASSERT_TRUE(taskRan[5]);
+
+ expected[3] = true;
+ ASSERT_EQ(expected, taskRan);
// 0 tasks pending
ASSERT_EQ(0, countChars(thread.pendingToString(), REQUEST_START));
@@ -171,6 +185,30 @@
ASSERT_EQ(5 + 1, countChars(thread.retiredToString(), REQUEST_START));
}
+}; // class TimerThreadTest
+
+TEST_P(TimerThreadTest, Basic) {
+ testBasic();
+}
+
+TEST_P(TimerThreadTest, Cancel) {
+ testCancel();
+}
+
+TEST_P(TimerThreadTest, CancelAfterRun) {
+ testCancelAfterRun();
+}
+
+TEST_P(TimerThreadTest, MultipleTasks) {
+ testMultipleTasks();
+}
+
+INSTANTIATE_TEST_CASE_P(
+ TimerThread,
+ TimerThreadTest,
+ ::testing::Values(0.f, 0.5f, 1.f)
+ );
+
TEST(TimerThread, TrackedTasks) {
TimerThread thread;
@@ -178,6 +216,10 @@
auto handle1 = thread.trackTask("1");
auto handle2 = thread.trackTask("2");
+ ASSERT_TRUE(TimerThread::isNoTimeoutHandle(handle0));
+ ASSERT_TRUE(TimerThread::isNoTimeoutHandle(handle1));
+ ASSERT_TRUE(TimerThread::isNoTimeoutHandle(handle2));
+
// 3 tasks pending
ASSERT_EQ(3, countChars(thread.pendingToString(), REQUEST_START));
// 0 tasks retired
@@ -201,6 +243,7 @@
// Add another tracked task.
auto handle3 = thread.trackTask("3");
+ ASSERT_TRUE(TimerThread::isNoTimeoutHandle(handle3));
// 2 tasks pending
ASSERT_EQ(2, countChars(thread.pendingToString(), REQUEST_START));
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
index 6de6b13..5e58a3d 100644
--- a/media/utils/TimerThread.cpp
+++ b/media/utils/TimerThread.cpp
@@ -23,30 +23,35 @@
#include <mediautils/MediaUtilsDelayed.h>
#include <mediautils/TimerThread.h>
+#include <utils/Log.h>
#include <utils/ThreadDefs.h>
+using namespace std::chrono_literals;
+
namespace android::mediautils {
extern std::string formatTime(std::chrono::system_clock::time_point t);
extern std::string_view timeSuffix(std::string_view time1, std::string_view time2);
TimerThread::Handle TimerThread::scheduleTask(
- std::string tag, std::function<void()>&& func, std::chrono::milliseconds timeout) {
+ std::string_view tag, TimerCallback&& func,
+ Duration timeoutDuration, Duration secondChanceDuration) {
const auto now = std::chrono::system_clock::now();
- std::shared_ptr<const Request> request{
- new Request{ now, now + timeout, gettid(), std::move(tag) }};
- return mMonitorThread.add(std::move(request), std::move(func), timeout);
+ auto request = std::make_shared<const Request>(now, now +
+ std::chrono::duration_cast<std::chrono::system_clock::duration>(timeoutDuration),
+ secondChanceDuration, gettid(), tag);
+ return mMonitorThread.add(std::move(request), std::move(func), timeoutDuration);
}
-TimerThread::Handle TimerThread::trackTask(std::string tag) {
+TimerThread::Handle TimerThread::trackTask(std::string_view tag) {
const auto now = std::chrono::system_clock::now();
- std::shared_ptr<const Request> request{
- new Request{ now, now, gettid(), std::move(tag) }};
+ auto request = std::make_shared<const Request>(now, now,
+ Duration{} /* secondChanceDuration */, gettid(), tag);
return mNoTimeoutMap.add(std::move(request));
}
bool TimerThread::cancelTask(Handle handle) {
- std::shared_ptr<const Request> request = mNoTimeoutMap.isValidHandle(handle) ?
+ std::shared_ptr<const Request> request = isNoTimeoutHandle(handle) ?
mNoTimeoutMap.remove(handle) : mMonitorThread.remove(handle);
if (!request) return false;
mRetiredQueue.add(std::move(request));
@@ -84,6 +89,8 @@
return std::string("now ")
.append(formatTime(std::chrono::system_clock::now()))
+ .append("\nsecondChanceCount ")
+ .append(std::to_string(mMonitorThread.getSecondChanceCount()))
.append(analysisSummary)
.append("\ntimeout [ ")
.append(requestsToString(timeoutRequests))
@@ -106,10 +113,10 @@
//
/* static */
bool TimerThread::isRequestFromHal(const std::shared_ptr<const Request>& request) {
- const size_t hidlPos = request->tag.find("Hidl");
+ const size_t hidlPos = request->tag.asStringView().find("Hidl");
if (hidlPos == std::string::npos) return false;
// should be a separator afterwards Hidl which indicates the string was in the class.
- const size_t separatorPos = request->tag.find("::", hidlPos);
+ const size_t separatorPos = request->tag.asStringView().find("::", hidlPos);
return separatorPos != std::string::npos;
}
@@ -127,12 +134,12 @@
std::vector<std::shared_ptr<const Request>> pendingExact;
std::vector<std::shared_ptr<const Request>> pendingPossible;
- // We look at pending requests that were scheduled no later than kDuration
+ // We look at pending requests that were scheduled no later than kPendingDuration
// after the timeout request. This prevents false matches with calls
// that naturally block for a short period of time
// such as HAL write() and read().
//
- auto constexpr kDuration = std::chrono::milliseconds(1000);
+ constexpr Duration kPendingDuration = 1000ms;
for (const auto& pending : pendingRequests) {
// If the pending tid is the same as timeout tid, problem identified.
if (pending->tid == timeout->tid) {
@@ -141,7 +148,7 @@
}
// if the pending tid is scheduled within time limit
- if (pending->scheduled - timeout->scheduled < kDuration) {
+ if (pending->scheduled - timeout->scheduled < kPendingDuration) {
pendingPossible.emplace_back(pending);
}
}
@@ -241,15 +248,11 @@
}
}
-bool TimerThread::NoTimeoutMap::isValidHandle(Handle handle) const {
- return handle > getIndexedHandle(mNoTimeoutRequests);
-}
-
TimerThread::Handle TimerThread::NoTimeoutMap::add(std::shared_ptr<const Request> request) {
std::lock_guard lg(mNTMutex);
// A unique handle is obtained by mNoTimeoutRequests.fetch_add(1),
// This need not be under a lock, but we do so anyhow.
- const Handle handle = getIndexedHandle(mNoTimeoutRequests++);
+ const Handle handle = getUniqueHandle_l();
mMap[handle] = request;
return handle;
}
@@ -271,16 +274,6 @@
}
}
-TimerThread::Handle TimerThread::MonitorThread::getUniqueHandle_l(
- std::chrono::milliseconds timeout) {
- // To avoid key collisions, advance by 1 tick until the key is unique.
- auto deadline = std::chrono::steady_clock::now() + timeout;
- for (; mMonitorRequests.find(deadline) != mMonitorRequests.end();
- deadline += std::chrono::steady_clock::duration(1))
- ;
- return deadline;
-}
-
TimerThread::MonitorThread::MonitorThread(RequestQueue& timeoutQueue)
: mTimeoutQueue(timeoutQueue)
, mThread([this] { threadFunc(); }) {
@@ -300,24 +293,78 @@
void TimerThread::MonitorThread::threadFunc() {
std::unique_lock _l(mMutex);
while (!mShouldExit) {
+ Handle nextDeadline = INVALID_HANDLE;
+ Handle now = INVALID_HANDLE;
if (!mMonitorRequests.empty()) {
- Handle nextDeadline = mMonitorRequests.begin()->first;
- if (nextDeadline < std::chrono::steady_clock::now()) {
+ nextDeadline = mMonitorRequests.begin()->first;
+ now = std::chrono::steady_clock::now();
+ if (nextDeadline < now) {
+ auto node = mMonitorRequests.extract(mMonitorRequests.begin());
// Deadline has expired, handle the request.
+ auto secondChanceDuration = node.mapped().first->secondChanceDuration;
+ if (secondChanceDuration.count() != 0) {
+ // We now apply the second chance duration to find the clock
+ // monotonic second deadline. The unique key is then the
+ // pair<second_deadline, first_deadline>.
+ //
+ // The second chance prevents a false timeout should there be
+ // any clock monotonic advancement during suspend.
+ auto newHandle = now + secondChanceDuration;
+ ALOGD("%s: TimeCheck second chance applied for %s",
+ __func__, node.mapped().first->tag.c_str()); // should be rare event.
+ mSecondChanceRequests.emplace_hint(mSecondChanceRequests.end(),
+ std::make_pair(newHandle, nextDeadline),
+ std::move(node.mapped()));
+ // increment second chance counter.
+ mSecondChanceCount.fetch_add(1 /* arg */, std::memory_order_relaxed);
+ } else {
+ {
+ _l.unlock();
+ // We add Request to retired queue early so that it can be dumped out.
+ mTimeoutQueue.add(std::move(node.mapped().first));
+ node.mapped().second(nextDeadline);
+ // Caution: we don't hold lock when we call TimerCallback,
+ // but this is the timeout case! We will crash soon,
+ // maybe before returning.
+ // anything left over is released here outside lock.
+ }
+ // reacquire the lock - if something was added, we loop immediately to check.
+ _l.lock();
+ }
+ // always process expiring monitor requests first.
+ continue;
+ }
+ }
+ // now process any second chance requests.
+ if (!mSecondChanceRequests.empty()) {
+ Handle secondDeadline = mSecondChanceRequests.begin()->first.first;
+ if (now == INVALID_HANDLE) now = std::chrono::steady_clock::now();
+ if (secondDeadline < now) {
+ auto node = mSecondChanceRequests.extract(mSecondChanceRequests.begin());
{
- auto node = mMonitorRequests.extract(mMonitorRequests.begin());
_l.unlock();
// We add Request to retired queue early so that it can be dumped out.
mTimeoutQueue.add(std::move(node.mapped().first));
- node.mapped().second(); // Caution: we don't hold lock here - but do we care?
- // this is the timeout case! We will crash soon,
- // maybe before returning.
- // anything left over is released here outside lock.
+ const Handle originalHandle = node.key().second;
+ node.mapped().second(originalHandle);
+ // Caution: we don't hold lock when we call TimerCallback.
+ // This is benign issue - we permit concurrent operations
+ // while in the callback to the MonitorQueue.
+ //
+ // Anything left over is released here outside lock.
}
// reacquire the lock - if something was added, we loop immediately to check.
_l.lock();
continue;
}
+ // update the deadline.
+ if (nextDeadline == INVALID_HANDLE) {
+ nextDeadline = secondDeadline;
+ } else {
+ nextDeadline = std::min(nextDeadline, secondDeadline);
+ }
+ }
+ if (nextDeadline != INVALID_HANDLE) {
mCond.wait_until(_l, nextDeadline);
} else {
mCond.wait(_l);
@@ -326,26 +373,39 @@
}
TimerThread::Handle TimerThread::MonitorThread::add(
- std::shared_ptr<const Request> request, std::function<void()>&& func,
- std::chrono::milliseconds timeout) {
+ std::shared_ptr<const Request> request, TimerCallback&& func, Duration timeout) {
std::lock_guard _l(mMutex);
const Handle handle = getUniqueHandle_l(timeout);
- mMonitorRequests.emplace(handle, std::make_pair(std::move(request), std::move(func)));
+ mMonitorRequests.emplace_hint(mMonitorRequests.end(),
+ handle, std::make_pair(std::move(request), std::move(func)));
mCond.notify_all();
return handle;
}
std::shared_ptr<const TimerThread::Request> TimerThread::MonitorThread::remove(Handle handle) {
+ std::pair<std::shared_ptr<const Request>, TimerCallback> data;
std::unique_lock ul(mMutex);
- const auto it = mMonitorRequests.find(handle);
- if (it == mMonitorRequests.end()) {
- return {};
+ if (const auto it = mMonitorRequests.find(handle);
+ it != mMonitorRequests.end()) {
+ data = std::move(it->second);
+ mMonitorRequests.erase(it);
+ ul.unlock(); // manually release lock here so func (data.second)
+ // is released outside of lock.
+ return data.first; // request
}
- std::shared_ptr<const TimerThread::Request> request = std::move(it->second.first);
- std::function<void()> func = std::move(it->second.second);
- mMonitorRequests.erase(it);
- ul.unlock(); // manually release lock here so func is released outside of lock.
- return request;
+
+ // this check is O(N), but since the second chance requests are ordered
+ // in terms of earliest expiration time, we would expect better than average results.
+ for (auto it = mSecondChanceRequests.begin(); it != mSecondChanceRequests.end(); ++it) {
+ if (it->first.second == handle) {
+ data = std::move(it->second);
+ mSecondChanceRequests.erase(it);
+ ul.unlock(); // manually release lock here so func (data.second)
+ // is released outside of lock.
+ return data.first; // request
+ }
+ }
+ return {};
}
void TimerThread::MonitorThread::copyRequests(
@@ -354,6 +414,13 @@
for (const auto &[deadline, monitorpair] : mMonitorRequests) {
requests.emplace_back(monitorpair.first);
}
+ // we combine the second map with the first map - this is
+ // everything that is pending on the monitor thread.
+ // The second map will be older than the first map so this
+ // is in order.
+ for (const auto &[deadline, monitorpair] : mSecondChanceRequests) {
+ requests.emplace_back(monitorpair.first);
+ }
}
} // namespace android::mediautils
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 51e8d7a..15f043a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -25,6 +25,7 @@
static constexpr int kMaxOperations = 50;
static constexpr int kMaxStringLen = 256;
+static constexpr int kMaxSpaces = 1000;
using android::content::AttributionSourceState;
@@ -35,7 +36,9 @@
pm.allowPlaybackCapture(uid);
},
[](FuzzedDataProvider* data_provider, android::MediaPackageManager pm) -> void {
- int spaces = data_provider->ConsumeIntegral<int>();
+ /* A large value of spaces made the file write operation very slow.
+ * Limit spaces to a bounded range to avoid fuzzer timeouts. */
+ int spaces = data_provider->ConsumeIntegralInRange<int>(0, kMaxSpaces);
// Dump everything into /dev/null
int fd = open("/dev/null", O_WRONLY);
diff --git a/media/utils/fuzzers/TimeCheckFuzz.cpp b/media/utils/fuzzers/TimeCheckFuzz.cpp
index 7966469..65b2885 100644
--- a/media/utils/fuzzers/TimeCheckFuzz.cpp
+++ b/media/utils/fuzzers/TimeCheckFuzz.cpp
@@ -48,7 +48,9 @@
std::string name = data_provider.ConsumeRandomLengthString(kMaxStringLen);
// 3. The constructor, which is fuzzed here:
- android::mediautils::TimeCheck timeCheck(name.c_str(), {} /* onTimer */, timeoutMs);
+ android::mediautils::TimeCheck timeCheck(name.c_str(), {} /* onTimer */,
+ std::chrono::milliseconds(timeoutMs),
+ {} /* secondChanceDuration */, true /* crashOnTimeout */);
// We will leave some buffer to avoid sleeping too long
uint8_t sleep_amount_ms = data_provider.ConsumeIntegralInRange<uint8_t>(0, timeoutMs / 2);
diff --git a/media/utils/include/mediautils/FixedString.h b/media/utils/include/mediautils/FixedString.h
new file mode 100644
index 0000000..047aa82
--- /dev/null
+++ b/media/utils/include/mediautils/FixedString.h
@@ -0,0 +1,282 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <string>
+#include <string_view>
+
+namespace android::mediautils {
+
+/*
+ * FixedString is a stack allocatable string buffer that supports
+ * simple appending of other strings and string_views.
+ *
+ * It is designed for no-malloc operation when std::string
+ * small buffer optimization is insufficient.
+ *
+ * To keep code small, use asStringView() for operations on this.
+ *
+ * Notes:
+ * 1) Appending beyond the internal buffer size results in truncation.
+ *
+ * Alternatives:
+ * 1) If you want a sharable copy-on-write string implementation,
+ * consider using the legacy android::String8().
+ * 2) Using std::string with a fixed stack allocator may suit your needs,
+ * but exception avoidance is tricky.
+ * 3) Using C++20 ranges https://en.cppreference.com/w/cpp/ranges if you don't
+ * need backing store. Be careful about allocation with ranges.
+ *
+ * Good small sizes are multiples of 16 minus 2, e.g. 14, 30, 46, 62.
+ *
+ * Implementation Notes:
+ * 1) No iterators or [] for FixedString - please convert to std::string_view.
+ * 2) For small N (e.g. less than 62), consider changing the implementation to always zero fill
+ * and to potentially avoid always zero terminating (if only append is used).
+ *
+ * Possible arguments to create/append:
+ * 1) A FixedString.
+ * 2) A single char.
+ * 3) A char * pointer.
+ * 4) A std::string.
+ * 5) A std::string_view (or something convertible to it).
+ *
+ * Example:
+ *
+ * FixedString s1(std::string("a")); // ctor
+ * s1 << "bc" << 'd' << '\n'; // streaming append
+ * s1 += "hello"; // += append
+ * ASSERT_EQ(s1, "abcd\nhello");
+ */
+template <uint32_t N>
+struct FixedString
+{
+ // Find the best size type.
+ using strsize_t = std::conditional_t<(N > 255), uint32_t, uint8_t>;
+
+ // constructors
+ FixedString() { // override default
+ buffer_[0] = '\0';
+ }
+
+ FixedString(const FixedString& other) { // override default.
+ copyFrom<N>(other);
+ }
+
+ // The following constructor is not explicit to allow
+ // FixedString<8> s = "abcd";
+ template <typename ...Types>
+ FixedString(Types&&... args) {
+ append(std::forward<Types>(args)...);
+ }
+
+ // copy assign (copyFrom checks for equality and returns *this).
+ FixedString& operator=(const FixedString& other) { // override default.
+ return copyFrom<N>(other);
+ }
+
+ template <typename ...Types>
+ FixedString& operator=(Types&&... args) {
+ size_ = 0;
+ return append(std::forward<Types>(args)...);
+ }
+
+ // operator equals
+ bool operator==(const char *s) const {
+ return strncmp(c_str(), s, capacity() + 1) == 0;
+ }
+
+ bool operator==(std::string_view s) const {
+ return size() == s.size() && memcmp(data(), s.data(), size()) == 0;
+ }
+
+ // operator not-equals
+ template <typename T>
+ bool operator!=(const T& other) const {
+ return !operator==(other);
+ }
+
+ // operator +=
+ template <typename ...Types>
+ FixedString& operator+=(Types&&... args) {
+ return append(std::forward<Types>(args)...);
+ }
+
+ // conversion to std::string_view.
+ operator std::string_view() const {
+ return asStringView();
+ }
+
+ // basic observers
+ size_t buffer_offset() const { return offsetof(std::decay_t<decltype(*this)>, buffer_); }
+ static constexpr uint32_t capacity() { return N; }
+ uint32_t size() const { return size_; }
+ uint32_t remaining() const { return size_ >= N ? 0 : N - size_; }
+ bool empty() const { return size_ == 0; }
+ bool full() const { return size_ == N; } // when full, possible truncation risk.
+ char * data() { return buffer_; }
+ const char * data() const { return buffer_; }
+ const char * c_str() const { return buffer_; }
+
+ inline std::string_view asStringView() const {
+ return { buffer_, static_cast<size_t>(size_) };
+ }
+ inline std::string asString() const {
+ return { buffer_, static_cast<size_t>(size_) };
+ }
+
+ void clear() { size_ = 0; buffer_[0] = 0; }
+
+ // Implementation of append - using templates
+ // to guarantee precedence in the presence of ambiguity.
+ //
+ // Consider C++20 template overloading through constraints and concepts.
+ template <typename T>
+ FixedString& append(const T& t) {
+ using decayT = std::decay_t<T>;
+ if constexpr (is_specialization_v<decayT, FixedString>) {
+ // A FixedString<U>
+ if (size_ == 0) {
+ // optimization to erase everything.
+ return copyFrom(t);
+ } else {
+ return appendStringView({t.data(), t.size()});
+ }
+ } else if constexpr(std::is_same_v<decayT, char>) {
+ if (size_ < N) {
+ buffer_[size_++] = t;
+ buffer_[size_] = '\0';
+ }
+ return *this;
+ } else if constexpr(std::is_same_v<decayT, char *>) {
+ // Some char* ptr.
+ return appendString(t);
+ } else if constexpr (std::is_convertible_v<decayT, std::string_view>) {
+ // std::string_view, std::string, or some other convertible type.
+ return appendStringView(t);
+ } else /* constexpr */ {
+ static_assert(dependent_false_v<T>, "non-matching append type");
+ }
+ }
+
+ FixedString& appendStringView(std::string_view s) {
+ uint32_t total = std::min(static_cast<size_t>(N - size_), s.size());
+ memcpy(buffer_ + size_, s.data(), total);
+ size_ += total;
+ buffer_[size_] = '\0';
+ return *this;
+ }
+
+ FixedString& appendString(const char *s) {
+ // strncpy zero pads to the end,
+ // strlcpy returns total expected length,
+ // we don't have strncpy_s in Bionic,
+ // so we write our own here.
+ while (size_ < N && *s != '\0') {
+ buffer_[size_++] = *s++;
+ }
+ buffer_[size_] = '\0';
+ return *this;
+ }
+
+ // Copy initialize the struct.
+ // Note: We are POD but customize the copying for acceleration
+ // of moving small strings embedded in large buffers.
+ template <uint32_t U>
+ FixedString& copyFrom(const FixedString<U>& other) {
+ if ((void*)this != (void*)&other) { // not a self-assignment
+ if (other.size() == 0) {
+ size_ = 0;
+ buffer_[0] = '\0';
+ return *this;
+ }
+ constexpr size_t kSizeToCopyWhole = 64;
+ if constexpr (N == U &&
+ sizeof(*this) == sizeof(other) &&
+ sizeof(*this) <= kSizeToCopyWhole) {
+ // As we have the same str size type, we can just
+ // memcpy with fixed size, which can be easily optimized.
+ memcpy(static_cast<void*>(this), static_cast<const void*>(&other), sizeof(*this));
+ return *this;
+ }
+ if constexpr (std::is_same_v<strsize_t, typename FixedString<U>::strsize_t>) {
+ constexpr size_t kAlign = 8; // align to a multiple of 8.
+ static_assert((kAlign & (kAlign - 1)) == 0); // power of 2.
+ // Check that the buffer layouts are compatible before attempting a reduced-size copy.
+ if (buffer_offset() == other.buffer_offset() && other.size() <= capacity()) {
+ // improve on standard POD copying by reducing size.
+ const size_t mincpy = buffer_offset() + other.size() + 1 /* nul */;
+ const size_t maxcpy = std::min(sizeof(*this), sizeof(other));
+ const size_t cpysize = std::min((mincpy + kAlign - 1) & ~(kAlign - 1), maxcpy);
+ memcpy(static_cast<void*>(this), static_cast<const void*>(&other), cpysize);
+ return *this;
+ }
+ }
+ size_ = std::min(other.size(), capacity());
+ memcpy(buffer_, other.data(), size_);
+ buffer_[size_] = '\0'; // zero terminate.
+ }
+ return *this;
+ }
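A worked example of the reduced-size copy above (a sketch; the numbers assume FixedString62, whose one-byte size field places buffer_ at offset 1, and a source holding "abc"):

    // mincpy  = buffer_offset() + other.size() + 1 /* nul */ = 1 + 3 + 1 = 5
    // rounded = (mincpy + kAlign - 1) & ~(kAlign - 1)         = 8
    // maxcpy  = std::min(sizeof(*this), sizeof(other))        = 64
    // cpysize = std::min(8, 64)                               = 8  // 8 bytes copied instead of 64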
+
+private:
+ // Template helper methods
+
+ template <typename Test, template <uint32_t> class Ref>
+ struct is_specialization : std::false_type {};
+
+ template <template <uint32_t> class Ref, uint32_t UU>
+ struct is_specialization<Ref<UU>, Ref>: std::true_type {};
+
+ template <typename Test, template <uint32_t> class Ref>
+ static inline constexpr bool is_specialization_v = is_specialization<Test, Ref>::value;
+
+ // For static assert(false) we need a template version to avoid early failure.
+ template <typename T>
+ static inline constexpr bool dependent_false_v = false;
+
+ // POD variables
+ strsize_t size_ = 0;
+ char buffer_[N + 1 /* allow zero termination */];
+};
+
+// Stream operator syntactic sugar.
+// Example:
+// s << 'b' << "c" << "d" << '\n';
+template <uint32_t N, typename ...Types>
+FixedString<N>& operator<<(FixedString<N>& fs, Types&&... args) {
+ return fs.append(std::forward<Types>(args)...);
+}
+
+// We do not use a default size for fixed string as changing
+// the default size would lead to different behavior - we want the
+// size to be explicitly known.
+
+// FixedString62 of 62 chars fits in one typical cache line.
+using FixedString62 = FixedString<62>;
+
+// Slightly smaller
+using FixedString30 = FixedString<30>;
+
+// Since we have added copy and assignment optimizations,
+// we are no longer trivially assignable and copyable.
+// But we check standard layout here to prevent inclusion of unacceptable members or virtuals.
+static_assert(std::is_standard_layout_v<FixedString62>);
+static_assert(std::is_standard_layout_v<FixedString30>);
+
+} // namespace android::mediautils
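A small consistency check of the cache-line sizing noted above (a sketch; it assumes the layout in this header, a one-byte size field followed by an N+1 byte buffer with no padding):

    #include <mediautils/FixedString.h>

    // 1 byte size_ + 62 chars + 1 nul terminator = 64 bytes, one typical cache line.
    static_assert(sizeof(android::mediautils::FixedString62) == 64);
    static_assert(sizeof(android::mediautils::FixedString30) == 32);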
diff --git a/media/utils/include/mediautils/MethodStatistics.h b/media/utils/include/mediautils/MethodStatistics.h
index 700fbaa..6d7e990 100644
--- a/media/utils/include/mediautils/MethodStatistics.h
+++ b/media/utils/include/mediautils/MethodStatistics.h
@@ -55,15 +55,23 @@
/**
* Adds a method event, typically execution time in ms.
*/
- void event(Code code, FloatType executeMs) {
+ template <typename C>
+ void event(C&& code, FloatType executeMs) {
std::lock_guard lg(mLock);
- mStatisticsMap[code].add(executeMs);
+ auto it = mStatisticsMap.lower_bound(code);
+ if (it != mStatisticsMap.end() && it->first == code) {
+ it->second.add(executeMs);
+ } else {
+ // StatsType ctor takes an optional array of data for initialization.
+ FloatType dataArray[1] = { executeMs };
+ mStatisticsMap.emplace_hint(it, std::forward<C>(code), dataArray);
+ }
}
/**
* Returns the name for the method code.
*/
- std::string getMethodForCode(Code code) const {
+ std::string getMethodForCode(const Code& code) const {
auto it = mMethodMap.find(code);
return it == mMethodMap.end() ? std::to_string((int)code) : it->second;
}
@@ -71,7 +79,7 @@
/**
* Returns the number of times the method was invoked by event().
*/
- size_t getMethodCount(Code code) const {
+ size_t getMethodCount(const Code& code) const {
std::lock_guard lg(mLock);
auto it = mStatisticsMap.find(code);
return it == mStatisticsMap.end() ? 0 : it->second.getN();
@@ -80,7 +88,7 @@
/**
* Returns the statistics object for the method.
*/
- StatsType getStatistics(Code code) const {
+ StatsType getStatistics(const Code& code) const {
std::lock_guard lg(mLock);
auto it = mStatisticsMap.find(code);
return it == mStatisticsMap.end() ? StatsType{} : it->second;
@@ -107,9 +115,10 @@
}
private:
- const std::map<Code, std::string> mMethodMap;
+ // Note: we use a transparent comparator std::less<> for heterogeneous key lookup.
+ const std::map<Code, std::string, std::less<>> mMethodMap;
mutable std::mutex mLock;
- std::map<Code, StatsType> mStatisticsMap GUARDED_BY(mLock);
+ std::map<Code, StatsType, std::less<>> mStatisticsMap GUARDED_BY(mLock);
};
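To see why the transparent comparator matters, here is a minimal sketch using only standard types (not the actual Code/StatsType of this class): std::less<> allows find() and lower_bound() to take a key type different from the map's key, e.g. a std::string_view looked up against std::string keys, without materializing a temporary key.

    #include <map>
    #include <string>
    #include <string_view>

    int main() {
        std::map<std::string, int, std::less<>> statistics;
        statistics.emplace("IAudioFlinger::createTrack", 1);

        constexpr std::string_view key = "IAudioFlinger::createTrack";
        // Heterogeneous lookup: no temporary std::string is constructed for the query.
        return statistics.find(key) != statistics.end() ? 0 : 1;
    }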
// Managed Statistics support.
diff --git a/media/utils/include/mediautils/SchedulingPolicyService.h b/media/utils/include/mediautils/SchedulingPolicyService.h
index 546cec5..af1fcd2 100644
--- a/media/utils/include/mediautils/SchedulingPolicyService.h
+++ b/media/utils/include/mediautils/SchedulingPolicyService.h
@@ -23,7 +23,7 @@
class IBinder;
// Request elevated priority for thread tid, whose thread group leader must be pid.
-// The priority parameter is currently restricted to either 1 or 2.
+// The priority parameter is currently restricted from 1 to 3.
// The asynchronous parameter should be 'true' to return immediately,
// after the request is enqueued but not necessarily executed.
// The default value 'false' means to return after request has been enqueued and executed.
@@ -37,6 +37,12 @@
// 'client' is ignored in this case.
int requestCpusetBoost(bool enable, const sp<IBinder> &client);
+// Audio: Request Spatializer RT priority for thread tid, whose thread group leader must be pid.
+// Returns a positive value (the RT priority used) if successful,
+// zero if no RT priority was selected,
+// or a negative status code if the RT priority could not be set.
+int requestSpatializerPriority(pid_t pid, pid_t tid);
+
} // namespace android
#endif // _ANDROID_SCHEDULING_POLICY_SERVICE_H
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index ef03aef..bdb5337 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -16,6 +16,7 @@
#pragma once
+#include <chrono>
#include <vector>
#include <mediautils/TimerThread.h>
@@ -27,10 +28,33 @@
class TimeCheck {
public:
+
+ // Duration for TimeCheck is based on steady_clock, typically nanoseconds.
+ using Duration = std::chrono::steady_clock::duration;
+
+ // Duration for printing is in milliseconds, using float for additional precision.
+ using FloatMs = std::chrono::duration<float, std::milli>;
+
+ // OnTimerFunc is the callback function with 2 parameters.
+ // bool timeout (which is true when the TimeCheck object
+ // times out, false when the TimeCheck object is
+ // destroyed or leaves scope before the timer expires.)
+ // float elapsedMs (the elapsed time to this event).
using OnTimerFunc = std::function<void(bool /* timeout */, float /* elapsedMs */ )>;
// The default timeout is chosen to be less than system server watchdog timeout
- static constexpr uint32_t kDefaultTimeOutMs = 5000;
+ // Note: kDefaultTimeoutDuration should be no less than 2 seconds, otherwise spurious timeouts
+ // may occur with system suspend.
+ static constexpr TimeCheck::Duration kDefaultTimeoutDuration = std::chrono::milliseconds(3000);
+
+ // Due to suspend abort not incrementing the monotonic clock,
+ // we allow another second chance timeout after the first timeout expires.
+ //
+ // The total timeout is therefore kDefaultTimeoutDuration + kDefaultSecondChanceDuration,
+ // and the result is more stable when the monotonic clock increments during suspend.
+ //
+ static constexpr TimeCheck::Duration kDefaultSecondChanceDuration =
+ std::chrono::milliseconds(2000);
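For reference, simple arithmetic not spelled out here: the combined default budget is 3000 ms + 2000 ms = 5000 ms, the same worst case as the previous kDefaultTimeOutMs of 5000, while tolerating monotonic clock advancement during suspend. As a compile-time sketch:

    static_assert(android::mediautils::TimeCheck::kDefaultTimeoutDuration
            + android::mediautils::TimeCheck::kDefaultSecondChanceDuration
            == std::chrono::milliseconds(5000));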
/**
* TimeCheck is a RAII object which will notify a callback
@@ -44,24 +68,24 @@
* the deallocation.
*
* \param tag string associated with the TimeCheck object.
- * \param onTimer callback function with 2 parameters
- * bool timeout (which is true when the TimeCheck object
- * times out, false when the TimeCheck object is
- * destroyed or leaves scope before the timer expires.)
- * float elapsedMs (the elapsed time to this event).
+ * \param onTimer callback function with 2 parameters (described above in OnTimerFunc).
* The callback when timeout is true will be called on a different thread.
* This will cancel the callback on the destructor but is not guaranteed
* to block for callback completion if it is already in progress
* (for maximum concurrency and reduced deadlock potential), so use proper
* lifetime analysis (e.g. shared or weak pointers).
- * \param timeoutMs timeout in milliseconds.
+ * \param requestedTimeoutDuration the timeout duration.
* A zero timeout means no timeout is set -
* the callback is called only when
* the TimeCheck object is destroyed or leaves scope.
+ * \param secondChanceDuration additional milliseconds to wait if the first timeout expires.
+ * This is used to prevent false timeouts if the steady (monotonic)
+ * clock advances on aborted suspend.
* \param crashOnTimeout true if the object issues an abort on timeout.
*/
- explicit TimeCheck(std::string tag, OnTimerFunc&& onTimer = {},
- uint32_t timeoutMs = kDefaultTimeOutMs, bool crashOnTimeout = true);
+ explicit TimeCheck(std::string_view tag, OnTimerFunc&& onTimer,
+ Duration requestedTimeoutDuration, Duration secondChanceDuration,
+ bool crashOnTimeout);
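A usage sketch of the new constructor (the tag and enclosing scope are hypothetical; the argument order mirrors the audioserver call sites updated in this patch):

    {
        android::mediautils::TimeCheck timeCheck(
                "IAudioFlinger::exampleMethod" /* tag, hypothetical */,
                {} /* onTimer */,
                android::mediautils::TimeCheck::kDefaultTimeoutDuration,
                android::mediautils::TimeCheck::kDefaultSecondChanceDuration,
                true /* crashOnTimeout */);
        // ... monitored work; the destructor cancels the timer when the scope exits.
    }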
TimeCheck() = default;
// Remove copy constructors as there should only be one call to the destructor.
@@ -79,16 +103,36 @@
// The usage here is const safe.
class TimeCheckHandler {
public:
- const std::string tag;
+ template <typename S, typename F>
+ TimeCheckHandler(S&& _tag, F&& _onTimer, bool _crashOnTimeout,
+ Duration _timeoutDuration, Duration _secondChanceDuration,
+ std::chrono::system_clock::time_point _startSystemTime,
+ pid_t _tid)
+ : tag(std::forward<S>(_tag))
+ , onTimer(std::forward<F>(_onTimer))
+ , crashOnTimeout(_crashOnTimeout)
+ , timeoutDuration(_timeoutDuration)
+ , secondChanceDuration(_secondChanceDuration)
+ , startSystemTime(_startSystemTime)
+ , tid(_tid)
+ {}
+ const FixedString62 tag;
const OnTimerFunc onTimer;
const bool crashOnTimeout;
- const std::chrono::system_clock::time_point startTime;
+ const Duration timeoutDuration;
+ const Duration secondChanceDuration;
+ const std::chrono::system_clock::time_point startSystemTime;
const pid_t tid;
void onCancel(TimerThread::Handle handle) const;
- void onTimeout() const;
+ void onTimeout(TimerThread::Handle handle) const;
};
+ // Returns a string that represents the timeout vs elapsed time,
+ // and diagnostics if there are any potential issues.
+ static std::string analyzeTimeouts(
+ float timeoutMs, float elapsedSteadyMs, float elapsedSystemMs);
+
static TimerThread& getTimeCheckThread();
static void accessAudioHalPids(std::vector<pid_t>* pids, bool update);
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
index ffee602..c76fa7d 100644
--- a/media/utils/include/mediautils/TimerThread.h
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -27,6 +27,8 @@
#include <android-base/thread_annotations.h>
+#include <mediautils/FixedString.h>
+
namespace android::mediautils {
/**
@@ -34,19 +36,93 @@
*/
class TimerThread {
public:
- // A Handle is a time_point that serves as a unique key. It is ordered.
+ // A Handle is a time_point that serves as a unique key to access a queued
+ // request to the TimerThread.
using Handle = std::chrono::steady_clock::time_point;
+ // Duration is based on steady_clock (typically nanoseconds)
+ // vs the system_clock duration (typically microseconds).
+ using Duration = std::chrono::steady_clock::duration;
+
static inline constexpr Handle INVALID_HANDLE =
std::chrono::steady_clock::time_point::min();
+ // Handle implementation details:
+ // A Handle represents the timer expiration time based on std::chrono::steady_clock
+ // (clock monotonic). This Handle is computed as now() + timeout.
+ //
+ // The lsb of the Handle time_point is adjusted to indicate whether there is
+ // a timeout action (1) or not (0).
+ //
+
+ template <size_t COUNT>
+ static constexpr bool is_power_of_2_v = COUNT > 0 && (COUNT & (COUNT - 1)) == 0;
+
+ template <size_t COUNT>
+ static constexpr size_t mask_from_count_v = COUNT - 1;
+
+ static constexpr size_t HANDLE_TYPES = 2;
+ // HANDLE_TYPES must be a power of 2.
+ static_assert(is_power_of_2_v<HANDLE_TYPES>);
+
+ // The handle types
+ enum class HANDLE_TYPE : size_t {
+ NO_TIMEOUT = 0,
+ TIMEOUT = 1,
+ };
+
+ static constexpr size_t HANDLE_TYPE_MASK = mask_from_count_v<HANDLE_TYPES>;
+
+ template <typename T>
+ static constexpr auto enum_as_value(T x) {
+ return static_cast<std::underlying_type_t<T>>(x);
+ }
+
+ static inline bool isNoTimeoutHandle(Handle handle) {
+ return (handle.time_since_epoch().count() & HANDLE_TYPE_MASK) ==
+ enum_as_value(HANDLE_TYPE::NO_TIMEOUT);
+ }
+
+ static inline bool isTimeoutHandle(Handle handle) {
+ return (handle.time_since_epoch().count() & HANDLE_TYPE_MASK) ==
+ enum_as_value(HANDLE_TYPE::TIMEOUT);
+ }
+
+ // Returns a unique Handle that doesn't exist in the container.
+ template <size_t MAX_TYPED_HANDLES, size_t HANDLE_TYPE_AS_VALUE, typename C, typename T>
+ static Handle getUniqueHandleForHandleType_l(C container, T timeout) {
+ static_assert(MAX_TYPED_HANDLES > 0 && HANDLE_TYPE_AS_VALUE < MAX_TYPED_HANDLES
+ && is_power_of_2_v<MAX_TYPED_HANDLES>,
+ " handles must be power of two");
+
+ // Our initial handle is the deadline as computed from steady_clock.
+ auto deadline = std::chrono::steady_clock::now() + timeout;
+
+ // We adjust the lsbs by the minimum increment to have the correct
+ // HANDLE_TYPE in the least significant bits.
+ auto remainder = deadline.time_since_epoch().count() & HANDLE_TYPE_MASK;
+ size_t offset = HANDLE_TYPE_AS_VALUE > remainder ? HANDLE_TYPE_AS_VALUE - remainder :
+ MAX_TYPED_HANDLES + HANDLE_TYPE_AS_VALUE - remainder;
+ deadline += std::chrono::steady_clock::duration(offset);
+
+ // To avoid key collisions, advance the handle by MAX_TYPED_HANDLES (the modulus factor)
+ // until the key is unique.
+ while (container.find(deadline) != container.end()) {
+ deadline += std::chrono::steady_clock::duration(MAX_TYPED_HANDLES);
+ }
+ return deadline;
+ }
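A worked example of the lsb adjustment above (hypothetical tick counts, using HANDLE_TYPES == 2 so the mask is the low bit):

    // raw deadline count ...6, want TIMEOUT (1):    remainder 0, offset 1 - 0 = 1     -> ...7 (lsb 1)
    // raw deadline count ...7, want NO_TIMEOUT (0): remainder 1, offset 2 + 0 - 1 = 1 -> ...8 (lsb 0)
    // Any key collision is then resolved by stepping in increments of MAX_TYPED_HANDLES (2),
    // which keeps the type bit unchanged.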
+
+ // TimerCallback invoked on timeout or cancel.
+ using TimerCallback = std::function<void(Handle)>;
+
/**
* Schedules a task to be executed in the future (`timeout` duration from now).
*
* \param tag string associated with the task. This need not be unique,
* as the Handle returned is used for cancelling.
* \param func callback function that is invoked at the timeout.
- * \param timeout timeout duration which is converted to milliseconds with at
+ * \param timeoutDuration timeout duration which is converted to milliseconds with at
* least 45 integer bits.
* A timeout of 0 (or negative) means the timer never expires
* so func() is never called. These tasks are stored internally
@@ -54,7 +130,8 @@
* \returns a handle that can be used for cancellation.
*/
Handle scheduleTask(
- std::string tag, std::function<void()>&& func, std::chrono::milliseconds timeout);
+ std::string_view tag, TimerCallback&& func,
+ Duration timeoutDuration, Duration secondChanceDuration);
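A usage sketch of the new signature (hypothetical tag and durations; it mirrors the updated unit tests, where the callback now receives the Handle of the expiring task):

    #include <chrono>
    #include <mediautils/TimerThread.h>

    int main() {
        using namespace std::chrono_literals;
        android::mediautils::TimerThread timerThread;
        auto handle = timerThread.scheduleTask(
                "example" /* tag, hypothetical */,
                [](android::mediautils::TimerThread::Handle) { /* invoked on timeout */ },
                100ms /* timeoutDuration */,
                50ms /* secondChanceDuration */);
        timerThread.cancelTask(handle);  // cancel before the 100ms + 50ms budget elapses
        return 0;
    }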
/**
* Tracks a task that shows up on toString() until cancelled.
@@ -62,7 +139,7 @@
* \param tag string associated with the task.
* \returns a handle that can be used for cancellation.
*/
- Handle trackTask(std::string tag);
+ Handle trackTask(std::string_view tag);
/**
* Cancels a task previously scheduled with scheduleTask()
@@ -128,14 +205,30 @@
private:
// To minimize movement of data, we pass around shared_ptrs to Requests.
// These are allocated and deallocated outside of the lock.
+ // TODO(b/243839867) consider options to merge Request with the
+ // TimeCheck::TimeCheckHandler struct.
struct Request {
+ Request(std::chrono::system_clock::time_point _scheduled,
+ std::chrono::system_clock::time_point _deadline,
+ Duration _secondChanceDuration,
+ pid_t _tid,
+ std::string_view _tag)
+ : scheduled(_scheduled)
+ , deadline(_deadline)
+ , secondChanceDuration(_secondChanceDuration)
+ , tid(_tid)
+ , tag(_tag)
+ {}
+
const std::chrono::system_clock::time_point scheduled;
- const std::chrono::system_clock::time_point deadline; // deadline := scheduled + timeout
+ const std::chrono::system_clock::time_point deadline; // deadline := scheduled
+ // + timeoutDuration
+ // + secondChanceDuration
// if deadline == scheduled, no
// timeout, task not executed.
+ Duration secondChanceDuration;
const pid_t tid;
- const std::string tag;
-
+ const FixedString62 tag;
std::string toString() const;
};
@@ -160,15 +253,17 @@
mRequestQueue GUARDED_BY(mRQMutex);
};
- // A storage map of tasks without timeouts. There is no std::function<void()>
+ // A storage map of tasks without timeouts. There is no TimerCallback
// required, it just tracks the tasks with the tag, scheduled time and the tid.
// These tasks show up on a pendingToString() until manually cancelled.
class NoTimeoutMap {
- // This a counter of the requests that have no timeout (timeout == 0).
- std::atomic<size_t> mNoTimeoutRequests{};
-
mutable std::mutex mNTMutex;
std::map<Handle, std::shared_ptr<const Request>> mMap GUARDED_BY(mNTMutex);
+ Handle getUniqueHandle_l() REQUIRES(mNTMutex) {
+ return getUniqueHandleForHandleType_l<
+ HANDLE_TYPES, enum_as_value(HANDLE_TYPE::NO_TIMEOUT)>(
+ mMap, Duration{} /* timeout */);
+ }
public:
bool isValidHandle(Handle handle) const; // lock free
@@ -182,14 +277,26 @@
// call on timeout.
// This class is thread-safe.
class MonitorThread {
+ std::atomic<size_t> mSecondChanceCount{};
mutable std::mutex mMutex;
- mutable std::condition_variable mCond;
+ mutable std::condition_variable mCond GUARDED_BY(mMutex);
// Ordered map of requests based on time of deadline.
//
- std::map<Handle, std::pair<std::shared_ptr<const Request>, std::function<void()>>>
+ std::map<Handle, std::pair<std::shared_ptr<const Request>, TimerCallback>>
mMonitorRequests GUARDED_BY(mMutex);
+ // Due to monotonic/steady clock inaccuracies during suspend,
+ // we allow an additional second chance waiting time to prevent
+ // false removal.
+
+ // This mSecondChanceRequests queue is almost always empty.
+ // Using a pair with the original handle allows lookup and keeps
+ // the Key unique.
+ std::map<std::pair<Handle /* new */, Handle /* original */>,
+ std::pair<std::shared_ptr<const Request>, TimerCallback>>
+ mSecondChanceRequests GUARDED_BY(mMutex);
+
RequestQueue& mTimeoutQueue; // locked internally, added to when request times out.
// Worker thread variables
@@ -200,16 +307,23 @@
std::thread mThread;
void threadFunc();
- Handle getUniqueHandle_l(std::chrono::milliseconds timeout) REQUIRES(mMutex);
+ Handle getUniqueHandle_l(Duration timeout) REQUIRES(mMutex) {
+ return getUniqueHandleForHandleType_l<
+ HANDLE_TYPES, enum_as_value(HANDLE_TYPE::TIMEOUT)>(
+ mMonitorRequests, timeout);
+ }
public:
MonitorThread(RequestQueue &timeoutQueue);
~MonitorThread();
- Handle add(std::shared_ptr<const Request> request, std::function<void()>&& func,
- std::chrono::milliseconds timeout);
+ Handle add(std::shared_ptr<const Request> request, TimerCallback&& func,
+ Duration timeout);
std::shared_ptr<const Request> remove(Handle handle);
void copyRequests(std::vector<std::shared_ptr<const Request>>& requests) const;
+ size_t getSecondChanceCount() const {
+ return mSecondChanceCount.load(std::memory_order_relaxed);
+ }
};
// Analysis contains info deduced by analysisTimeout().
@@ -244,16 +358,6 @@
std::vector<std::shared_ptr<const Request>> getPendingRequests() const;
- // A no-timeout request is represented by a handles at the end of steady_clock time,
- // counting down by the number of no timeout requests previously requested.
- // We manage them on the NoTimeoutMap, but conceptually they could be scheduled
- // on the MonitorThread because those time handles won't expire in
- // the lifetime of the device.
- static inline Handle getIndexedHandle(size_t index) {
- return std::chrono::time_point<std::chrono::steady_clock>::max() -
- std::chrono::time_point<std::chrono::steady_clock>::duration(index);
- }
-
static constexpr size_t kRetiredQueueMax = 16;
RequestQueue mRetiredQueue{kRetiredQueueMax}; // locked internally
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index 1024018..232cc4e 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -124,6 +124,27 @@
}
cc_test {
+ name: "mediautils_fixedstring_tests",
+
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "liblog",
+ "libmediautils",
+ "libutils",
+ ],
+
+ srcs: [
+ "mediautils_fixedstring_tests.cpp",
+ ],
+}
+
+cc_test {
name: "mediautils_scopedstatistics_tests",
cflags: [
diff --git a/media/utils/tests/mediautils_fixedstring_tests.cpp b/media/utils/tests/mediautils_fixedstring_tests.cpp
new file mode 100644
index 0000000..7ab9a9f
--- /dev/null
+++ b/media/utils/tests/mediautils_fixedstring_tests.cpp
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "mediautils_fixedstring_tests"
+
+#include <mediautils/FixedString.h>
+
+#include <gtest/gtest.h>
+#include <utils/Log.h>
+
+using namespace android::mediautils;
+
+TEST(mediautils_fixedstring_tests, ctor) {
+ FixedString<8> s0("abcde");
+
+ ASSERT_FALSE(s0.empty());
+ ASSERT_EQ(8U, s0.capacity());
+
+ ASSERT_EQ(5U, s0.size());
+ ASSERT_EQ(3U, s0.remaining());
+ ASSERT_EQ(0, strcmp(s0.c_str(), "abcde"));
+
+ ASSERT_EQ(0, strcmp(s0.data(), "abcde"));
+
+ // overflow
+ FixedString<8> s1("abcdefghijk");
+ ASSERT_EQ(8U, s1.size());
+ ASSERT_TRUE(s1.full());
+ ASSERT_EQ(0U, s1.remaining());
+ ASSERT_EQ(0, strcmp(s1.c_str(), "abcdefgh"));
+
+ // overflow
+ FixedString<8> s2(std::string("abcdefghijk"));
+ ASSERT_TRUE(s2.full());
+
+ ASSERT_EQ(8U, s2.size());
+ ASSERT_EQ(0, strcmp(s2.c_str(), "abcdefgh"));
+
+ // complex
+ ASSERT_EQ(s1, s2);
+ ASSERT_EQ(FixedString<12>().append(s1), s2);
+ ASSERT_NE(s1, "bcd");
+
+ // string and stringview
+ ASSERT_EQ(s1.asString(), s1.asStringView());
+
+ FixedString30 s3;
+ s3 = std::string("abcd");
+ ASSERT_EQ(s3, "abcd");
+
+ s3.clear();
+ ASSERT_EQ(s3, "");
+ ASSERT_NE(s3, "abcd");
+ ASSERT_EQ(0U, s3.size());
+}
+
+TEST(mediautils_fixedstring_tests, append) {
+ FixedString<8> s0;
+ ASSERT_EQ(0U, s0.size());
+ ASSERT_EQ(0, strcmp(s0.c_str(), ""));
+ ASSERT_TRUE(s0.empty());
+ ASSERT_FALSE(s0.full());
+
+ s0.append("abc");
+ ASSERT_EQ(3U, s0.size());
+ ASSERT_EQ(0, strcmp(s0.c_str(), "abc"));
+
+ s0.append(std::string("d"));
+ ASSERT_EQ(4U, s0.size());
+ ASSERT_EQ(0, strcmp(s0.c_str(), "abcd"));
+
+ // overflow
+ s0.append("efghijk");
+ ASSERT_EQ(8U, s0.size());
+ ASSERT_EQ(0, strcmp(s0.c_str(), "abcdefgh"));
+ ASSERT_TRUE(s0.full());
+
+ // concatenated
+ ASSERT_EQ(FixedString62("abcd"),
+ FixedString<8>("ab").append("c").append(FixedString<8>("d")));
+ ASSERT_EQ(FixedString<12>("a").append(FixedString<12>("b")), "ab");
+}
+
+TEST(mediautils_fixedstring_tests, plus_equals) {
+ FixedString<8> s0;
+ ASSERT_EQ(0U, s0.size());
+ ASSERT_EQ(0, strcmp(s0.c_str(), ""));
+
+ s0 += "abc";
+ s0 += "def";
+ ASSERT_EQ(s0, "abcdef");
+}
+
+TEST(mediautils_fixedstring_tests, stream_operator) {
+ FixedString<8> s0('a');
+
+ s0 << 'b' << "c" << "d" << '\n';
+ ASSERT_EQ(s0, "abcd\n");
+}
+
+TEST(mediautils_fixedstring_tests, examples) {
+ FixedString30 s1(std::string("a"));
+ s1 << "bc" << 'd' << '\n';
+ s1 += "hello";
+
+ ASSERT_EQ(s1, "abcd\nhello");
+
+ FixedString30 s2;
+ for (const auto &c : s1.asStringView()) {
+ s2.append(c);
+ };
+ ASSERT_EQ(s1, s2);
+
+ FixedString30 s3(std::move(s1));
+}
+
+// Ensure type alias works fine as well.
+using FixedString1024 = FixedString<1024>;
+
+TEST(mediautils_fixedstring_tests, copy) {
+ FixedString1024 s0("abc");
+ FixedString62 s1(s0);
+
+ ASSERT_EQ(3U, s1.size());
+ ASSERT_EQ(0, strcmp(s1.c_str(), "abc"));
+ ASSERT_EQ(s0, s1);
+
+ FixedString<1024> s2(s1);
+ ASSERT_EQ(3U, s2.size());
+ ASSERT_EQ(0, strcmp(s2.c_str(), "abc"));
+ ASSERT_EQ(s2, "abc");
+ ASSERT_NE(s2, "def");
+ ASSERT_EQ(s2, std::string("abc"));
+ ASSERT_NE(s2, std::string("def"));
+ ASSERT_EQ(s1, s2);
+ ASSERT_EQ(s0, s2);
+ ASSERT_EQ(s2, FixedString62(FixedString1024("abc")));
+}
diff --git a/media/utils/tests/timecheck_tests.cpp b/media/utils/tests/timecheck_tests.cpp
index 6ebf44d..8236174 100644
--- a/media/utils/tests/timecheck_tests.cpp
+++ b/media/utils/tests/timecheck_tests.cpp
@@ -39,7 +39,7 @@
timeoutRegistered = timeout;
elapsedMsRegistered = elapsedMs;
event = true;
- }, 1000 /* msec */, false /* crash */);
+ }, 1000ms /* timeoutDuration */, {} /* secondChanceDuration */, false /* crash */);
}
ASSERT_TRUE(event);
ASSERT_FALSE(timeoutRegistered);
@@ -58,7 +58,7 @@
timeoutRegistered = timeout;
elapsedMsRegistered = elapsedMs;
event = true; // store-release, must be last.
- }, 1 /* msec */, false /* crash */);
+ }, 1ms /* timeoutDuration */, {} /* secondChanceDuration */, false /* crash */);
std::this_thread::sleep_for(100ms);
}
ASSERT_TRUE(event); // load-acquire, must be first.
@@ -69,4 +69,4 @@
// Note: We do not test TimeCheck crash because TimeCheck is multithreaded and the
// EXPECT_EXIT() signal catching is imperfect due to the gtest fork.
-} // namespace
\ No newline at end of file
+} // namespace
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index bb096fe..e4c4107 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -39,6 +39,7 @@
#include <utils/Log.h>
#include <utils/Trace.h>
#include <binder/Parcel.h>
+#include <media/audiohal/AudioHalVersionInfo.h>
#include <media/audiohal/DeviceHalInterface.h>
#include <media/audiohal/DevicesFactoryHalInterface.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
@@ -106,13 +107,15 @@
namespace android {
-#define MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION 7.1
-
using ::android::base::StringPrintf;
using media::IEffectClient;
using media::audio::common::AudioMMapPolicyInfo;
using media::audio::common::AudioMMapPolicyType;
using android::content::AttributionSourceState;
+using android::detail::AudioHalVersionInfo;
+
+static const AudioHalVersionInfo kMaxAAudioPropertyDeviceHalVersion =
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 7, 1);
static const char kDeadlockedString[] = "AudioFlinger may be deadlocked\n";
static const char kHardwareLockedString[] = "Hardware lock is taken\n";
@@ -228,7 +231,9 @@
BINDER_METHOD_ENTRY(setDeviceConnectedState) \
BINDER_METHOD_ENTRY(setRequestedLatencyMode) \
BINDER_METHOD_ENTRY(getSupportedLatencyModes) \
-
+BINDER_METHOD_ENTRY(setBluetoothVariableLatencyEnabled) \
+BINDER_METHOD_ENTRY(isBluetoothVariableLatencyEnabled) \
+BINDER_METHOD_ENTRY(supportsBluetoothVariableLatency) \
// singleton for Binder Method Statistics for IAudioFlinger
static auto& getIAudioFlingerStatistics() {
@@ -323,7 +328,8 @@
mGlobalEffectEnableTime(0),
mPatchPanel(this),
mDeviceEffectManager(this),
- mSystemReady(false)
+ mSystemReady(false),
+ mBluetoothLatencyModesEnabled(true)
{
// Move the audio session unique ID generator start base as time passes to limit risk of
// generating the same ID again after an audioserver restart.
@@ -399,7 +405,7 @@
mDevicesFactoryHalCallback = new DevicesFactoryHalCallbackImpl;
mDevicesFactoryHal->setCallbackOnce(mDevicesFactoryHalCallback);
- if (mDevicesFactoryHal->getHalVersion() <= MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ if (mDevicesFactoryHal->getHalVersion() <= kMaxAAudioPropertyDeviceHalVersion) {
mAAudioBurstsPerBuffer = getAAudioMixerBurstCountFromSystemProperty();
mAAudioHwBurstMinMicros = getAAudioHardwareBurstMinUsecFromSystemProperty();
}
@@ -445,7 +451,7 @@
*policyInfos = it->second;
return NO_ERROR;
}
- if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ if (mDevicesFactoryHal->getHalVersion() > kMaxAAudioPropertyDeviceHalVersion) {
AutoMutex lock(mHardwareLock);
for (size_t i = 0; i < mAudioHwDevs.size(); ++i) {
AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
@@ -1612,6 +1618,44 @@
return thread->getSupportedLatencyModes(modes);
}
+status_t AudioFlinger::setBluetoothVariableLatencyEnabled(bool enabled) {
+ Mutex::Autolock _l(mLock);
+ status_t status = INVALID_OPERATION;
+ for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
+ // Success if at least one PlaybackThread supports Bluetooth latency modes
+ if (mPlaybackThreads.valueAt(i)->setBluetoothVariableLatencyEnabled(enabled) == NO_ERROR) {
+ status = NO_ERROR;
+ }
+ }
+ if (status == NO_ERROR) {
+ mBluetoothLatencyModesEnabled.store(enabled);
+ }
+ return status;
+}
+
+status_t AudioFlinger::isBluetoothVariableLatencyEnabled(bool *enabled) {
+ if (enabled == nullptr) {
+ return BAD_VALUE;
+ }
+ *enabled = mBluetoothLatencyModesEnabled.load();
+ return NO_ERROR;
+}
+
+status_t AudioFlinger::supportsBluetoothVariableLatency(bool* support) {
+ if (support == nullptr) {
+ return BAD_VALUE;
+ }
+ Mutex::Autolock _l(mLock);
+ *support = false;
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ if (mAudioHwDevs.valueAt(i)->supportsBluetoothVariableLatency()) {
+ *support = true;
+ break;
+ }
+ }
+ return NO_ERROR;
+}
+
status_t AudioFlinger::setStreamMute(audio_stream_type_t stream, bool muted)
{
// check calling permissions
@@ -2114,9 +2158,9 @@
void AudioFlinger::onSupportedLatencyModesChanged(
audio_io_handle_t output, const std::vector<audio_latency_mode_t>& modes) {
int32_t outputAidl = VALUE_OR_FATAL(legacy2aidl_audio_io_handle_t_int32_t(output));
- std::vector<media::LatencyMode> modesAidl = VALUE_OR_FATAL(
- convertContainer<std::vector<media::LatencyMode>>(modes,
- legacy2aidl_audio_latency_mode_t_LatencyMode));
+ std::vector<media::audio::common::AudioLatencyMode> modesAidl = VALUE_OR_FATAL(
+ convertContainer<std::vector<media::audio::common::AudioLatencyMode>>(
+ modes, legacy2aidl_audio_latency_mode_t_AudioLatencyMode));
Mutex::Autolock _l(mClientLock);
size_t size = mNotificationClients.size();
@@ -2512,6 +2556,13 @@
flags = static_cast<AudioHwDevice::Flags>(flags | AudioHwDevice::AHWD_IS_INSERT);
}
+
+ if (bool supports = false;
+ dev->supportsBluetoothVariableLatency(&supports) == NO_ERROR && supports) {
+ flags = static_cast<AudioHwDevice::Flags>(flags |
+ AudioHwDevice::AHWD_SUPPORTS_BT_LATENCY_MODES);
+ }
+
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
AudioHwDevice *audioDevice = new AudioHwDevice(handle, name, dev, flags);
if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
@@ -2521,7 +2572,7 @@
mHardwareStatus = AUDIO_HW_IDLE;
}
- if (mDevicesFactoryHal->getHalVersion() > MAX_AAUDIO_PROPERTY_DEVICE_HAL_VERSION) {
+ if (mDevicesFactoryHal->getHalVersion() > kMaxAAudioPropertyDeviceHalVersion) {
if (int32_t mixerBursts = dev->getAAudioMixerBurstCount();
mixerBursts > 0 && mixerBursts > mAAudioBurstsPerBuffer) {
mAAudioBurstsPerBuffer = mixerBursts;
@@ -2725,14 +2776,15 @@
status_t status = INVALID_OPERATION;
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
- std::vector<media::MicrophoneInfo> mics;
+ std::vector<audio_microphone_characteristic_t> mics;
AudioHwDevice *dev = mAudioHwDevs.valueAt(i);
mHardwareStatus = AUDIO_HW_GET_MICROPHONES;
status_t devStatus = dev->hwDevice()->getMicrophones(&mics);
mHardwareStatus = AUDIO_HW_IDLE;
if (devStatus == NO_ERROR) {
- microphones->insert(microphones->begin(), mics.begin(), mics.end());
// report success if at least one HW module supports the function.
+ std::transform(mics.begin(), mics.end(), std::back_inserter(*microphones),
+ [](auto& mic) { return media::MicrophoneInfo(mic); });
status = NO_ERROR;
}
}
@@ -2862,6 +2914,7 @@
if (thread->isMsdDevice()) {
thread->setDownStreamPatch(&patch);
}
+ thread->setBluetoothVariableLatencyEnabled(mBluetoothLatencyModesEnabled.load());
return thread;
}
}
@@ -4557,7 +4610,10 @@
case TransactionCode::SYSTEM_READY:
case TransactionCode::SET_AUDIO_HAL_PIDS:
case TransactionCode::SET_VIBRATOR_INFOS:
- case TransactionCode::UPDATE_SECONDARY_OUTPUTS: {
+ case TransactionCode::UPDATE_SECONDARY_OUTPUTS:
+ case TransactionCode::SET_BLUETOOTH_VARIABLE_LATENCY_ENABLED:
+ case TransactionCode::IS_BLUETOOTH_VARIABLE_LATENCY_ENABLED:
+ case TransactionCode::SUPPORTS_BLUETOOTH_VARIABLE_LATENCY: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -4610,7 +4666,9 @@
} else {
getIAudioFlingerStatistics().event(code, elapsedMs);
}
- });
+ }, mediautils::TimeCheck::kDefaultTimeoutDuration,
+ mediautils::TimeCheck::kDefaultSecondChanceDuration,
+ true /* crashOnTimeout */);
// Make sure we connect to Audio Policy Service before calling into AudioFlinger:
// - AudioFlinger can call into Audio Policy Service with its global mutex held
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fc4c807..cb303cf 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -299,6 +299,12 @@
virtual status_t getSupportedLatencyModes(audio_io_handle_t output,
std::vector<audio_latency_mode_t>* modes);
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
+ virtual status_t isBluetoothVariableLatencyEnabled(bool* enabled);
+
+ virtual status_t supportsBluetoothVariableLatency(bool* support);
+
status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
const std::function<status_t()>& delegate) override;
@@ -678,14 +684,16 @@
binder::Status getVolumeShaperState(
int32_t id,
std::optional<media::VolumeShaperState>* _aidl_return) override;
- binder::Status getDualMonoMode(media::AudioDualMonoMode* _aidl_return) override;
- binder::Status setDualMonoMode(media::AudioDualMonoMode mode) override;
+ binder::Status getDualMonoMode(
+ media::audio::common::AudioDualMonoMode* _aidl_return) override;
+ binder::Status setDualMonoMode(
+ media::audio::common::AudioDualMonoMode mode) override;
binder::Status getAudioDescriptionMixLevel(float* _aidl_return) override;
binder::Status setAudioDescriptionMixLevel(float leveldB) override;
binder::Status getPlaybackRateParameters(
- media::AudioPlaybackRate* _aidl_return) override;
+ media::audio::common::AudioPlaybackRate* _aidl_return) override;
binder::Status setPlaybackRateParameters(
- const media::AudioPlaybackRate& playbackRate) override;
+ const media::audio::common::AudioPlaybackRate& playbackRate) override;
private:
const sp<PlaybackThread::Track> mTrack;
@@ -1029,6 +1037,9 @@
std::vector<media::audio::common::AudioMMapPolicyInfo>> mPolicyInfos;
int32_t mAAudioBurstsPerBuffer = 0;
int32_t mAAudioHwBurstMinMicros = 0;
+
+ // Whether the Bluetooth variable latency control logic is enabled
+ std::atomic_bool mBluetoothLatencyModesEnabled;
};
#undef INCLUDING_FROM_AUDIOFLINGER_H
diff --git a/services/audioflinger/AudioHwDevice.h b/services/audioflinger/AudioHwDevice.h
index 8c5d239..1749f3f 100644
--- a/services/audioflinger/AudioHwDevice.h
+++ b/services/audioflinger/AudioHwDevice.h
@@ -40,6 +40,8 @@
// Means that this isn't a terminal module, and software patches
// are used to transport audio data further.
AHWD_IS_INSERT = 0x4,
+ // This module supports BT latency mode control
+ AHWD_SUPPORTS_BT_LATENCY_MODES = 0x8,
};
AudioHwDevice(audio_module_handle_t handle,
@@ -64,6 +66,10 @@
return (0 != (mFlags & AHWD_IS_INSERT));
}
+ bool supportsBluetoothVariableLatency() const {
+ return (0 != (mFlags & AHWD_SUPPORTS_BT_LATENCY_MODES));
+ }
+
audio_module_handle_t handle() const { return mHandle; }
const char *moduleName() const { return mModuleName; }
sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index 4f3ed0a..3a8c1bc 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -30,6 +30,7 @@
namespace android {
+using detail::AudioHalVersionInfo;
using media::IEffectClient;
void AudioFlinger::DeviceEffectManager::createAudioPatch(audio_patch_handle_t handle,
@@ -130,14 +131,17 @@
return BAD_VALUE;
}
- static const float sMinDeviceEffectHalVersion = 6.0;
- float halVersion = effectsFactory->getHalVersion();
+ static const AudioHalVersionInfo sMinDeviceEffectHalVersion =
+ AudioHalVersionInfo(AudioHalVersionInfo::Type::HIDL, 6, 0);
+ AudioHalVersionInfo halVersion = effectsFactory->getHalVersion();
+ // We can trust the AIDL-generated AudioHalVersionInfo comparison operator (based on std::tie)
+ // as long as the (type, major, minor) field order doesn't change in the definition.
if (((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_PRE_PROC
&& (desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC)
|| halVersion < sMinDeviceEffectHalVersion) {
- ALOGW("%s() non pre/post processing device effect %s or incompatible API version %f",
- __func__, desc->name, halVersion);
+ ALOGW("%s() non pre/post processing device effect %s or incompatible API version %s",
+ __func__, desc->name, halVersion.toString().c_str());
return BAD_VALUE;
}
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index 193e270..98829d0 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -1756,6 +1756,7 @@
mNotifyFramesProcessed(notifyFramesProcessed)
{
ALOGV("constructor %p client %p", this, client.get());
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
if (client == 0) {
return;
@@ -1817,7 +1818,7 @@
} else {
getIEffectStatistics().event(code, elapsedMs);
}
- }, 0 /* timeoutMs */);
+ }, {} /* timeoutDuration */, {} /* secondChanceDuration */, false /* crashOnTimeout */);
return BnEffect::onTransact(code, data, reply, flags);
}
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 6a138bb..33983d7 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -310,6 +310,7 @@
binder::Status unmute(/*out*/ bool *ret) override;
private:
Track* const mTrack;
+ bool setMute(bool muted);
};
sp<AudioVibrationController> mAudioVibrationController;
sp<os::ExternalVibration> mExternalVibration;
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 87a6bd1..c0e612d 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -269,6 +269,23 @@
return ss.str();
}
+static std::string toString(audio_latency_mode_t mode) {
+ // We convert to the AIDL type to print (eventually the legacy type will be removed).
+ const auto result = legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode);
+ return result.has_value() ? media::audio::common::toString(*result) : "UNKNOWN";
+}
+
+// Could be made a template, but that would be ambiguous with the other toString overloads for std::vector.
+static std::string toString(const std::vector<audio_latency_mode_t>& elements) {
+ std::string s("{ ");
+ for (const auto& e : elements) {
+ s.append(toString(e));
+ s.append(" ");
+ }
+ s.append("}");
+ return s;
+}
+
static pthread_once_t sFastTrackMultiplierOnce = PTHREAD_ONCE_INIT;
static void sFastTrackMultiplierInit()
@@ -2046,7 +2063,8 @@
mHwSupportsPause(false), mHwPaused(false), mFlushPending(false),
mLeftVolFloat(-1.0), mRightVolFloat(-1.0),
mDownStreamPatch{},
- mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs)
+ mIsTimestampAdvancing(kMinimumTimeBetweenTimestampChecksNs),
+ mBluetoothLatencyModesEnabled(true)
{
snprintf(mThreadName, kThreadNameLength, "AudioOut_%X", id);
mNBLogWriter = audioFlinger->newWriter_l(kLogSize, mThreadName);
@@ -2891,7 +2909,14 @@
void AudioFlinger::PlaybackThread::onCodecFormatChanged(
const std::basic_string<uint8_t>& metadataBs)
{
- std::thread([this, metadataBs]() {
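+ // The asynchronous thread may outlive this PlaybackThread; capture a weak reference and
+ // promote it inside the lambda before touching any members.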
+ wp<AudioFlinger::PlaybackThread> weakPointerThis = this;
+ std::thread([this, metadataBs, weakPointerThis]() {
+ sp<AudioFlinger::PlaybackThread> playbackThread = weakPointerThis.promote();
+ if (playbackThread == nullptr) {
+ ALOGW("PlaybackThread was destroyed, skip codec format change event");
+ return;
+ }
+
audio_utils::metadata::Data metadata =
audio_utils::metadata::dataFromByteString(metadataBs);
if (metadata.empty()) {
@@ -3152,10 +3177,7 @@
auto backInserter = std::back_inserter(metadata.tracks);
for (const sp<Track> &track : mActiveTracks) {
// No track is invalid as this is called after prepareTrack_l in the same critical section
- // Do not forward metadata for PatchTrack with unspecified stream type
- if (track->streamType() != AUDIO_STREAM_PATCH) {
- track->copyMetadataTo(backInserter);
- }
+ track->copyMetadataTo(backInserter);
}
sendMetadataToBackend_l(metadata);
}
@@ -3985,19 +4007,24 @@
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
- // mono blend occurs for mixer threads only (not direct or offloaded)
- // and is handled here if we're going directly to the sink.
- if (requireMonoBlend() && !mEffectBufferValid) {
- mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount, mNormalFrameCount,
- true /*limit*/);
- }
+ // Apply mono blending and balancing if the effect buffer is not valid. Otherwise,
+ // do these processes after effects are applied.
+ if (!mEffectBufferValid) {
+ // mono blend occurs for mixer threads only (not direct or offloaded)
+ // and is handled here if we're going directly to the sink.
+ if (requireMonoBlend()) {
+ mono_blend(mMixerBuffer, mMixerBufferFormat, mChannelCount,
+ mNormalFrameCount, true /*limit*/);
+ }
- if (!hasFastMixer()) {
- // Balance must take effect after mono conversion.
- // We do it here if there is no FastMixer.
- // mBalance detects zero balance within the class for speed (not needed here).
- mBalance.setBalance(mMasterBalance.load());
- mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ if (!hasFastMixer()) {
+ // Balance must take effect after mono conversion.
+ // We do it here if there is no FastMixer.
+ // mBalance detects zero balance within the class for speed
+ // (not needed here).
+ mBalance.setBalance(mMasterBalance.load());
+ mBalance.process((float *)mMixerBuffer, mNormalFrameCount);
+ }
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
@@ -4100,10 +4127,19 @@
mEffectBufferFormat,
mNormalFrameCount * mHapticChannelCount);
}
-
- memcpy_by_audio_format(mSinkBuffer, mFormat, effectBuffer, mEffectBufferFormat,
- mNormalFrameCount * (mChannelCount + mHapticChannelCount));
-
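+ // Note: despite the name, this is a count of samples (frames * channels), which is the
+ // element count expected by the copy helpers below.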
+ const size_t framesToCopy = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
+ if (mFormat == AUDIO_FORMAT_PCM_FLOAT &&
+ mEffectBufferFormat == AUDIO_FORMAT_PCM_FLOAT) {
+ // Clamp PCM float values that are farther than this distance from 0 to insulate
+ // a HAL that doesn't handle NaN correctly.
+ static constexpr float HAL_FLOAT_SAMPLE_LIMIT = 2.0f;
+ memcpy_to_float_from_float_with_clamping(static_cast<float*>(mSinkBuffer),
+ static_cast<const float*>(effectBuffer),
+ framesToCopy, HAL_FLOAT_SAMPLE_LIMIT /* absMax */);
+ } else {
+ memcpy_by_audio_format(mSinkBuffer, mFormat,
+ effectBuffer, mEffectBufferFormat, framesToCopy);
+ }
// The sample data is partially interleaved when haptic channels exist,
// we need to adjust channels here.
if (mHapticChannelCount > 0) {
@@ -7310,6 +7346,17 @@
if (status != INVALID_OPERATION) {
updateHalSupportedLatencyModes_l();
}
+
+ const pid_t tid = getTid();
+ if (tid == -1) {
+ // Unusual: PlaybackThread::onFirstRef() should set the threadLoop running.
+ ALOGW("%s: Cannot update Spatializer mixer thread priority, not running", __func__);
+ } else {
+ const int priorityBoost = requestSpatializerPriority(getpid(), tid);
+ if (priorityBoost > 0) {
+ stream()->setHalThreadPriority(priorityBoost);
+ }
+ }
}
status_t AudioFlinger::SpatializerThread::createAudioPatch_l(const struct audio_patch *patch,
@@ -7322,10 +7369,13 @@
void AudioFlinger::SpatializerThread::updateHalSupportedLatencyModes_l() {
std::vector<audio_latency_mode_t> latencyModes;
- if (mOutput->stream->getRecommendedLatencyModes(&latencyModes) != NO_ERROR) {
+ const status_t status = mOutput->stream->getRecommendedLatencyModes(&latencyModes);
+ if (status != NO_ERROR) {
latencyModes.clear();
}
if (latencyModes != mSupportedLatencyModes) {
+ ALOGD("%s: thread(%d) status %d supported latency modes: %s",
+ __func__, mId, status, toString(latencyModes).c_str());
mSupportedLatencyModes.swap(latencyModes);
sendHalLatencyModesChangedEvent_l();
}
@@ -7365,6 +7415,8 @@
if (latencyMode != mSetLatencyMode) {
status_t status = mOutput->stream->setLatencyMode(latencyMode);
+ ALOGD("%s: thread(%d) setLatencyMode(%s) returned %d",
+ __func__, mId, toString(latencyMode).c_str(), status);
if (status == NO_ERROR) {
mSetLatencyMode = latencyMode;
}
@@ -7390,6 +7442,15 @@
return NO_ERROR;
}
+status_t AudioFlinger::PlaybackThread::setBluetoothVariableLatencyEnabled(bool enabled) {
+ if (mOutput == nullptr || mOutput->audioHwDev == nullptr
+ || !mOutput->audioHwDev->supportsBluetoothVariableLatency()) {
+ return INVALID_OPERATION;
+ }
+ mBluetoothLatencyModesEnabled.store(enabled);
+ return NO_ERROR;
+}
+
void AudioFlinger::SpatializerThread::checkOutputStageEffects()
{
bool hasVirtualizer = false;
@@ -7446,6 +7507,8 @@
std::vector<audio_latency_mode_t> modes) {
Mutex::Autolock _l(mLock);
if (modes != mSupportedLatencyModes) {
+ ALOGD("%s: thread(%d) supported latency modes: %s",
+ __func__, mId, toString(modes).c_str());
mSupportedLatencyModes.swap(modes);
sendHalLatencyModesChangedEvent_l();
}
@@ -7522,10 +7585,11 @@
ALOGV("%p kUseFastCapture = Always, initFastCapture = true", this);
break;
case FastCapture_Static:
- initFastCapture = (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
- ALOGV("%p kUseFastCapture = Static, (%lld * 1000) / %u vs %u, initFastCapture = %d",
- this, (long long)mFrameCount, mSampleRate, kMinNormalCaptureBufferSizeMs,
- initFastCapture);
+ initFastCapture = !mIsMsdDevice // Disable fast capture for MSD BUS devices.
+ && (mFrameCount * 1000) / mSampleRate < kMinNormalCaptureBufferSizeMs;
+ ALOGV("%p kUseFastCapture = Static, (%lld * 1000) / %u vs %u, initFastCapture = %d "
+ "mIsMsdDevice = %d", this, (long long)mFrameCount, mSampleRate,
+ kMinNormalCaptureBufferSizeMs, initFastCapture, mIsMsdDevice);
break;
// case FastCapture_Dynamic:
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 2acbea6..bb42f22 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -1086,6 +1086,8 @@
return INVALID_OPERATION;
}
+ virtual status_t setBluetoothVariableLatencyEnabled(bool enabled);
+
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1172,7 +1174,7 @@
volatile int32_t mSuspended;
int64_t mBytesWritten;
- int64_t mFramesWritten; // not reset on standby
+ std::atomic<int64_t> mFramesWritten; // not reset on standby
int64_t mLastFramesWritten = -1; // track changes in timestamp
// server frames written.
int64_t mSuspendedFrames; // not reset on standby
@@ -1386,6 +1388,7 @@
virtual bool hasFastMixer() const = 0;
virtual FastTrackUnderruns getFastTrackUnderruns(size_t fastIndex __unused) const
{ FastTrackUnderruns dummy; return dummy; }
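+ // Used by Track::start() to seed the track-to-sink frame map.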
+ const std::atomic<int64_t>& framesWritten() const { return mFramesWritten; }
protected:
// accessed by both binder threads and within threadLoop(), lock on mutex needed
@@ -1435,6 +1438,9 @@
virtual void flushHw_l() {
mIsTimestampAdvancing.clear();
}
+
+ // Whether the Bluetooth variable latency control logic is enabled for this thread
+ std::atomic_bool mBluetoothLatencyModesEnabled;
};
class MixerThread : public PlaybackThread {
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 2677ab3..20bfbb0 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -397,6 +397,8 @@
int64_t mLogStartFrames = 0; // Timestamp frames at start()
double mLogLatencyMs = 0.; // Track the last log latency
+ bool mLogForceVolumeUpdate = true; // force volume update to TrackMetrics.
+
TrackMetrics mTrackMetrics;
bool mServerLatencySupported = false;
diff --git a/services/audioflinger/TrackMetrics.h b/services/audioflinger/TrackMetrics.h
index 30d69ab..6fc70d6 100644
--- a/services/audioflinger/TrackMetrics.h
+++ b/services/audioflinger/TrackMetrics.h
@@ -64,7 +64,6 @@
AMEDIAMETRICS_PROP_EVENT_VALUE_BEGINAUDIOINTERVALGROUP, devices.c_str());
}
++mIntervalCount;
- mIntervalStartTimeNs = systemTime();
}
void logConstructor(pid_t creatorPid, uid_t creatorUid, int32_t internalTrackId,
@@ -90,11 +89,9 @@
// Called when we are removed from the Thread.
void logEndInterval() {
std::lock_guard l(mLock);
- if (mIntervalStartTimeNs != 0) {
- const int64_t elapsedTimeNs = systemTime() - mIntervalStartTimeNs;
- mIntervalStartTimeNs = 0;
- mCumulativeTimeNs += elapsedTimeNs;
- mDeviceTimeNs += elapsedTimeNs;
+ if (mLastVolumeChangeTimeNs != 0) {
+ logVolume_l(mVolume); // flush out the last volume.
+ mLastVolumeChangeTimeNs = 0;
}
}
@@ -133,20 +130,8 @@
// may be called multiple times during an interval
void logVolume(float volume) {
- const int64_t timeNs = systemTime();
std::lock_guard l(mLock);
- if (mStartVolumeTimeNs == 0) {
- mDeviceVolume = mVolume = volume;
- mLastVolumeChangeTimeNs = mStartVolumeTimeNs = timeNs;
- updateMinMaxVolume(0, mVolume);
- return;
- }
- const int64_t durationNs = timeNs - mLastVolumeChangeTimeNs;
- updateMinMaxVolume(durationNs, mVolume);
- mDeviceVolume = (mDeviceVolume * (mLastVolumeChangeTimeNs - mStartVolumeTimeNs) +
- mVolume * durationNs) / (timeNs - mStartVolumeTimeNs);
- mVolume = volume;
- mLastVolumeChangeTimeNs = timeNs;
+ logVolume_l(volume);
}
// Use absolute numbers returned by AudioTrackShared.
@@ -158,6 +143,7 @@
}
private:
+
// no lock required - all arguments and constants.
void deliverDeviceMetrics(const char *eventName, const char *devices) const {
mediametrics::LogItem(mMetricsId)
@@ -167,6 +153,23 @@
.record();
}
+ void logVolume_l(float volume) REQUIRES(mLock) {
+ const int64_t timeNs = systemTime();
+ const int64_t durationNs = mLastVolumeChangeTimeNs == 0
+ ? 0 : timeNs - mLastVolumeChangeTimeNs;
+ if (durationNs > 0) {
+ // See West's algorithm for weighted averages
+ // https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
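+ // This is the incremental form of the time-weighted mean:
+ //   mDeviceVolume = (mDeviceVolume * mDeviceTimeNs + mVolume * durationNs)
+ //                       / (mDeviceTimeNs + durationNs)
+ // so no separate interval start-time field is needed.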
+ mDeviceVolume += (mVolume - mDeviceVolume) * durationNs
+ / (durationNs + mDeviceTimeNs);
+ mDeviceTimeNs += durationNs;
+ mCumulativeTimeNs += durationNs;
+ }
+ updateMinMaxVolume(durationNs, mVolume); // always update.
+ mVolume = volume;
+ mLastVolumeChangeTimeNs = timeNs;
+ }
+
void deliverCumulativeMetrics(const char *eventName) const REQUIRES(mLock) {
if (mIntervalCount > 0) {
mediametrics::LogItem item(mMetricsId);
@@ -199,14 +202,12 @@
// mDevices is not reset by resetIntervalGroupMetrics.
mIntervalCount = 0;
- mIntervalStartTimeNs = 0;
// mCumulativeTimeNs is not reset by resetIntervalGroupMetrics.
mDeviceTimeNs = 0;
mVolume = 0.f;
mDeviceVolume = 0.f;
- mStartVolumeTimeNs = 0;
- mLastVolumeChangeTimeNs = 0;
+ mLastVolumeChangeTimeNs = 0; // last time volume logged, cleared on endInterval
mMinVolume = AMEDIAMETRICS_INITIAL_MIN_VOLUME;
mMaxVolume = AMEDIAMETRICS_INITIAL_MAX_VOLUME;
mMinVolumeDurationNs = 0;
@@ -230,14 +231,12 @@
// Number of intervals and playing time
int32_t mIntervalCount GUARDED_BY(mLock) = 0;
- int64_t mIntervalStartTimeNs GUARDED_BY(mLock) = 0;
- int64_t mCumulativeTimeNs GUARDED_BY(mLock) = 0;
- int64_t mDeviceTimeNs GUARDED_BY(mLock) = 0;
+ int64_t mCumulativeTimeNs GUARDED_BY(mLock) = 0; // total time.
+ int64_t mDeviceTimeNs GUARDED_BY(mLock) = 0; // time on device.
// Average volume
- double mVolume GUARDED_BY(mLock) = 0.f;
- double mDeviceVolume GUARDED_BY(mLock) = 0.f;
- int64_t mStartVolumeTimeNs GUARDED_BY(mLock) = 0;
+ double mVolume GUARDED_BY(mLock) = 0.f; // last set volume.
+ double mDeviceVolume GUARDED_BY(mLock) = 0.f; // running average volume.
int64_t mLastVolumeChangeTimeNs GUARDED_BY(mLock) = 0;
// Min/Max volume
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 6135020..02c4aaf 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -334,6 +334,7 @@
: BnAudioTrack(),
mTrack(track)
{
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
}
AudioFlinger::TrackHandle::~TrackHandle() {
@@ -438,7 +439,8 @@
return Status::ok();
}
-Status AudioFlinger::TrackHandle::getDualMonoMode(media::AudioDualMonoMode* _aidl_return)
+Status AudioFlinger::TrackHandle::getDualMonoMode(
+ media::audio::common::AudioDualMonoMode* _aidl_return)
{
audio_dual_mono_mode_t mode = AUDIO_DUAL_MONO_MODE_OFF;
const status_t status = mTrack->getDualMonoMode(&mode)
@@ -451,7 +453,7 @@
}
Status AudioFlinger::TrackHandle::setDualMonoMode(
- media::AudioDualMonoMode mode)
+ media::audio::common::AudioDualMonoMode mode)
{
const auto localMonoMode = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioDualMonoMode_audio_dual_mono_mode_t(mode));
@@ -475,7 +477,7 @@
}
Status AudioFlinger::TrackHandle::getPlaybackRateParameters(
- media::AudioPlaybackRate* _aidl_return)
+ media::audio::common::AudioPlaybackRate* _aidl_return)
{
audio_playback_rate_t localPlaybackRate{};
status_t status = mTrack->getPlaybackRateParameters(&localPlaybackRate)
@@ -488,7 +490,7 @@
}
Status AudioFlinger::TrackHandle::setPlaybackRateParameters(
- const media::AudioPlaybackRate& playbackRate)
+ const media::audio::common::AudioPlaybackRate& playbackRate)
{
const audio_playback_rate_t localPlaybackRate = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioPlaybackRate_audio_playback_rate_t(playbackRate));
@@ -1094,12 +1096,22 @@
__func__, mId, (int)mThreadIoHandle);
}
- // states to reset position info for non-offloaded/direct tracks
- if (!isOffloaded() && !isDirect()
+ PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
+
+ // states to reset position info for pcm tracks
+ if (audio_is_linear_pcm(mFormat)
&& (state == IDLE || state == STOPPED || state == FLUSHED)) {
mFrameMap.reset();
+
+ if (!isFastTrack() && (isDirect() || isOffloaded())) {
+ // Start point of track -> sink frame map. If the HAL returns a
+ // frame position smaller than the first written frame in
+ // updateTrackFrameInfo, the timestamp can be interpolated
+ // instead of using a larger value.
+ mFrameMap.push(mAudioTrackServerProxy->framesReleased(),
+ playbackThread->framesWritten());
+ }
}
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
if (isFastTrack()) {
// refresh fast track underruns on start because that field is never cleared
// by the fast mixer; furthermore, the same track can be recycled, i.e. start
@@ -1123,6 +1135,7 @@
.mPosition[ExtendedTimestamp::LOCATION_KERNEL];
mLogLatencyMs = 0.;
}
+ mLogForceVolumeUpdate = true; // at least one volume logged for metrics when starting.
if (status == NO_ERROR || status == ALREADY_EXISTS) {
// for streaming tracks, remove the buffer read stop limit.
@@ -1394,12 +1407,21 @@
if (mFinalVolume != volume) { // Compare to an epsilon if too many meaningless updates
mFinalVolume = volume;
setMetadataHasChanged();
- mTrackMetrics.logVolume(volume);
+ mLogForceVolumeUpdate = true;
+ }
+ if (mLogForceVolumeUpdate) {
+ mLogForceVolumeUpdate = false;
+ mTrackMetrics.logVolume(mFinalVolume);
}
}
void AudioFlinger::PlaybackThread::Track::copyMetadataTo(MetadataInserter& backInserter) const
{
+ // Do not forward metadata for PatchTrack with unspecified stream type
+ if (mStreamType == AUDIO_STREAM_PATCH) {
+ return;
+ }
+
playback_track_metadata_v7_t metadata;
metadata.base = {
.usage = mAttr.usage,
@@ -1898,9 +1920,7 @@
}
}
-binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
- /*out*/ bool *ret) {
- *ret = false;
+bool AudioFlinger::PlaybackThread::Track::AudioVibrationController::setMute(bool muted) {
sp<ThreadBase> thread = mTrack->mThread.promote();
if (thread != 0) {
// Lock for updating mHapticPlaybackEnabled.
@@ -1908,27 +1928,22 @@
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
&& playbackThread->mHapticChannelCount > 0) {
- mTrack->setHapticPlaybackEnabled(false);
- *ret = true;
+ mTrack->setHapticPlaybackEnabled(!muted);
+ return true;
}
}
+ return false;
+}
+
+binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::mute(
+ /*out*/ bool *ret) {
+ *ret = setMute(true);
return binder::Status::ok();
}
binder::Status AudioFlinger::PlaybackThread::Track::AudioVibrationController::unmute(
/*out*/ bool *ret) {
- *ret = false;
- sp<ThreadBase> thread = mTrack->mThread.promote();
- if (thread != 0) {
- // Lock for updating mHapticPlaybackEnabled.
- Mutex::Autolock _l(thread->mLock);
- PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
- if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
- && playbackThread->mHapticChannelCount > 0) {
- mTrack->setHapticPlaybackEnabled(true);
- *ret = true;
- }
- }
+ *ret = setMute(false);
return binder::Status::ok();
}
@@ -2314,6 +2329,7 @@
: BnAudioRecord(),
mRecordTrack(recordTrack)
{
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
}
AudioFlinger::RecordHandle::~RecordHandle() {
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index cf1f64c..a62d3f0 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -194,6 +194,9 @@
{AUDIO_FORMAT_E_AC3, {}},
{AUDIO_FORMAT_DTS, {}},
{AUDIO_FORMAT_DTS_HD, {}},
+ {AUDIO_FORMAT_DTS_HD_MA, {}},
+ {AUDIO_FORMAT_DTS_UHD, {}},
+ {AUDIO_FORMAT_DTS_UHD_P2, {}},
{AUDIO_FORMAT_AAC_LC, {
AUDIO_FORMAT_AAC_HE_V1, AUDIO_FORMAT_AAC_HE_V2, AUDIO_FORMAT_AAC_ELD,
AUDIO_FORMAT_AAC_XHE}},
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index fdf5175..56c0603 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -316,7 +316,8 @@
// Permit match only if requested format and mix format are PCM and can be format
// adapted by the mixer, or are the same (compressed) format.
- if (!((audio_is_linear_pcm(config.format) && audio_is_linear_pcm(mix->mFormat.format)) ||
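+ // Loopback mixes are not rejected on a format mismatch.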
+ if (!is_mix_loopback(mix->mRouteFlags) &&
+ !((audio_is_linear_pcm(config.format) && audio_is_linear_pcm(mix->mFormat.format)) ||
(config.format == mix->mFormat.format)) &&
config.format != AUDIO_CONFIG_BASE_INITIALIZER.format) {
return MixMatchStatus::NO_MATCH;
diff --git a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
index e7908eb..c453dea 100644
--- a/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
+++ b/services/audiopolicy/config/bluetooth_with_le_audio_policy_configuration_7_0.xml
@@ -12,6 +12,7 @@
</mixPort>
<!-- Le Audio Audio Ports -->
<mixPort name="le audio output" role="source" />
+ <mixPort name="le audio broadcast output" role="source" />
<mixPort name="le audio input" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000 16000 24000 32000 44100 48000"
@@ -47,6 +48,7 @@
<devicePort tagName="BLE Headset Out" type="AUDIO_DEVICE_OUT_BLE_HEADSET" role="sink"/>
<devicePort tagName="BLE Speaker Out" type="AUDIO_DEVICE_OUT_BLE_SPEAKER" role="sink"/>
<devicePort tagName="BLE Headset In" type="AUDIO_DEVICE_IN_BLE_HEADSET" role="source"/>
+ <devicePort tagName="BLE Broadcast Out" type="AUDIO_DEVICE_OUT_BLE_BROADCAST" role="sink"/>
</devicePorts>
<routes>
<route type="mix" sink="BT A2DP Out"
@@ -63,5 +65,7 @@
sources="BLE Headset In"/>
<route type="mix" sink="BLE Speaker Out"
sources="le audio output"/>
+ <route type="mix" sink="BLE Broadcast Out"
+ sources="le audio broadcast output"/>
</routes>
</module>
diff --git a/services/audiopolicy/config/surround_sound_configuration_aidl.xml b/services/audiopolicy/config/surround_sound_configuration_aidl.xml
new file mode 100644
index 0000000..cf15711
--- /dev/null
+++ b/services/audiopolicy/config/surround_sound_configuration_aidl.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<!-- Copyright (C) 2018 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<surroundSound>
+ <!-- Each of the listed formats gets an entry in Surround Settings dialog on TV devices.
+ There must be a corresponding Java ENCODING_... constant defined in AudioFormat.java,
+ and a display name defined in AudioFormat.toDisplayName. For the formats that don't
+ need a dedicated Surround Settings dialog entry, a subformats list has to be used. -->
+ <formats>
+ <format name="AUDIO_FORMAT_AC3" />
+ <format name="AUDIO_FORMAT_E_AC3" />
+ <format name="AUDIO_FORMAT_E_AC3_JOC" />
+ <format name="AUDIO_FORMAT_DOLBY_TRUEHD" />
+ <format name="AUDIO_FORMAT_DTS" />
+ <format name="AUDIO_FORMAT_DTS_HD" />
+ <format name="AUDIO_FORMAT_DTS_HD_MA" />
+ <format name="AUDIO_FORMAT_DTS_UHD" />
+ <format name="AUDIO_FORMAT_DTS_UHD_P2" />
+ <format name="AUDIO_FORMAT_AAC_LC" subformats="AUDIO_FORMAT_AAC_HE_V1 AUDIO_FORMAT_AAC_HE_V2 AUDIO_FORMAT_AAC_ELD AUDIO_FORMAT_AAC_XHE" />
+ <format name="AUDIO_FORMAT_AC4" />
+ </formats>
+</surroundSound>
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index d927f6b..45c5eac 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -195,6 +195,12 @@
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP, AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES,
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_SPEAKER, }));
}
+ // If connected to a dock, never use the device speaker for calls
+ if (!availableOutputDevices.getDevicesFromTypes({AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET})
+ .isEmpty()) {
+ availableOutputDevices.remove(
+ availableOutputDevices.getDevicesFromTypes({AUDIO_DEVICE_OUT_SPEAKER}));
+ }
} break;
case STRATEGY_ACCESSIBILITY: {
// do not route accessibility prompts to a digital output currently configured with a
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 7d7ed93..3518037 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -68,16 +68,6 @@
// media / notification / system volume.
constexpr float IN_CALL_EARPIECE_HEADROOM_DB = 3.f;
-// Compressed formats for MSD module, ordered from most preferred to least preferred.
-static const std::vector<audio_format_t> msdCompressedFormatsOrder = {{
- AUDIO_FORMAT_IEC60958, AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
- AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
-// Channel masks for MSD module, 3D > 2D > 1D ordering (most preferred to least preferred).
-static const std::vector<audio_channel_mask_t> msdSurroundChannelMasksOrder = {{
- AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
- AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
-
template <typename T>
bool operator== (const SortedVector<T> &left, const SortedVector<T> &right)
{
@@ -1644,10 +1634,28 @@
const AudioProfileVector &sourceProfiles, const AudioProfileVector &sinkProfiles,
audio_port_config *sourceConfig, audio_port_config *sinkConfig) const
{
+ // Compressed formats for MSD module, ordered from most preferred to least preferred.
+ static const std::vector<audio_format_t> formatsOrder = {{
+ AUDIO_FORMAT_IEC60958, AUDIO_FORMAT_MAT_2_1, AUDIO_FORMAT_MAT_2_0, AUDIO_FORMAT_E_AC3,
+ AUDIO_FORMAT_AC3, AUDIO_FORMAT_PCM_16_BIT }};
+ static const std::vector<audio_channel_mask_t> channelMasksOrder = [](){
+ // Channel position masks for MSD module, 3D > 2D > 1D ordering (most preferred to least
+ // preferred).
+ std::vector<audio_channel_mask_t> masks = {{
+ AUDIO_CHANNEL_OUT_3POINT1POINT2, AUDIO_CHANNEL_OUT_3POINT0POINT2,
+ AUDIO_CHANNEL_OUT_2POINT1POINT2, AUDIO_CHANNEL_OUT_2POINT0POINT2,
+ AUDIO_CHANNEL_OUT_5POINT1, AUDIO_CHANNEL_OUT_STEREO }};
+ // Insert index masks ahead of the position masks; higher channel counts are most preferred.
+ for (int i = 1; i <= AUDIO_CHANNEL_COUNT_MAX; i++) {
+ masks.insert(
+ masks.begin(), audio_channel_mask_for_index_assignment_from_count(i));
+ }
+ return masks;
+ }();
+
struct audio_config_base bestSinkConfig;
- status_t result = findBestMatchingOutputConfig(sourceProfiles, sinkProfiles,
- msdCompressedFormatsOrder, msdSurroundChannelMasksOrder,
- true /*preferHigherSamplingRates*/, bestSinkConfig);
+ status_t result = findBestMatchingOutputConfig(sourceProfiles, sinkProfiles, formatsOrder,
+ channelMasksOrder, true /*preferHigherSamplingRates*/, bestSinkConfig);
if (result != NO_ERROR) {
ALOGD("%s() no matching config found for sink, hwAvSync: %d",
__func__, hwAvSync);
@@ -1669,7 +1677,10 @@
}
sourceConfig->sample_rate = bestSinkConfig.sample_rate;
// Specify exact channel mask to prevent guessing by bit count in PatchPanel.
- sourceConfig->channel_mask = audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
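+ // Index channel masks have no separate input/output variants, so they are used as-is;
+ // only position masks need the out-to-in conversion.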
+ sourceConfig->channel_mask =
+ audio_channel_mask_get_representation(bestSinkConfig.channel_mask)
+ == AUDIO_CHANNEL_REPRESENTATION_INDEX ?
+ bestSinkConfig.channel_mask : audio_channel_mask_out_to_in(bestSinkConfig.channel_mask);
sourceConfig->format = bestSinkConfig.format;
// Copy input stream directly without any processing (e.g. resampling).
sourceConfig->flags.input = static_cast<audio_input_flags_t>(
@@ -1884,7 +1895,8 @@
// (see b/200293124, the incorrect selection of AUDIO_OUTPUT_FLAG_VOIP_RX).
// 3: the output supporting the exact channel mask
// 4: the output with a higher channel count than requested
- // 5: the output with a higher sampling rate than requested
+ // 5: the output with the highest sampling rate if the requested sample rate is
+ // greater than default sampling rate
// 6: the output with the highest number of requested performance flags
// 7: the output with the bit depth the closest to the requested one
// 8: the primary output
@@ -1944,8 +1956,7 @@
}
// sampling rate match
- if (samplingRate > SAMPLE_RATE_HZ_DEFAULT &&
- samplingRate <= outputDesc->getSamplingRate()) {
+ if (samplingRate > SAMPLE_RATE_HZ_DEFAULT) {
currentMatchCriteria[4] = outputDesc->getSamplingRate();
}
@@ -4100,7 +4111,6 @@
if (mEffects.isNonOffloadableEffectEnabled()) {
return OK;
}
-
AudioDeviceTypeAddrVector devices;
status_t status = getDevicesForAttributes(*attr, &devices, false /* forVolume */);
if (status != OK) {
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index beb26b6..281785e 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -202,6 +202,7 @@
mCaptureStateNotifier(false),
mCreateAudioPolicyManager(createAudioPolicyManager),
mDestroyAudioPolicyManager(destroyAudioPolicyManager) {
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
}
void AudioPolicyService::loadAudioPolicyManager()
@@ -1347,7 +1348,9 @@
} else {
getIAudioPolicyServiceStatistics().event(code, elapsedMs);
}
- });
+ }, mediautils::TimeCheck::kDefaultTimeoutDuration,
+ mediautils::TimeCheck::kDefaultSecondChanceDuration,
+ true /* crashOnTimeout */);
switch (code) {
case SHELL_COMMAND_TRANSACTION: {
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 38ad494..2fe7b9e 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -34,6 +34,7 @@
#include <media/stagefright/foundation/AMessage.h>
#include <media/MediaMetricsItem.h>
#include <media/ShmemCompat.h>
+#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/ServiceUtilities.h>
#include <utils/Thread.h>
@@ -111,6 +112,14 @@
};
void onMessageReceived(const sp<AMessage> &msg) override {
+ // There is no ALooper method to get the tid, so update the
+ // Spatializer thread priority on the first message received.
+ std::call_once(mPrioritySetFlag, [](){
+ const pid_t pid = getpid();
+ const pid_t tid = gettid();
+ (void)requestSpatializerPriority(pid, tid);
+ });
+
sp<Spatializer> spatializer = mSpatializer.promote();
if (spatializer == nullptr) {
ALOGW("%s: Cannot promote spatializer", __func__);
@@ -163,6 +172,7 @@
}
private:
wp<Spatializer> mSpatializer;
+ std::once_flag mPrioritySetFlag;
};
const std::vector<const char *> Spatializer::sHeadPoseKeys = {
@@ -255,6 +265,7 @@
: mEngineDescriptor(engineDescriptor),
mPolicyCallback(callback) {
ALOGV("%s", __func__);
+ setMinSchedulerPolicy(SCHED_NORMAL, ANDROID_PRIORITY_AUDIO);
}
void Spatializer::onFirstRef() {
@@ -263,7 +274,7 @@
mLooper->start(
/*runOnCallingThread*/false,
/*canCallJava*/ false,
- PRIORITY_AUDIO);
+ PRIORITY_URGENT_AUDIO);
mHandler = new EngineCallbackHandler(this);
mLooper->registerHandler(mHandler);
@@ -963,7 +974,10 @@
}
}
if (mOutput != AUDIO_IO_HANDLE_NONE && supportsSetLatencyMode) {
- AudioSystem::setRequestedLatencyMode(mOutput, requestedLatencyMode);
+ const status_t status =
+ AudioSystem::setRequestedLatencyMode(mOutput, requestedLatencyMode);
+ ALOGD("%s: setRequestedLatencyMode for output thread(%d) to %s returned %d",
+ __func__, mOutput, toString(requestedLatencyMode).c_str(), status);
}
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index bcbd92b..0f6bafe 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -23,6 +23,7 @@
#include <android/media/SpatializationLevel.h>
#include <android/media/SpatializationMode.h>
#include <android/media/SpatializerHeadTrackingMode.h>
+#include <android/media/audio/common/AudioLatencyMode.h>
#include <audio_utils/SimpleLog.h>
#include <math.h>
#include <media/AudioEffect.h>
@@ -165,14 +166,11 @@
std::string toString(unsigned level) const NO_THREAD_SAFETY_ANALYSIS;
static std::string toString(audio_latency_mode_t mode) {
- switch (mode) {
- case AUDIO_LATENCY_MODE_FREE:
- return "LATENCY_MODE_FREE";
- case AUDIO_LATENCY_MODE_LOW:
- return "LATENCY_MODE_LOW";
- }
- return "EnumNotImplemented";
- };
+ // We convert to the AIDL type to print (eventually the legacy type will be removed).
+ const auto result = legacy2aidl_audio_latency_mode_t_AudioLatencyMode(mode);
+ return result.has_value() ?
+ media::audio::common::toString(*result) : "unknown_latency_mode";
+ }
/**
* Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 5e9a0e1..72dba3d 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -43,7 +43,7 @@
constexpr float kMaxTranslationalVelocity = 2;
// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
-constexpr float kMaxRotationalVelocity = 8;
+constexpr float kMaxRotationalVelocity = 0.8f;
// This is how far into the future we predict the head pose, using linear extrapolation based on
// twist (velocity). It should be set to a value that matches the characteristic durations of moving
@@ -67,13 +67,13 @@
// Screen is considered to be unstable (not still) if it has moved significantly within the last
// time window of this duration.
-constexpr auto kScreenStillnessWindowDuration = 3s;
+constexpr auto kScreenStillnessWindowDuration = 750ms;
// Screen is considered to have moved significantly if translated by this much (in meter, approx).
constexpr float kScreenStillnessTranslationThreshold = 0.1f;
// Screen is considered to have moved significantly if rotated by this much (in radians, approx).
-constexpr float kScreenStillnessRotationThreshold = 7.0f / 180 * M_PI;
+constexpr float kScreenStillnessRotationThreshold = 15.0f / 180 * M_PI;
// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
// what we use for pose filtering.
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index d51c57c..c341b32 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -446,7 +446,7 @@
sp<AudioProfile> ac3OutputProfile = new AudioProfile(
AUDIO_FORMAT_AC3, AUDIO_CHANNEL_OUT_5POINT1, k48000SamplingRate);
sp<AudioProfile> iec958OutputProfile = new AudioProfile(
- AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_OUT_STEREO, k48000SamplingRate);
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_INDEX_MASK_24, k48000SamplingRate);
mMsdOutputDevice->addAudioProfile(pcmOutputProfile);
mMsdOutputDevice->addAudioProfile(ac3OutputProfile);
mMsdOutputDevice->addAudioProfile(iec958OutputProfile);
@@ -519,7 +519,7 @@
// Add HDMI input device with IEC60958 profile for HDMI in -> MSD patching.
mHdmiInputDevice = new DeviceDescriptor(AUDIO_DEVICE_IN_HDMI);
sp<AudioProfile> iec958InputProfile = new AudioProfile(
- AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_IN_STEREO, k48000SamplingRate);
+ AUDIO_FORMAT_IEC60958, AUDIO_CHANNEL_INDEX_MASK_24, k48000SamplingRate);
mHdmiInputDevice->addAudioProfile(iec958InputProfile);
config.addDevice(mHdmiInputDevice);
sp<InputProfile> hdmiInputProfile = new InputProfile("hdmi input");
@@ -677,8 +677,8 @@
ASSERT_EQ(AUDIO_PORT_ROLE_SINK, patch->mPatch.sinks[0].role);
ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sources[0].format);
ASSERT_EQ(AUDIO_FORMAT_IEC60958, patch->mPatch.sinks[0].format);
- ASSERT_EQ(AUDIO_CHANNEL_IN_STEREO, patch->mPatch.sources[0].channel_mask);
- ASSERT_EQ(AUDIO_CHANNEL_OUT_STEREO, patch->mPatch.sinks[0].channel_mask);
+ ASSERT_EQ(AUDIO_CHANNEL_INDEX_MASK_24, patch->mPatch.sources[0].channel_mask);
+ ASSERT_EQ(AUDIO_CHANNEL_INDEX_MASK_24, patch->mPatch.sinks[0].channel_mask);
ASSERT_EQ(k48000SamplingRate, patch->mPatch.sources[0].sample_rate);
ASSERT_EQ(k48000SamplingRate, patch->mPatch.sinks[0].sample_rate);
ASSERT_EQ(1, patchCount.deltaFromSnapshot());
@@ -754,7 +754,7 @@
audio_config_base_t msdDirectConfig2 = AUDIO_CONFIG_BASE_INITIALIZER;
msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
msdDirectConfig2.sample_rate = 48000;
- msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_INDEX_MASK_24;
audio_config_base_t msdNonDirectConfig = AUDIO_CONFIG_BASE_INITIALIZER;
msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
@@ -821,7 +821,7 @@
audio_config_t msdDirectConfig2 = AUDIO_CONFIG_INITIALIZER;
msdDirectConfig2.format = AUDIO_FORMAT_IEC60958;
msdDirectConfig2.sample_rate = 48000;
- msdDirectConfig2.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ msdDirectConfig2.channel_mask = AUDIO_CHANNEL_INDEX_MASK_24;
audio_config_t msdNonDirectConfig = AUDIO_CONFIG_INITIALIZER;
msdNonDirectConfig.format = AUDIO_FORMAT_PCM_16_BIT;
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 80410ab..2388b79 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1698,7 +1698,13 @@
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- if (CameraServiceProxyWrapper::isCameraDisabled()) {
+ userid_t clientUserId = multiuser_get_user_id(clientUid);
+ int callingUid = CameraThreadState::getCallingUid();
+ if (clientUid == USE_CALLING_UID) {
+ clientUserId = multiuser_get_user_id(callingUid);
+ }
+
+ if (CameraServiceProxyWrapper::isCameraDisabled(clientUserId)) {
String8 msg =
String8::format("Camera disabled by device policy");
ALOGE("%s: %s", __FUNCTION__, msg.string());
@@ -1746,20 +1752,68 @@
return ret;
}
+String16 CameraService::getPackageNameFromUid(int clientUid) {
+ String16 packageName("");
+
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
+ if (binder == 0) {
+ ALOGE("Cannot get permission service");
+ // Return an empty package name; any further interaction
+ // with the camera will likely fail
+ return packageName;
+ }
+
+ sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
+ Vector<String16> packages;
+
+ permCtrl->getPackagesForUid(clientUid, packages);
+
+ if (packages.isEmpty()) {
+ ALOGE("No packages for calling UID %d", clientUid);
+ // Return an empty package name; any further interaction
+ // with the camera will likely fail
+ return packageName;
+ }
+
+ // Arbitrarily pick the first name in the list
+ packageName = packages[0];
+
+ return packageName;
+}
+
template<class CALLBACK, class CLIENT>
Status CameraService::connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int api1CameraId, const String16& clientPackageName, bool systemNativeClient,
+ int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient,
const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool shimUpdateOnly, int oomScoreOffset, int targetSdkVersion,
/*out*/sp<CLIENT>& device) {
binder::Status ret = binder::Status::ok();
+ bool isNonSystemNdk = false;
+ String16 clientPackageName;
+ if (clientPackageNameMaybe.size() <= 0) {
+ // NDK calls don't come with package names, but we need one for various cases.
+ // Generally, there's a 1:1 mapping between UID and package name, but shared UIDs
+ // do exist. For all authentication cases, all packages under the same UID get the
+ // same permissions, so picking any associated package name is sufficient. For some
+ // other cases, this may give inaccurate names for clients in logs.
+ isNonSystemNdk = true;
+ int packageUid = (clientUid == USE_CALLING_UID) ?
+ CameraThreadState::getCallingUid() : clientUid;
+ clientPackageName = getPackageNameFromUid(packageUid);
+ } else {
+ clientPackageName = clientPackageNameMaybe;
+ }
+
String8 clientName8(clientPackageName);
int originalClientPid = 0;
+ int packagePid = (clientPid == USE_CALLING_PID) ?
+ CameraThreadState::getCallingPid() : clientPid;
ALOGI("CameraService::connect call (PID %d \"%s\", camera ID %s) and "
- "Camera API version %d", clientPid, clientName8.string(), cameraId.string(),
+ "Camera API version %d", packagePid, clientName8.string(), cameraId.string(),
static_cast<int>(effectiveApiLevel));
nsecs_t openTimeNs = systemTime();
@@ -1767,7 +1821,7 @@
sp<CLIENT> client = nullptr;
int facing = -1;
int orientation = 0;
- bool isNonSystemNdk = (clientPackageName.size() == 0);
+
{
// Acquire mServiceLock and prevent other clients from connecting
std::unique_ptr<AutoConditionLock> lock =
@@ -1901,6 +1955,9 @@
}
}
+ // Enable/disable camera service watchdog
+ client->setCameraServiceWatchdog(mCameraServiceWatchdogEnabled);
+
// Set rotate-and-crop override behavior
if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
@@ -3257,37 +3314,6 @@
sCameraService = cameraService;
}
- // In some cases the calling code has no access to the package it runs under.
- // For example, NDK camera API.
- // In this case we will get the packages for the calling UID and pick the first one
- // for attributing the app op. This will work correctly for runtime permissions
- // as for legacy apps we will toggle the app op for all packages in the UID.
- // The caveat is that the operation may be attributed to the wrong package and
- // stats based on app ops may be slightly off.
- if (mClientPackageName.size() <= 0) {
- sp<IServiceManager> sm = defaultServiceManager();
- sp<IBinder> binder = sm->getService(String16(kPermissionServiceName));
- if (binder == 0) {
- ALOGE("Cannot get permission service");
- // Leave mClientPackageName unchanged (empty) and the further interaction
- // with camera will fail in BasicClient::startCameraOps
- return;
- }
-
- sp<IPermissionController> permCtrl = interface_cast<IPermissionController>(binder);
- Vector<String16> packages;
-
- permCtrl->getPackagesForUid(mClientUid, packages);
-
- if (packages.isEmpty()) {
- ALOGE("No packages for calling UID");
- // Leave mClientPackageName unchanged (empty) and the further interaction
- // with camera will fail in BasicClient::startCameraOps
- return;
- }
- mClientPackageName = packages[0];
- }
-
// There are 2 scenarios in which a client won't have AppOps operations
// (both scenarios : native clients)
// 1) It's an system native client*, the package name will be empty
@@ -3952,7 +3978,7 @@
int toggleType __unused, int sensor __unused, bool enabled) {
{
Mutex::Autolock _l(mSensorPrivacyLock);
- mSensorPrivacyEnabled = mSpm.isToggleSensorPrivacyEnabled(SensorPrivacyManager::TOGGLE_SENSOR_CAMERA);
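+ // Trust the state delivered with this callback instead of re-querying the SensorPrivacyManager.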
+ mSensorPrivacyEnabled = enabled;
}
// if sensor privacy is enabled then block all clients from accessing the camera
if (enabled) {
@@ -4842,6 +4868,8 @@
return handleSetCameraMute(args);
} else if (args.size() >= 2 && args[0] == String16("watch")) {
return handleWatchCommand(args, in, out);
+ } else if (args.size() >= 2 && args[0] == String16("set-watchdog")) {
+ return handleSetCameraServiceWatchdog(args);
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
return OK;
@@ -4935,6 +4963,28 @@
return OK;
}
+status_t CameraService::handleSetCameraServiceWatchdog(const Vector<String16>& args) {
+ int enableWatchdog = atoi(String8(args[1]));
+
+ if (enableWatchdog < 0 || enableWatchdog > 1) return BAD_VALUE;
+
+ Mutex::Autolock lock(mServiceLock);
+
+ mCameraServiceWatchdogEnabled = enableWatchdog;
+
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr) {
+ basicClient->setCameraServiceWatchdog(enableWatchdog);
+ }
+ }
+ }
+
+ return OK;
+}
+
status_t CameraService::handleGetRotateAndCrop(int out) {
Mutex::Autolock lock(mServiceLock);
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index d96ea00..f2d15ef 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -339,6 +339,9 @@
// Set/reset camera mute
virtual status_t setCameraMute(bool enabled) = 0;
+ // Set Camera service watchdog
+ virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+
// The injection camera session to replace the internal camera
// session.
virtual status_t injectCamera(const String8& injectedCamId,
@@ -825,10 +828,19 @@
// sorted in alpha-numeric order.
void filterAPI1SystemCameraLocked(const std::vector<std::string> &normalDeviceIds);
+ // In some cases the calling code has no access to the package it runs under.
+ // For example, NDK camera API.
+ // In this case we will get the packages for the calling UID and pick the first one
+ // for attributing the app op. This will work correctly for runtime permissions
+ // as for legacy apps we will toggle the app op for all packages in the UID.
+ // The caveat is that the operation may be attributed to the wrong package and
+ // stats based on app ops may be slightly off.
+ String16 getPackageNameFromUid(int clientUid);
+
// Single implementation shared between the various connect calls
template<class CALLBACK, class CLIENT>
binder::Status connectHelper(const sp<CALLBACK>& cameraCb, const String8& cameraId,
- int api1CameraId, const String16& clientPackageName, bool systemNativeClient,
+ int api1CameraId, const String16& clientPackageNameMaybe, bool systemNativeClient,
const std::optional<String16>& clientFeatureId, int clientUid, int clientPid,
apiLevel effectiveApiLevel, bool shimUpdateOnly, int scoreOffset, int targetSdkVersion,
/*out*/sp<CLIENT>& device);
@@ -1198,6 +1210,9 @@
// Handle 'watch' command as passed through 'cmd'
status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
+ // Set the camera service watchdog
+ status_t handleSetCameraServiceWatchdog(const Vector<String16>& args);
+
// Enable tag monitoring of the given tags in provided clients
status_t startWatchingTags(const Vector<String16> &args, int outFd);
@@ -1284,6 +1299,9 @@
// Current camera mute mode
bool mOverrideCameraMuteMode = false;
+ // Camera Service watchdog flag
+ bool mCameraServiceWatchdogEnabled = true;
+
/**
* A listener class that implements the IBinder::DeathRecipient interface
* for use to call back the error state injected by the external camera, and
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.cpp b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
index a169667..e101dd3 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.cpp
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
@@ -66,6 +66,17 @@
}
}
+void CameraServiceWatchdog::setEnabled(bool enable)
+{
+ AutoMutex _l(mEnabledLock);
+
+ mEnabled = enable;
+}
+
void CameraServiceWatchdog::stop(uint32_t tid)
{
AutoMutex _l(mWatchdogLock);
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index f4955e2..e35d69e 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -21,12 +21,14 @@
* expected duration has exceeded.
* Notes on multi-threaded behaviors:
* - The threadloop is blocked/paused when there are no calls being
- * monitored.
+ * monitored (when the TID cycle to counter map is empty).
* - The start and stop functions handle simultaneous call monitoring
* and single call monitoring differently. See function documentation for
* more details.
+ * To disable/enable:
+ * - adb shell cmd media.camera set-watchdog [0/1]
*/
-
+#pragma once
#include <chrono>
#include <thread>
#include <time.h>
@@ -49,26 +51,41 @@
public:
explicit CameraServiceWatchdog() : mPause(true), mMaxCycles(kMaxCycles),
- mCycleLengthMs(kCycleLengthMs) {};
+ mCycleLengthMs(kCycleLengthMs), mEnabled(true) {};
- explicit CameraServiceWatchdog (size_t maxCycles, uint32_t cycleLengthMs) :
- mPause(true), mMaxCycles(maxCycles), mCycleLengthMs(cycleLengthMs) {};
+ explicit CameraServiceWatchdog (size_t maxCycles, uint32_t cycleLengthMs, bool enabled) :
+ mPause(true), mMaxCycles(maxCycles), mCycleLengthMs(cycleLengthMs), mEnabled(enabled)
+ {};
virtual ~CameraServiceWatchdog() {};
virtual void requestExit();
+ /** Enables/disables the watchdog */
+ void setEnabled(bool enable);
+
/** Used to wrap monitored calls in start and stop functions using custom timer values */
template<typename T>
auto watchThread(T func, uint32_t tid, uint32_t cycles, uint32_t cycleLength) {
- auto res = NULL;
+ decltype(func()) res;
if (cycles != mMaxCycles || cycleLength != mCycleLengthMs) {
// Create another instance of the watchdog to prevent disruption
// of timer for current monitored calls
+
+ // Lock for mEnabled
+ mEnabledLock.lock();
sp<CameraServiceWatchdog> tempWatchdog =
- new CameraServiceWatchdog(cycles, cycleLength);
- tempWatchdog->run("CameraServiceWatchdog");
+ new CameraServiceWatchdog(cycles, cycleLength, mEnabled);
+ mEnabledLock.unlock();
+
+ status_t status = tempWatchdog->run("CameraServiceWatchdog");
+ if (status != OK) {
+ ALOGE("Unable to watch thread: %s (%d)", strerror(-status), status);
+ res = watchThread(func, tid);
+ return res;
+ }
+
res = tempWatchdog->watchThread(func, tid);
tempWatchdog->requestExit();
tempWatchdog.clear();
@@ -84,11 +101,16 @@
/** Used to wrap monitored calls in start and stop functions using class timer values */
template<typename T>
auto watchThread(T func, uint32_t tid) {
- auto res = NULL;
+ decltype(func()) res;
+ AutoMutex _l(mEnabledLock);
- start(tid);
- res = func();
- stop(tid);
+ if (mEnabled) {
+ start(tid);
+ res = func();
+ stop(tid);
+ } else {
+ res = func();
+ }
return res;
}
@@ -109,11 +131,13 @@
virtual bool threadLoop();
- Mutex mWatchdogLock; // Lock for condition variable
- Condition mWatchdogCondition; // Condition variable for stop/start
- bool mPause; // True if thread is currently paused
- uint32_t mMaxCycles; // Max cycles
- uint32_t mCycleLengthMs; // Length of time elapsed per cycle
+ Mutex mWatchdogLock; // Lock for condition variable
+ Mutex mEnabledLock; // Lock for enabled status
+ Condition mWatchdogCondition; // Condition variable for stop/start
+ bool mPause; // True if tid map is empty
+ uint32_t mMaxCycles; // Max cycles
+ uint32_t mCycleLengthMs; // Length of time elapsed per cycle
+ bool mEnabled; // True if watchdog is enabled
std::unordered_map<uint32_t, uint32_t> tidToCycleCounterMap; // Thread Id to cycle counter map
};
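For readers skimming the header change, a minimal usage sketch follows (not part of the patch); doHalCall() is a hypothetical status_t-returning callable and gettid() is the bionic syscall wrapper.
// Usage sketch only; see assumptions in the note above.
sp<CameraServiceWatchdog> watchdog = new CameraServiceWatchdog();
watchdog->run("CameraServiceWatchdog");
// While enabled, watchThread() brackets the call with start(tid)/stop(tid);
// a call that outlives mMaxCycles * mCycleLengthMs trips the watchdog.
status_t res = watchdog->watchThread([&]() { return doHalCall(); }, gettid());
// After setEnabled(false) (adb shell cmd media.camera set-cameraservice-watchdog 0),
// watchThread() invokes the callable without any monitoring.
watchdog->setEnabled(false);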
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 68dc7f8..20bf73d 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -2316,6 +2316,10 @@
return INVALID_OPERATION;
}
+status_t Camera2Client::setCameraServiceWatchdog(bool enabled) {
+ return mDevice->setCameraServiceWatchdog(enabled);
+}
+
status_t Camera2Client::setRotateAndCropOverride(uint8_t rotateAndCrop) {
if (rotateAndCrop > ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index c8dfc46..8081efa 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -90,6 +90,8 @@
virtual bool supportsCameraMute();
virtual status_t setCameraMute(bool enabled);
+ virtual status_t setCameraServiceWatchdog(bool enabled);
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 5e91501..2a8a103 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -944,8 +944,8 @@
std::vector<int> surfaceIds;
bool isDepthCompositeStream =
camera3::DepthCompositeStream::isDepthCompositeStream(surfaces[0]);
- bool isHeicCompisiteStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
- if (isDepthCompositeStream || isHeicCompisiteStream) {
+ bool isHeicCompositeStream = camera3::HeicCompositeStream::isHeicCompositeStream(surfaces[0]);
+ if (isDepthCompositeStream || isHeicCompositeStream) {
sp<CompositeStream> compositeStream;
if (isDepthCompositeStream) {
compositeStream = new camera3::DepthCompositeStream(mDevice, getRemoteCallback());
@@ -1730,6 +1730,10 @@
return binder::Status::ok();
}
+status_t CameraDeviceClient::setCameraServiceWatchdog(bool enabled) {
+ return mDevice->setCameraServiceWatchdog(enabled);
+}
+
status_t CameraDeviceClient::setRotateAndCropOverride(uint8_t rotateAndCrop) {
if (rotateAndCrop > ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index c5aad6b..45915ba 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -206,6 +206,8 @@
virtual status_t stopWatchingTags(int out);
virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+ virtual status_t setCameraServiceWatchdog(bool enabled);
+
/**
* Device listener interface
*/
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 9303fd2..beb655b 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -66,6 +66,10 @@
return OK;
}
+status_t CameraOfflineSessionClient::setCameraServiceWatchdog(bool) {
+ return OK;
+}
+
status_t CameraOfflineSessionClient::setRotateAndCropOverride(uint8_t /*rotateAndCrop*/) {
// Since we're not submitting more capture requests, changes to rotateAndCrop override
// make no difference.
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index f2c42d8..9ea1093 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -84,6 +84,8 @@
bool supportsCameraMute() override;
status_t setCameraMute(bool enabled) override;
+ status_t setCameraServiceWatchdog(bool enabled) override;
+
// permissions management
status_t startCameraOps() override;
status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index dc5002b..7d98a0b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -40,6 +40,7 @@
#include "utils/CameraServiceProxyWrapper.h"
namespace android {
+
using namespace camera2;
// Interface used by CameraService
@@ -144,6 +145,10 @@
wp<NotificationListener> weakThis(this);
res = mDevice->setNotifyCallback(weakThis);
+ /** Start watchdog thread */
+ mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog->run("Camera2ClientBaseWatchdog");
+
return OK;
}
@@ -155,6 +160,11 @@
disconnect();
+ if (mCameraServiceWatchdog != NULL) {
+ mCameraServiceWatchdog->requestExit();
+ mCameraServiceWatchdog.clear();
+ }
+
ALOGI("Closed Camera %s. Client was: %s (PID %d, UID %u)",
TClientBase::mCameraIdStr.string(),
String8(TClientBase::mClientPackageName).string(),
@@ -238,9 +248,24 @@
// ICameraClient2BaseUser interface
-
template <typename TClientBase>
binder::Status Camera2ClientBase<TClientBase>::disconnect() {
+ if (mCameraServiceWatchdog != nullptr && mDevice != nullptr) {
+ // The timer for the disconnect call should be longer than getExpectedInFlightDuration,
+ // since that duration is used by the error-handling methods in the disconnect sequence;
+ // keeping the timer larger lets the existing error handling execute first
+ uint64_t maxExpectedDuration =
+ ns2ms(mDevice->getExpectedInFlightDuration() + kBufferTimeDisconnectNs);
+
+ // Initialization from hal succeeded, time disconnect.
+ return mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(disconnectImpl(),
+ maxExpectedDuration / kCycleLengthMs, kCycleLengthMs);
+ }
+ return disconnectImpl();
+}
+
+template <typename TClientBase>
+binder::Status Camera2ClientBase<TClientBase>::disconnectImpl() {
ATRACE_CALL();
ALOGD("Camera %s: start to disconnect", TClientBase::mCameraIdStr.string());
Mutex::Autolock icl(mBinderSerializationLock);
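As a rough illustration of how the custom disconnect timer above is sized (a sketch with a made-up in-flight duration, not part of the patch; ns2ms, kBufferTimeDisconnectNs, and kCycleLengthMs are the names used in this change):
// Example: getExpectedInFlightDuration() returns 100 ms of pending work.
nsecs_t inFlight = 100000000;                                              // 100 ms (hypothetical)
uint64_t maxExpectedDuration = ns2ms(inFlight + kBufferTimeDisconnectNs);  // 100 + 3000 = 3100 ms
uint32_t cycles = maxExpectedDuration / kCycleLengthMs;                    // watchdog cycles granted to disconnectImpl()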
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index b0d1c3f..e51d25d 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -19,6 +19,7 @@
#include "common/CameraDeviceBase.h"
#include "camera/CaptureResult.h"
+#include "CameraServiceWatchdog.h"
namespace android {
@@ -131,6 +132,9 @@
protected:
+ // Used for watchdog timeout to monitor disconnect
+ static const nsecs_t kBufferTimeDisconnectNs = 3000000000; // 3 sec.
+
// The PID provided in the constructor call
pid_t mInitialClientPid;
bool mOverrideForPerfClass = false;
@@ -173,6 +177,12 @@
private:
template<typename TProviderPtr>
status_t initializeImpl(TProviderPtr providerPtr, const String8& monitorTags);
+
+ binder::Status disconnectImpl();
+
+ // Watchdog thread
+ sp<CameraServiceWatchdog> mCameraServiceWatchdog;
+
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 7e2f93c..69514f3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -449,6 +449,11 @@
virtual status_t setCameraMute(bool enabled) = 0;
/**
+ * Enable/disable camera service watchdog
+ */
+ virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+
+ /**
* Get the status tracker of the camera device
*/
virtual wp<camera3::StatusTracker> getStatusTracker() = 0;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index e36f761..cd23250 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1412,6 +1412,27 @@
return res;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addReadoutTimestampTag(
+ bool readoutTimestampSupported) {
+ status_t res = OK;
+ auto& c = mCameraCharacteristics;
+
+ auto entry = c.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
+ if (entry.count != 0) {
+ ALOGE("%s: CameraCharacteristics must not contain ANDROID_SENSOR_READOUT_TIMESTAMP!",
+ __FUNCTION__);
+ }
+
+ uint8_t readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
+ if (readoutTimestampSupported) {
+ readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
+ }
+
+ res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &readoutTimestamp, 1);
+
+ return res;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::removeAvailableKeys(
CameraMetadata& c, const std::vector<uint32_t>& keys, uint32_t keyTag) {
status_t res = OK;
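A quick sketch of how the derived tag can later be read back from the static characteristics (illustration only, not part of the patch; info stands for any CameraMetadata copy of the characteristics):
// Check whether the camera reports hardware readout timestamps.
auto entry = info.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
bool readoutSupported = (entry.count == 1) &&
        (entry.data.u8[0] == ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE);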
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index a66598d..d049aff 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -663,6 +663,7 @@
status_t deriveHeicTags(bool maxResolution = false);
status_t addRotateCropTags();
status_t addPreCorrectionActiveArraySize();
+ status_t addReadoutTimestampTag(bool readoutTimestampSupported = true);
static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
android_pixel_format_t format,
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index 81b4779..ef68f28 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -532,6 +532,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag();
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
index bded9aa..d60565f 100644
--- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -655,6 +655,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag(/*readoutTimestampSupported*/false);
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 7c2f34f..445b397 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -113,10 +113,6 @@
status_t Camera3Device::initializeCommonLocked() {
- /** Start watchdog thread */
- mCameraServiceWatchdog = new CameraServiceWatchdog();
- mCameraServiceWatchdog->run("CameraServiceWatchdog");
-
/** Start up status tracker thread */
mStatusTracker = new StatusTracker(this);
status_t res = mStatusTracker->run(String8::format("C3Dev-%s-Status", mId.string()).string());
@@ -230,6 +226,15 @@
// Hidl/AidlCamera3DeviceInjectionMethods
mInjectionMethods = createCamera3DeviceInjectionMethods(this);
+ /** Start watchdog thread */
+ mCameraServiceWatchdog = new CameraServiceWatchdog();
+ res = mCameraServiceWatchdog->run("CameraServiceWatchdog");
+ if (res != OK) {
+ SET_ERR_L("Unable to start camera service watchdog thread: %s (%d)",
+ strerror(-res), res);
+ return res;
+ }
+
return OK;
}
@@ -250,20 +255,28 @@
Mutex::Autolock l(mLock);
if (mStatus == STATUS_UNINITIALIZED) return res;
- if (mStatus == STATUS_ACTIVE ||
- (mStatus == STATUS_ERROR && mRequestThread != NULL)) {
- res = mRequestThread->clearRepeatingRequests();
- if (res != OK) {
- SET_ERR_L("Can't stop streaming");
- // Continue to close device even in case of error
- } else {
- res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
+ if (mRequestThread != NULL) {
+ if (mStatus == STATUS_ACTIVE || mStatus == STATUS_ERROR) {
+ res = mRequestThread->clear();
if (res != OK) {
- SET_ERR_L("Timeout waiting for HAL to drain (% " PRIi64 " ns)",
- maxExpectedDuration);
+ SET_ERR_L("Can't stop streaming");
// Continue to close device even in case of error
+ } else {
+ res = waitUntilStateThenRelock(/*active*/ false, maxExpectedDuration);
+ if (res != OK) {
+ SET_ERR_L("Timeout waiting for HAL to drain (% " PRIi64 " ns)",
+ maxExpectedDuration);
+ // Continue to close device even in case of error
+ }
}
}
+ // Signal to the request thread that we're not expecting any
+ // more requests. This holds because once we're in disconnect
+ // and have cleared the request queue, the request
+ // thread can't receive any new requests through
+ // binder calls, since disconnect holds the
+ // mBinderSerialization lock.
+ mRequestThread->setRequestClearing();
}
if (mStatus == STATUS_ERROR) {
@@ -1737,7 +1750,7 @@
}
// Calculate expected duration for flush with additional buffer time in ms for watchdog
- uint64_t maxExpectedDuration = (getExpectedInFlightDuration() + kBaseGetBufferWait) / 1e6;
+ uint64_t maxExpectedDuration = ns2ms(getExpectedInFlightDuration() + kBaseGetBufferWait);
status_t res = mCameraServiceWatchdog->WATCH_CUSTOM_TIMER(mRequestThread->flush(),
maxExpectedDuration / kCycleLengthMs, kCycleLengthMs);
@@ -2674,7 +2687,7 @@
status_t Camera3Device::registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
bool hasAppCallback, nsecs_t minExpectedDuration, nsecs_t maxExpectedDuration,
- const std::set<std::set<String8>>& physicalCameraIds,
+ bool isFixedFps, const std::set<std::set<String8>>& physicalCameraIds,
bool isStillCapture, bool isZslCapture, bool rotateAndCropAuto,
const std::set<std::string>& cameraIdsWithZoom,
const SurfaceMap& outputSurfaces, nsecs_t requestTimeNs) {
@@ -2683,7 +2696,7 @@
ssize_t res;
res = mInFlightMap.add(frameNumber, InFlightRequest(numBuffers, resultExtras, hasInput,
- hasAppCallback, minExpectedDuration, maxExpectedDuration, physicalCameraIds,
+ hasAppCallback, minExpectedDuration, maxExpectedDuration, isFixedFps, physicalCameraIds,
isStillCapture, isZslCapture, rotateAndCropAuto, cameraIdsWithZoom, requestTimeNs,
outputSurfaces));
if (res < 0) return res;
@@ -3047,7 +3060,8 @@
}
-status_t Camera3Device::RequestThread::clearRepeatingRequestsLocked(/*out*/int64_t *lastFrameNumber) {
+status_t Camera3Device::RequestThread::clearRepeatingRequestsLocked(
+ /*out*/int64_t *lastFrameNumber) {
std::vector<int32_t> streamIds;
for (const auto& request : mRepeatingRequests) {
for (const auto& stream : request->mOutputStreams) {
@@ -3072,8 +3086,6 @@
Mutex::Autolock l(mRequestLock);
ALOGV("RequestThread::%s:", __FUNCTION__);
- mRepeatingRequests.clear();
-
// Send errors for all requests pending in the request queue, including
// pending repeating requests
sp<NotificationListener> listener = mListener.promote();
@@ -3111,10 +3123,7 @@
Mutex::Autolock al(mTriggerMutex);
mTriggerMap.clear();
- if (lastFrameNumber != NULL) {
- *lastFrameNumber = mRepeatingLastFrameNumber;
- }
- mRepeatingLastFrameNumber = hardware::camera2::ICameraDeviceUser::NO_IN_FLIGHT_REPEATING_FRAMES;
+ clearRepeatingRequestsLocked(lastFrameNumber);
mRequestClearing = true;
mRequestSignal.signal();
return OK;
@@ -3243,16 +3252,18 @@
return true;
}
-std::pair<nsecs_t, nsecs_t> Camera3Device::RequestThread::calculateExpectedDurationRange(
- const camera_metadata_t *request) {
- std::pair<nsecs_t, nsecs_t> expectedRange(
+Camera3Device::RequestThread::ExpectedDurationInfo
+ Camera3Device::RequestThread::calculateExpectedDurationRange(
+ const camera_metadata_t *request) {
+ ExpectedDurationInfo expectedDurationInfo = {
InFlightRequest::kDefaultMinExpectedDuration,
- InFlightRequest::kDefaultMaxExpectedDuration);
+ InFlightRequest::kDefaultMaxExpectedDuration,
+ /*isFixedFps*/false};
camera_metadata_ro_entry_t e = camera_metadata_ro_entry_t();
find_camera_metadata_ro_entry(request,
ANDROID_CONTROL_AE_MODE,
&e);
- if (e.count == 0) return expectedRange;
+ if (e.count == 0) return expectedDurationInfo;
switch (e.data.u8[0]) {
case ANDROID_CONTROL_AE_MODE_OFF:
@@ -3260,29 +3271,32 @@
ANDROID_SENSOR_EXPOSURE_TIME,
&e);
if (e.count > 0) {
- expectedRange.first = e.data.i64[0];
- expectedRange.second = expectedRange.first;
+ expectedDurationInfo.minDuration = e.data.i64[0];
+ expectedDurationInfo.maxDuration = expectedDurationInfo.minDuration;
}
find_camera_metadata_ro_entry(request,
ANDROID_SENSOR_FRAME_DURATION,
&e);
if (e.count > 0) {
- expectedRange.first = std::max(e.data.i64[0], expectedRange.first);
- expectedRange.second = expectedRange.first;
+ expectedDurationInfo.minDuration =
+ std::max(e.data.i64[0], expectedDurationInfo.minDuration);
+ expectedDurationInfo.maxDuration = expectedDurationInfo.minDuration;
}
+ expectedDurationInfo.isFixedFps = false;
break;
default:
find_camera_metadata_ro_entry(request,
ANDROID_CONTROL_AE_TARGET_FPS_RANGE,
&e);
if (e.count > 1) {
- expectedRange.first = 1e9 / e.data.i32[1];
- expectedRange.second = 1e9 / e.data.i32[0];
+ expectedDurationInfo.minDuration = 1e9 / e.data.i32[1];
+ expectedDurationInfo.maxDuration = 1e9 / e.data.i32[0];
}
+ expectedDurationInfo.isFixedFps = (e.data.i32[1] == e.data.i32[0]);
break;
}
- return expectedRange;
+ return expectedDurationInfo;
}
bool Camera3Device::RequestThread::skipHFRTargetFPSUpdate(int32_t tag,
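To make the new ExpectedDurationInfo concrete, two example outcomes (example values only, not part of the patch):
// AE on, ANDROID_CONTROL_AE_TARGET_FPS_RANGE = [30, 30]:
//   minDuration = 1e9 / 30 ~ 33.3 ms, maxDuration ~ 33.3 ms, isFixedFps = true
// AE on, ANDROID_CONTROL_AE_TARGET_FPS_RANGE = [15, 30]:
//   minDuration = 1e9 / 30 ~ 33.3 ms, maxDuration = 1e9 / 15 ~ 66.7 ms, isFixedFps = false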
@@ -3897,13 +3911,14 @@
isZslCapture = true;
}
}
- auto expectedDurationRange = calculateExpectedDurationRange(settings);
+ auto expectedDurationInfo = calculateExpectedDurationRange(settings);
res = parent->registerInFlight(halRequest->frame_number,
totalNumBuffers, captureRequest->mResultExtras,
/*hasInput*/halRequest->input_buffer != NULL,
hasCallback,
- /*min*/expectedDurationRange.first,
- /*max*/expectedDurationRange.second,
+ expectedDurationInfo.minDuration,
+ expectedDurationInfo.maxDuration,
+ expectedDurationInfo.isFixedFps,
requestedPhysicalCameras, isStillCapture, isZslCapture,
captureRequest->mRotateAndCropAuto, mPrevCameraIdsWithZoom,
(mUseHalBufManager) ? uniqueSurfaceIdMap :
@@ -4094,6 +4109,17 @@
}
}
+status_t Camera3Device::setCameraServiceWatchdog(bool enabled) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ if (mCameraServiceWatchdog != NULL) {
+ mCameraServiceWatchdog->setEnabled(enabled);
+ }
+
+ return OK;
+}
+
void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
if (mNextRequests.empty()) {
return;
@@ -4209,6 +4235,11 @@
return;
}
+void Camera3Device::RequestThread::setRequestClearing() {
+ Mutex::Autolock l(mRequestLock);
+ mRequestClearing = true;
+}
+
sp<Camera3Device::CaptureRequest>
Camera3Device::RequestThread::waitForNextRequestLocked() {
status_t res;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 3c60ba2..7dff809 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -281,6 +281,11 @@
*/
status_t setCameraMute(bool enabled);
+ /**
+ * Enables/disables camera service watchdog
+ */
+ status_t setCameraServiceWatchdog(bool enabled);
+
// Get the status tracker for the camera device
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
@@ -856,6 +861,9 @@
*/
void setPaused(bool paused);
+ // set mRequestClearing - no new requests are expected to be queued to RequestThread
+ void setRequestClearing();
+
/**
* Wait until thread processes the capture request with settings'
* android.request.id == requestId.
@@ -984,8 +992,13 @@
// send requests in mNextRequests to HAL in a batch. Return true = success
bool sendRequestsBatch();
- // Calculate the expected (minimum, maximum) duration range for a request
- std::pair<nsecs_t, nsecs_t> calculateExpectedDurationRange(
+ // Calculate the expected (minimum, maximum, isFixedFps) duration info for a request
+ struct ExpectedDurationInfo {
+ nsecs_t minDuration;
+ nsecs_t maxDuration;
+ bool isFixedFps;
+ };
+ ExpectedDurationInfo calculateExpectedDurationRange(
const camera_metadata_t *request);
// Check and update latest session parameters based on the current request settings.
@@ -1104,7 +1117,7 @@
status_t registerInFlight(uint32_t frameNumber,
int32_t numBuffers, CaptureResultExtras resultExtras, bool hasInput,
bool callback, nsecs_t minExpectedDuration, nsecs_t maxExpectedDuration,
- const std::set<std::set<String8>>& physicalCameraIds,
+ bool isFixedFps, const std::set<std::set<String8>>& physicalCameraIds,
bool isStillCapture, bool isZslCapture, bool rotateAndCropAuto,
const std::set<std::string>& cameraIdsWithZoom, const SurfaceMap& outputSurfaces,
nsecs_t requestTimeNs);
@@ -1356,6 +1369,9 @@
// The current minimum expected frame duration based on AE_TARGET_FPS_RANGE
nsecs_t mMinExpectedDuration = 0;
+ // Whether the camera device runs at fixed frame rate based on AE_MODE and
+ // AE_TARGET_FPS_RANGE
+ bool mIsFixedFps = false;
// Injection camera related methods.
class Camera3DeviceInjectionMethods : public virtual RefBase {
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index 8cecabd..a93d1da 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -100,7 +100,7 @@
virtual status_t setBatchSize(size_t batchSize) override;
- virtual void onMinDurationChanged(nsecs_t /*duration*/) {}
+ virtual void onMinDurationChanged(nsecs_t /*duration*/, bool /*fixedFps*/) {}
protected:
/**
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index add1483..f594f84 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -41,8 +41,10 @@
physicalCameraId, sensorPixelModesUsed, setId, isMultiResolution,
dynamicRangeProfile, streamUseCase, deviceTimeBaseIsRealtime, timestampBase),
mTotalBufferCount(0),
+ mMaxCachedBufferCount(0),
mHandoutTotalBufferCount(0),
mHandoutOutputBufferCount(0),
+ mCachedOutputBufferCount(0),
mFrameCount(0),
mLastTimestamp(0) {
@@ -95,8 +97,8 @@
lines.appendFormat(" Timestamp base: %d\n", getTimestampBase());
lines.appendFormat(" Frames produced: %d, last timestamp: %" PRId64 " ns\n",
mFrameCount, mLastTimestamp);
- lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu\n",
- mTotalBufferCount, mHandoutTotalBufferCount);
+ lines.appendFormat(" Total buffers: %zu, currently dequeued: %zu, currently cached: %zu\n",
+ mTotalBufferCount, mHandoutTotalBufferCount, mCachedOutputBufferCount);
write(fd, lines.string(), lines.size());
Camera3Stream::dump(fd, args);
@@ -135,6 +137,14 @@
return (mHandoutTotalBufferCount - mHandoutOutputBufferCount);
}
+size_t Camera3IOStreamBase::getCachedOutputBufferCountLocked() const {
+ return mCachedOutputBufferCount;
+}
+
+size_t Camera3IOStreamBase::getMaxCachedOutputBuffersLocked() const {
+ return mMaxCachedBufferCount;
+}
+
status_t Camera3IOStreamBase::disconnectLocked() {
switch (mState) {
case STATE_IN_RECONFIG:
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index f389d53..ca1f238 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -56,11 +56,18 @@
int getMaxTotalBuffers() const { return mTotalBufferCount; }
protected:
size_t mTotalBufferCount;
+ // The maximum number of cached buffers allowed for this stream
+ size_t mMaxCachedBufferCount;
+
// sum of input and output buffers that are currently acquired by HAL
size_t mHandoutTotalBufferCount;
// number of output buffers that are currently acquired by HAL. This will be
// Redundant when camera3 streams are no longer bidirectional streams.
size_t mHandoutOutputBufferCount;
+ // number of cached output buffers that are currently queued in the camera
+ // server but not yet queued to the buffer queue.
+ size_t mCachedOutputBufferCount;
+
uint32_t mFrameCount;
// Last received output buffer's timestamp
nsecs_t mLastTimestamp;
@@ -97,6 +104,9 @@
virtual size_t getHandoutInputBufferCountLocked();
+ virtual size_t getCachedOutputBufferCountLocked() const;
+ virtual size_t getMaxCachedOutputBuffersLocked() const;
+
virtual status_t getEndpointUsage(uint64_t *usage) const = 0;
status_t getBufferPreconditionCheckLocked() const;
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.h b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
index a799719..5ee6ca5 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.h
@@ -248,6 +248,9 @@
// The current minimum expected frame duration based on AE_TARGET_FPS_RANGE
nsecs_t mMinExpectedDuration = 0;
+ // Whether the camera device runs at fixed frame rate based on AE_MODE and
+ // AE_TARGET_FPS_RANGE
+ bool mIsFixedFps = false;
// SetErrorInterface
void setErrorState(const char *fmt, ...) override;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 1e20ee0..396104c 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -67,6 +67,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -100,6 +101,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -140,6 +142,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -188,6 +191,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -415,6 +419,7 @@
mLock.unlock();
ANativeWindowBuffer *anwBuffer = container_of(buffer.buffer, ANativeWindowBuffer, handle);
+ bool bufferDeferred = false;
/**
* Return buffer back to ANativeWindow
*/
@@ -462,17 +467,20 @@
}
}
+ nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
+ readoutTimestamp : timestamp) - mTimestampOffset;
if (mPreviewFrameSpacer != nullptr) {
- res = mPreviewFrameSpacer->queuePreviewBuffer(timestamp - mTimestampOffset,
- readoutTimestamp - mTimestampOffset, transform, anwBuffer, anwReleaseFence);
+ nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
+ - mTimestampOffset;
+ res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
+ transform, anwBuffer, anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
+ bufferDeferred = true;
} else {
- nsecs_t captureTime = (mSyncToDisplay ? readoutTimestamp : timestamp)
- - mTimestampOffset;
nsecs_t presentTime = mSyncToDisplay ?
syncTimestampToDisplayLocked(captureTime) : captureTime;
@@ -495,6 +503,10 @@
}
mLock.lock();
+ if (bufferDeferred) {
+ mCachedOutputBufferCount++;
+ }
+
// Once a valid buffer has been returned to the queue, can no longer
// dequeue all buffers for preallocation.
if (buffer.status != CAMERA_BUFFER_STATUS_ERROR) {
@@ -690,10 +702,15 @@
!isVideoStream());
if (forceChoreographer || defaultToChoreographer) {
mSyncToDisplay = true;
+ // For choreographer synced stream, extra buffers aren't kept by
+ // camera service. So no need to update mMaxCachedBufferCount.
mTotalBufferCount += kDisplaySyncExtraBuffer;
} else if (defaultToSpacer) {
mPreviewFrameSpacer = new PreviewFrameSpacer(this, mConsumer);
- mTotalBufferCount ++;
+ // For preview frame spacer, the extra buffer is kept by camera
+ // service. So update mMaxCachedBufferCount.
+ mMaxCachedBufferCount = 1;
+ mTotalBufferCount += mMaxCachedBufferCount;
res = mPreviewFrameSpacer->run(String8::format("PreviewSpacer-%d", mId).string());
if (res != OK) {
ALOGE("%s: Unable to start preview spacer", __FUNCTION__);
@@ -705,12 +722,16 @@
mFrameCount = 0;
mLastTimestamp = 0;
+ mUseReadoutTime =
+ (timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
+
if (isDeviceTimeBaseRealtime()) {
if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
// Default time base, but not hardware composer or video encoder
mTimestampOffset = 0;
} else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
- timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
mTimestampOffset = 0;
}
// If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
@@ -720,7 +741,7 @@
// Reverse offset for monotonicTime -> bootTime
mTimestampOffset = -mTimestampOffset;
} else {
- // If timestampBase is DEFAULT, MONOTONIC, SENSOR, or
+ // If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
// CHOREOGRAPHER_SYNCED, timestamp offset is 0.
mTimestampOffset = 0;
}
@@ -958,6 +979,14 @@
return true;
}
+void Camera3OutputStream::onCachedBufferQueued() {
+ Mutex::Autolock l(mLock);
+ mCachedOutputBufferCount--;
+ // Signal whoever is waiting for the buffer to be returned to the buffer
+ // queue.
+ mOutputBufferReturnedSignal.signal();
+}
+
status_t Camera3OutputStream::disconnectLocked() {
status_t res;
@@ -1357,9 +1386,10 @@
return OK;
}
-void Camera3OutputStream::onMinDurationChanged(nsecs_t duration) {
+void Camera3OutputStream::onMinDurationChanged(nsecs_t duration, bool fixedFps) {
Mutex::Autolock l(mLock);
mMinExpectedDuration = duration;
+ mFixedFps = fixedFps;
}
void Camera3OutputStream::returnPrefetchedBuffersLocked() {
@@ -1380,29 +1410,39 @@
}
nsecs_t Camera3OutputStream::syncTimestampToDisplayLocked(nsecs_t t) {
+ nsecs_t currentTime = systemTime();
+ if (!mFixedFps) {
+ mLastCaptureTime = t;
+ mLastPresentTime = currentTime;
+ return t;
+ }
+
ParcelableVsyncEventData parcelableVsyncEventData;
auto res = mDisplayEventReceiver.getLatestVsyncEventData(&parcelableVsyncEventData);
if (res != OK) {
ALOGE("%s: Stream %d: Error getting latest vsync event data: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
mLastCaptureTime = t;
- mLastPresentTime = t;
+ mLastPresentTime = currentTime;
return t;
}
const VsyncEventData& vsyncEventData = parcelableVsyncEventData.vsync;
- nsecs_t currentTime = systemTime();
+ nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;
- // Reset capture to present time offset if:
- // - More than 1 second between frames.
- // - The frame duration deviates from multiples of vsync frame intervals.
+ // Find the best presentation time without worrying about previous frame's
+ // presentation time if capture interval is more than kSpacingResetIntervalNs.
+ //
+ // When the capture interval is more than 50 ms (3 vsyncs at a 60 Hz refresh rate),
+ // there is little risk in starting over and finding the earliest vsync to latch onto.
+ // - Update captureToPresentTime offset to be used for later frames.
+ // - Example use cases:
+ // - when frame rate drops down to below 20 fps, or
+ // - A new streaming session starts (stopPreview followed by
+ // startPreview)
+ //
nsecs_t captureInterval = t - mLastCaptureTime;
- float captureToVsyncIntervalRatio = 1.0f * captureInterval / vsyncEventData.frameInterval;
- float ratioDeviation = std::fabs(
- captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
- if (captureInterval > kSpacingResetIntervalNs ||
- ratioDeviation >= kMaxIntervalRatioDeviation) {
- nsecs_t minPresentT = mLastPresentTime + vsyncEventData.frameInterval / 2;
+ if (captureInterval > kSpacingResetIntervalNs) {
for (size_t i = 0; i < VsyncEventData::kFrameTimelinesLength; i++) {
const auto& timeline = vsyncEventData.frameTimelines[i];
if (timeline.deadlineTimestamp >= currentTime &&
@@ -1424,21 +1464,54 @@
nsecs_t idealPresentT = t + mCaptureToPresentOffset;
nsecs_t expectedPresentT = mLastPresentTime;
nsecs_t minDiff = INT64_MAX;
- // Derive minimum intervals between presentation times based on minimal
+
+ // In fixed FPS case, when frame durations are close to multiples of display refresh
+ // rate, derive minimum intervals between presentation times based on minimal
// expected duration. The minimum number of Vsyncs is:
// - 0 if minFrameDuration in (0, 1.5] * vSyncInterval,
// - 1 if minFrameDuration in (1.5, 2.5] * vSyncInterval,
// - and so on.
+ //
+ // This spaces out the displaying of the frames so that the frame
+ // presentations are roughly in sync with frame captures.
int minVsyncs = (mMinExpectedDuration - vsyncEventData.frameInterval / 2) /
vsyncEventData.frameInterval;
if (minVsyncs < 0) minVsyncs = 0;
nsecs_t minInterval = minVsyncs * vsyncEventData.frameInterval;
+
+ // In fixed FPS case, if the frame duration deviates from multiples of
+ // display refresh rate, find the closest Vsync without requiring a minimum
+ // number of Vsync.
+ //
+ // Example: (24fps camera, 60hz refresh):
+ // capture readout: | t1 | t1 | .. | t1 | .. | t1 | .. | t1 |
+ // display VSYNC: | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
+ // | : 1 frame
+ // t1 : 41.67ms
+ // t2 : 16.67ms
+ // t1/t2 = 2.5
+ //
+ // 24fps is a commonly used video frame rate. Because the capture
+ // interval is 2.5 times of display refresh interval, the minVsyncs
+ // calculation will directly fall at the boundary condition. In this case,
+ // we should fall back to the basic logic of finding closest vsync
+ // timestamp without worrying about minVsyncs.
+ float captureToVsyncIntervalRatio = 1.0f * mMinExpectedDuration / vsyncEventData.frameInterval;
+ float ratioDeviation = std::fabs(
+ captureToVsyncIntervalRatio - std::roundf(captureToVsyncIntervalRatio));
+ bool captureDeviateFromVsync = ratioDeviation >= kMaxIntervalRatioDeviation;
+ bool cameraDisplayInSync = (mFixedFps && !captureDeviateFromVsync);
+
// Find best timestamp in the vsync timelines:
- // - Only use at most 3 timelines to avoid long latency
- // - closest to the ideal present time,
+ // - Only use at most kMaxTimelines timelines to avoid long latency
+ // - closest to the ideal presentation time,
// - deadline timestamp is greater than the current time, and
- // - the candidate present time is at least minInterval in the future
- // compared to last present time.
+ // - For fixed FPS, if the capture interval doesn't deviate too much from refresh interval,
+ // the candidate presentation time is at least minInterval in the future compared to last
+ // presentation time.
+ // - For variable FPS, or if the capture interval deviates from the refresh
+ // interval by more than 5%, find a presentation time closest to
+ // (lastPresentationTime + captureToPresentOffset) instead.
int maxTimelines = std::min(kMaxTimelines, (int)VsyncEventData::kFrameTimelinesLength);
float biasForShortDelay = 1.0f;
for (int i = 0; i < maxTimelines; i ++) {
@@ -1451,12 +1524,51 @@
}
if (std::abs(vsyncTime.expectedPresentationTime - idealPresentT) < minDiff &&
vsyncTime.deadlineTimestamp >= currentTime &&
- vsyncTime.expectedPresentationTime >
- mLastPresentTime + minInterval + biasForShortDelay * kTimelineThresholdNs) {
+ ((!cameraDisplayInSync && vsyncTime.expectedPresentationTime > minPresentT) ||
+ (cameraDisplayInSync && vsyncTime.expectedPresentationTime >
+ mLastPresentTime + minInterval +
+ static_cast<nsecs_t>(biasForShortDelay * kTimelineThresholdNs)))) {
expectedPresentT = vsyncTime.expectedPresentationTime;
minDiff = std::abs(vsyncTime.expectedPresentationTime - idealPresentT);
}
}
+
+ if (expectedPresentT == mLastPresentTime && expectedPresentT <
+ vsyncEventData.frameTimelines[maxTimelines-1].expectedPresentationTime) {
+ // Couldn't find a reasonable presentation time. Using last frame's
+ // presentation time would cause a frame drop. The best option now
+ // is to use the next VSync as long as the last presentation time
+ // doesn't already have the maximum latency, in which case dropping the
+ // buffer is preferable to further increasing latency.
+ //
+ // Example: (60fps camera, 59.9hz refresh):
+ // capture readout: | t1 | t1 | .. | t1 | .. | t1 | .. | t1 |
+ // \ \ \ \ \ \ \ \ \
+ // queue to BQ: | | | | | | | | |
+ // \ \ \ \ \ \ \ \ \
+ // display VSYNC: | t2 | t2 | ... | t2 | ... | t2 | ... | t2 |
+ //
+ // |: 1 frame
+ // t1 : 16.67ms
+ // t2 : 16.69ms
+ //
+ // It takes 833 frames for capture readout count and display VSYNC count to be off
+ // by 1.
+ // - At frames [0, 832], presentationTime is set to timeline[0]
+ // - At frames [833, 833*2-1], presentationTime is set to timeline[1]
+ // - At frames [833*2, 833*3-1] presentationTime is set to timeline[2]
+ // - At frame 833*3, no presentation time is found because we only
+ // search for timeline[0..2].
+ // - Dropping one buffer is better than further extending the presentation
+ // time.
+ //
+ // However, if frame 833*2 arrives 16.67ms early (right after frame
+ // 833*2-1), no presentation time can be found because
+ // getLatestVsyncEventData is called early. In that case, it's better to
+ // set the presentation time by offsetting the last presentation time.
+ expectedPresentT += vsyncEventData.frameInterval;
+ }
+
mLastCaptureTime = t;
mLastPresentTime = expectedPresentT;
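For orientation, the minVsyncs and deviation checks above with example numbers (illustrative only, not part of the patch):
// Fixed 30 fps preview on a 60 Hz panel:
//   mMinExpectedDuration ~ 33.3 ms, frameInterval ~ 16.7 ms
//   minVsyncs = (33.3 - 16.7 / 2) / 16.7 -> 1 (integer division), so consecutive
//   presentation times are spaced at least one extra vsync apart.
// Fixed 24 fps on 60 Hz:
//   captureToVsyncIntervalRatio = 41.67 / 16.67 = 2.5, ratioDeviation = 0.5 >= 0.05,
//   so captureDeviateFromVsync is true and the minInterval constraint is skipped.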
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index e8065ce..db988a0 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -247,9 +247,10 @@
virtual status_t setBatchSize(size_t batchSize = 1) override;
/**
- * Notify the stream on change of min frame durations.
+ * Notify the stream on change of min frame durations or variable/fixed
+ * frame rate.
*/
- virtual void onMinDurationChanged(nsecs_t duration) override;
+ virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) override;
/**
* Apply ZSL related consumer usage quirk.
@@ -258,6 +259,7 @@
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
bool shouldLogError(status_t res);
+ void onCachedBufferQueued();
protected:
Camera3OutputStream(int id, camera_stream_type_t type,
@@ -341,6 +343,11 @@
nsecs_t mTimestampOffset;
/**
+ * If camera readout time is used rather than the start-of-exposure time.
+ */
+ bool mUseReadoutTime;
+
+ /**
* Consumer end point usage flag set by the constructor for the deferred
* consumer case.
*/
@@ -414,6 +421,7 @@
// Re-space frames by overriding timestamp to align with display Vsync.
// Default is on for SurfaceView bound streams.
+ bool mFixedFps = false;
nsecs_t mMinExpectedDuration = 0;
bool mSyncToDisplay = false;
DisplayEventReceiver mDisplayEventReceiver;
@@ -424,7 +432,7 @@
static constexpr nsecs_t kSpacingResetIntervalNs = 50000000LL; // 50 millisecond
static constexpr nsecs_t kTimelineThresholdNs = 1000000LL; // 1 millisecond
static constexpr float kMaxIntervalRatioDeviation = 0.05f;
- static constexpr int kMaxTimelines = 3;
+ static constexpr int kMaxTimelines = 2;
nsecs_t syncTimestampToDisplayLocked(nsecs_t t);
// Re-space frames by delaying queueBuffer so that frame delivery has
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index a6d4b96..dbc6fe1 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -110,12 +110,13 @@
virtual status_t setBatchSize(size_t batchSize = 1) = 0;
/**
- * Notify the output stream that the minimum frame duration has changed.
+ * Notify the output stream that the minimum frame duration has changed, or
+ * frame rate has switched between variable and fixed.
*
* The minimum frame duration is calculated based on the upper bound of
* AE_TARGET_FPS_RANGE in the capture request.
*/
- virtual void onMinDurationChanged(nsecs_t duration) = 0;
+ virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) = 0;
};
// Helper class to organize a synchronized mapping of stream IDs to stream instances
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index ed66df0..e16982b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -787,10 +787,12 @@
SessionStatsBuilder& sessionStatsBuilder) {
bool timestampIncreasing =
!((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
+ nsecs_t readoutTimestamp = request.resultExtras.hasReadoutTimestamp ?
+ request.resultExtras.readoutTimestamp : 0;
returnOutputBuffers(useHalBufManager, listener,
request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(),
- request.shutterTimestamp, request.shutterReadoutTimestamp,
+ request.shutterTimestamp, readoutTimestamp,
/*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
request.outputSurfaces, request.resultExtras,
request.errorBufStrategy, request.transform);
@@ -852,13 +854,18 @@
}
r.shutterTimestamp = msg.timestamp;
- r.shutterReadoutTimestamp = msg.readout_timestamp;
- if (r.minExpectedDuration != states.minFrameDuration) {
+ if (msg.readout_timestamp_valid) {
+ r.resultExtras.hasReadoutTimestamp = true;
+ r.resultExtras.readoutTimestamp = msg.readout_timestamp;
+ }
+ if (r.minExpectedDuration != states.minFrameDuration ||
+ r.isFixedFps != states.isFixedFps) {
for (size_t i = 0; i < states.outputStreams.size(); i++) {
auto outputStream = states.outputStreams[i];
- outputStream->onMinDurationChanged(r.minExpectedDuration);
+ outputStream->onMinDurationChanged(r.minExpectedDuration, r.isFixedFps);
}
states.minFrameDuration = r.minExpectedDuration;
+ states.isFixedFps = r.isFixedFps;
}
if (r.hasCallback) {
ALOGVV("Camera %s: %s: Shutter fired for frame %d (id %d) at %" PRId64,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index d6107c2..8c71c2b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -106,6 +106,7 @@
BufferRecordsInterface& bufferRecordsIntf;
bool legacyClient;
nsecs_t& minFrameDuration;
+ bool& isFixedFps;
};
void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result);
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 7ad6649..88be9ff 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -665,11 +665,19 @@
}
}
- // Wait for new buffer returned back if we are running into the limit.
+ // Wait for a new buffer to be returned if we are running into a limit. There
+ // are 2 limits:
+ // 1. The number of HAL buffers is greater than max_buffers
+ // 2. The number of HAL buffers + cached buffers is greater than max_buffers
+ // + maxCachedBuffers
size_t numOutstandingBuffers = getHandoutOutputBufferCountLocked();
- if (numOutstandingBuffers == camera_stream::max_buffers) {
- ALOGV("%s: Already dequeued max output buffers (%d), wait for next returned one.",
- __FUNCTION__, camera_stream::max_buffers);
+ size_t numCachedBuffers = getCachedOutputBufferCountLocked();
+ size_t maxNumCachedBuffers = getMaxCachedOutputBuffersLocked();
+ while (numOutstandingBuffers == camera_stream::max_buffers ||
+ numOutstandingBuffers + numCachedBuffers ==
+ camera_stream::max_buffers + maxNumCachedBuffers) {
+ ALOGV("%s: Already dequeued max output buffers (%d(+%zu)), wait for next returned one.",
+ __FUNCTION__, camera_stream::max_buffers, maxNumCachedBuffers);
nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
if (waitBufferTimeout < kWaitForBufferDuration) {
waitBufferTimeout = kWaitForBufferDuration;
@@ -687,12 +695,16 @@
}
size_t updatedNumOutstandingBuffers = getHandoutOutputBufferCountLocked();
- if (updatedNumOutstandingBuffers >= numOutstandingBuffers) {
- ALOGE("%s: outsanding buffer count goes from %zu to %zu, "
+ size_t updatedNumCachedBuffers = getCachedOutputBufferCountLocked();
+ if (updatedNumOutstandingBuffers >= numOutstandingBuffers &&
+ updatedNumCachedBuffers == numCachedBuffers) {
+ ALOGE("%s: outstanding buffer count goes from %zu to %zu, "
"getBuffer(s) call must not run in parallel!", __FUNCTION__,
numOutstandingBuffers, updatedNumOutstandingBuffers);
return INVALID_OPERATION;
}
+ numOutstandingBuffers = updatedNumOutstandingBuffers;
+ numCachedBuffers = updatedNumCachedBuffers;
}
res = getBufferLocked(buffer, surface_ids);
@@ -1057,11 +1069,20 @@
}
size_t numOutstandingBuffers = getHandoutOutputBufferCountLocked();
- // Wait for new buffer returned back if we are running into the limit.
- while (numOutstandingBuffers + numBuffersRequested > camera_stream::max_buffers) {
- ALOGV("%s: Already dequeued %zu output buffers and requesting %zu (max is %d), waiting.",
- __FUNCTION__, numOutstandingBuffers, numBuffersRequested,
- camera_stream::max_buffers);
+ size_t numCachedBuffers = getCachedOutputBufferCountLocked();
+ size_t maxNumCachedBuffers = getMaxCachedOutputBuffersLocked();
+ // Wait for a new buffer to be returned if we are running into a limit. There
+ // are 2 limits:
+ // 1. The number of HAL buffers is greater than max_buffers
+ // 2. The number of HAL buffers + cached buffers is greater than max_buffers
+ // + maxCachedBuffers
+ while (numOutstandingBuffers + numBuffersRequested > camera_stream::max_buffers ||
+ numOutstandingBuffers + numCachedBuffers + numBuffersRequested >
+ camera_stream::max_buffers + maxNumCachedBuffers) {
+ ALOGV("%s: Already dequeued %zu(+%zu) output buffers and requesting %zu "
+ "(max is %d(+%zu)), waiting.", __FUNCTION__, numOutstandingBuffers,
+ numCachedBuffers, numBuffersRequested, camera_stream::max_buffers,
+ maxNumCachedBuffers);
nsecs_t waitStart = systemTime(SYSTEM_TIME_MONOTONIC);
if (waitBufferTimeout < kWaitForBufferDuration) {
waitBufferTimeout = kWaitForBufferDuration;
@@ -1078,13 +1099,16 @@
return res;
}
size_t updatedNumOutstandingBuffers = getHandoutOutputBufferCountLocked();
- if (updatedNumOutstandingBuffers >= numOutstandingBuffers) {
- ALOGE("%s: outsanding buffer count goes from %zu to %zu, "
+ size_t updatedNumCachedBuffers = getCachedOutputBufferCountLocked();
+ if (updatedNumOutstandingBuffers >= numOutstandingBuffers &&
+ updatedNumCachedBuffers == numCachedBuffers) {
+ ALOGE("%s: outstanding buffer count goes from %zu to %zu, "
"getBuffer(s) call must not run in parallel!", __FUNCTION__,
numOutstandingBuffers, updatedNumOutstandingBuffers);
return INVALID_OPERATION;
}
numOutstandingBuffers = updatedNumOutstandingBuffers;
+ numCachedBuffers = updatedNumCachedBuffers;
}
res = getBuffersLocked(buffers);
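A small worked example of the two limits enforced above (numbers are illustrative, not from the patch):
// Suppose camera_stream::max_buffers = 4 and maxNumCachedBuffers = 1
// (one buffer parked in the PreviewFrameSpacer).
//   Limit 1: block while the HAL holds 4 buffers (numOutstandingBuffers == 4).
//   Limit 2: block while HAL-held plus cached buffers total 5
//            (numOutstandingBuffers + numCachedBuffers == 4 + 1).
// The HAL can therefore still dequeue up to max_buffers while one frame sits
// in the spacer, and the combined total remains bounded.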
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index d429e6c..214618a 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -558,6 +558,10 @@
// Get handout input buffer count.
virtual size_t getHandoutInputBufferCountLocked() = 0;
+ // Get cached output buffer count.
+ virtual size_t getCachedOutputBufferCountLocked() const = 0;
+ virtual size_t getMaxCachedOutputBuffersLocked() const = 0;
+
// Get the usage flags for the other endpoint, or return
// INVALID_OPERATION if they cannot be obtained.
virtual status_t getEndpointUsage(uint64_t *usage) const = 0;
@@ -576,6 +580,8 @@
uint64_t mUsage;
+ Condition mOutputBufferReturnedSignal;
+
private:
// Previously configured stream properties (post HAL override)
uint64_t mOldUsage;
@@ -583,7 +589,6 @@
int mOldFormat;
android_dataspace mOldDataSpace;
- Condition mOutputBufferReturnedSignal;
Condition mInputBufferReturnedSignal;
static const nsecs_t kWaitForBufferDuration = 3000000000LL; // 3000 ms
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 493a9e2..444445b 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -65,6 +65,7 @@
typedef struct camera_shutter_msg {
uint32_t frame_number;
uint64_t timestamp;
+ bool readout_timestamp_valid;
uint64_t readout_timestamp;
} camera_shutter_msg_t;
@@ -104,8 +105,6 @@
struct InFlightRequest {
// Set by notify() SHUTTER call.
nsecs_t shutterTimestamp;
- // Set by notify() SHUTTER call with readout time.
- nsecs_t shutterReadoutTimestamp;
// Set by process_capture_result().
nsecs_t sensorTimestamp;
int requestStatus;
@@ -153,6 +152,9 @@
// For auto-exposure modes, equal to 1/(lower end of target FPS range)
nsecs_t maxExpectedDuration;
+ // Whether the FPS range is fixed, aka, minFps == maxFps
+ bool isFixedFps;
+
// Whether the result metadata for this request is to be skipped. The
// result metadata should be skipped in the case of
// REQUEST/RESULT error.
@@ -206,6 +208,7 @@
hasCallback(true),
minExpectedDuration(kDefaultMinExpectedDuration),
maxExpectedDuration(kDefaultMaxExpectedDuration),
+ isFixedFps(false),
skipResultMetadata(false),
errorBufStrategy(ERROR_BUF_CACHE),
stillCapture(false),
@@ -216,7 +219,7 @@
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
- bool hasAppCallback, nsecs_t minDuration, nsecs_t maxDuration,
+ bool hasAppCallback, nsecs_t minDuration, nsecs_t maxDuration, bool fixedFps,
const std::set<std::set<String8>>& physicalCameraIdSet, bool isStillCapture,
bool isZslCapture, bool rotateAndCropAuto, const std::set<std::string>& idsWithZoom,
nsecs_t requestNs, const SurfaceMap& outSurfaces = SurfaceMap{}) :
@@ -230,6 +233,7 @@
hasCallback(hasAppCallback),
minExpectedDuration(minDuration),
maxExpectedDuration(maxDuration),
+ isFixedFps(fixedFps),
skipResultMetadata(false),
errorBufStrategy(ERROR_BUF_CACHE),
physicalCameraIds(physicalCameraIdSet),
diff --git a/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp b/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
index 0439501..b3cb178 100644
--- a/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
+++ b/services/camera/libcameraservice/device3/PreviewFrameSpacer.cpp
@@ -68,7 +68,7 @@
return true;
}
- // Cache the frame to match readout time interval, for up to 33ms
+ // Cache the frame to match readout time interval, for up to kMaxFrameWaitTime
nsecs_t expectedQueueTime = mLastCameraPresentTime + readoutInterval;
nsecs_t frameWaitTime = std::min(kMaxFrameWaitTime, expectedQueueTime - currentTime);
if (frameWaitTime > 0 && mPendingBuffers.size() < 2) {
@@ -122,6 +122,7 @@
}
}
+ parent->onCachedBufferQueued();
mLastCameraPresentTime = currentTime;
mLastCameraReadoutTime = bufferHolder.readoutTimestamp;
}
diff --git a/services/camera/libcameraservice/device3/PreviewFrameSpacer.h b/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
index e165768..cb9690c 100644
--- a/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
+++ b/services/camera/libcameraservice/device3/PreviewFrameSpacer.h
@@ -85,7 +85,7 @@
nsecs_t mLastCameraPresentTime = 0;
static constexpr nsecs_t kWaitDuration = 5000000LL; // 5ms
static constexpr nsecs_t kFrameIntervalThreshold = 80000000LL; // 80ms
- static constexpr nsecs_t kMaxFrameWaitTime = 33333333LL; // 33ms
+ static constexpr nsecs_t kMaxFrameWaitTime = 10000000LL; // 10ms
};
}; //namespace camera3
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index f05520f..ec28d31 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -51,6 +51,7 @@
#include <aidl/android/hardware/camera/device/ICameraInjectionSession.h>
#include <aidlcommonsupport/NativeHandle.h>
+#include <android/binder_ibinder_platform.h>
#include <android/hardware/camera2/ICameraDeviceUser.h>
#include "utils/CameraTraces.h"
@@ -371,7 +372,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
- *this, *(mInterface), mLegacyClient, mMinExpectedDuration}, mResultMetadataQueue
+ *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
for (const auto& result : results) {
@@ -412,7 +414,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
- *this, *(mInterface), mLegacyClient, mMinExpectedDuration}, mResultMetadataQueue
+ *this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
@@ -669,6 +672,12 @@
return p->returnStreamBuffers(buffers);
}
+::ndk::SpAIBinder AidlCamera3Device::AidlCameraDeviceCallbacks::createBinder() {
+ auto binder = BnCameraDeviceCallback::createBinder();
+ AIBinder_setInheritRt(binder.get(), /*inheritRt*/ true);
+ return binder;
+}
+
::ndk::ScopedAStatus AidlCamera3Device::returnStreamBuffers(
const std::vector<camera::device::StreamBuffer>& buffers) {
ReturnBufferStates states {
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
index d20a7eb..fd66661 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.h
@@ -242,6 +242,10 @@
::ndk::ScopedAStatus returnStreamBuffers(
const std::vector<
aidl::android::hardware::camera::device::StreamBuffer>& buffers) override;
+
+ protected:
+ ::ndk::SpAIBinder createBinder() override;
+
private:
wp<AidlCamera3Device> mParent = nullptr;
};
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 336719d..8ff0b07 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -30,6 +30,7 @@
#include <utils/Trace.h>
#include <android/hardware/camera2/ICameraDeviceCallbacks.h>
+#include <android/binder_ibinder_platform.h>
#include "device3/aidl/AidlCamera3OfflineSession.h"
#include "device3/Camera3OutputStream.h"
@@ -123,7 +124,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
- *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration}, mResultMetadataQueue
+ *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -168,7 +170,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
- *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration}, mResultMetadataQueue
+ *this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
@@ -218,6 +221,12 @@
return p->returnStreamBuffers(buffers);
}
+::ndk::SpAIBinder AidlCamera3OfflineSession::AidlCameraDeviceCallbacks::createBinder() {
+ auto binder = BnCameraDeviceCallback::createBinder();
+ AIBinder_setInheritRt(binder.get(), /*inheritRt*/ true);
+ return binder;
+}
+
::ndk::ScopedAStatus AidlCamera3OfflineSession::returnStreamBuffers(
const std::vector<camera::device::StreamBuffer>& buffers) {
{
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
index 33de2c5..d107af6 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.h
@@ -97,6 +97,10 @@
::ndk::ScopedAStatus returnStreamBuffers(
const std::vector<
aidl::android::hardware::camera::device::StreamBuffer>& buffers) override;
+ protected:
+
+ ::ndk::SpAIBinder createBinder() override;
+
private:
wp<AidlCamera3OfflineSession> mParent = nullptr;
};
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
index 02eebd2..b2accc1 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
@@ -110,6 +110,7 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.get<Tag::shutter>().frameNumber;
m.message.shutter.timestamp = msg.get<Tag::shutter>().timestamp;
+ m.message.shutter.readout_timestamp_valid = true;
m.message.shutter.readout_timestamp = msg.get<Tag::shutter>().readoutTimestamp;
break;
}
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 4bb426c..9557692 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -363,7 +363,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration}, mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps}, mResultMetadataQueue
};
//HidlCaptureOutputStates hidlStates {
@@ -425,7 +425,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration}, mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps}, mResultMetadataQueue
};
for (const auto& result : results) {
@@ -472,7 +472,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration}, mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index 5c97f0e..2b4f8a1 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -105,7 +105,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration}, mResultMetadataQueue
+ mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -145,7 +146,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration}, mResultMetadataQueue
+ mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -180,7 +182,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration}, mResultMetadataQueue
+ mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps},
+ mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
index 8b0cd65..ff6fc17 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
@@ -105,6 +105,7 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
m.message.shutter.timestamp = msg.msg.shutter.timestamp;
+ m.message.shutter.readout_timestamp_valid = false;
m.message.shutter.readout_timestamp = 0LL;
break;
}
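A minimal sketch of how a consumer of the shutter message could use the new validity flag; the struct and function below are illustrative stand-ins, not the real camera service types. As the hunks above show, the AIDL path sets readout_timestamp_valid to true while the HIDL path sets it to false, so callers should fall back to the start-of-exposure timestamp when the flag is false.

#include <cstdint>

struct ShutterMsgSketch {              // hypothetical stand-in type
    int64_t frame_number = 0;
    int64_t timestamp = 0;             // start-of-exposure time, ns
    bool    readout_timestamp_valid = false;
    int64_t readout_timestamp = 0;     // start-of-readout time, ns
};

// Prefer the readout timestamp when the HAL provided one (AIDL path),
// otherwise fall back to the start-of-exposure timestamp (HIDL path).
static int64_t pickShutterTimestamp(const ShutterMsgSketch& m) {
    return m.readout_timestamp_valid ? m.readout_timestamp : m.timestamp;
}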
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 69175cc..dae5eea 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -262,11 +262,11 @@
sessionStats->onClose(latencyMs);
}
-bool CameraServiceProxyWrapper::isCameraDisabled() {
+bool CameraServiceProxyWrapper::isCameraDisabled(int userId) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
if (proxyBinder == nullptr) return true;
bool ret = false;
- auto status = proxyBinder->isCameraDisabled(&ret);
+ auto status = proxyBinder->isCameraDisabled(userId, &ret);
if (!status.isOk()) {
ALOGE("%s: Failed during camera disabled query: %s", __FUNCTION__,
status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index e34a8f0..eb818d1 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -97,7 +97,7 @@
static int getRotateAndCropOverride(String16 packageName, int lensFacing, int userId);
// Detect if the camera is disabled by device policy.
- static bool isCameraDisabled();
+ static bool isCameraDisabled(int userId);
};
} // android
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index 2eb2d55..7dde268 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -458,7 +458,7 @@
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (timestampBase < OutputConfiguration::TIMESTAMP_BASE_DEFAULT ||
- timestampBase > OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED) {
+ timestampBase > OutputConfiguration::TIMESTAMP_BASE_MAX) {
String8 msg = String8::format("Camera %s: invalid timestamp base %d",
logicalCameraId.string(), timestampBase);
ALOGE("%s: %s", __FUNCTION__, msg.string());
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
index 4e6f832..5444f2a 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
@@ -50,7 +50,7 @@
for (const auto &stream : aidl.streams) {
if (static_cast<int>(stream.dynamicRangeProfile) !=
ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
- ALOGE("%s Dynamic range profile %" PRId64 " not supported by HIDL", __FUNCTION__,
+ ALOGE("%s Dynamic range profile %" PRId64 " not supported by HIDL", __FUNCTION__,
stream.dynamicRangeProfile);
return BAD_VALUE;
}
diff --git a/services/camera/libcameraservice/utils/TagMonitor.cpp b/services/camera/libcameraservice/utils/TagMonitor.cpp
index 461f5e9..fe87ed6 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.cpp
+++ b/services/camera/libcameraservice/utils/TagMonitor.cpp
@@ -169,6 +169,22 @@
camera_metadata_entry lastEntry = lastValues.find(tag);
+ // Monitor when the stream ids change; this helps to visually tell which
+ // monitored metadata values belong to capture requests with different
+ // stream ids.
+ if (source == REQUEST) {
+ if (inputStreamId != mLastInputStreamId) {
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, camera_metadata_ro_entry_t{},
+ cameraId, std::unordered_set<int>(), inputStreamId);
+ mLastInputStreamId = inputStreamId;
+ }
+
+ if (outputStreamIds != mLastStreamIds) {
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, camera_metadata_ro_entry_t{},
+ cameraId, outputStreamIds, -1);
+ mLastStreamIds = outputStreamIds;
+ }
+ }
if (entry.count > 0) {
bool isDifferent = false;
if (lastEntry.count > 0) {
@@ -190,22 +206,14 @@
// No last entry, so always consider to be different
isDifferent = true;
}
- // Also monitor when the stream ids change, this helps visually see what
- // monitored metadata values are for capture requests with different
- // stream ids.
- if (source == REQUEST &&
- (inputStreamId != mLastInputStreamId || outputStreamIds != mLastStreamIds)) {
- mLastInputStreamId = inputStreamId;
- mLastStreamIds = outputStreamIds;
- isDifferent = true;
- }
+
if (isDifferent) {
ALOGV("%s: Tag %s changed", __FUNCTION__,
get_local_camera_metadata_tag_name_vendor_id(
tag, mVendorTagId));
lastValues.update(entry);
mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
- outputStreamIds, inputStreamId);
+ std::unordered_set<int>(), -1);
}
} else if (lastEntry.count > 0) {
// Value has been removed
@@ -219,8 +227,8 @@
entry.count = 0;
mLastInputStreamId = inputStreamId;
mLastStreamIds = outputStreamIds;
- mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId, outputStreamIds,
- inputStreamId);
+ mMonitoringEvents.emplace(source, frameNumber, timestamp, entry, cameraId,
+ std::unordered_set<int>(), -1);
}
}
@@ -261,23 +269,39 @@
for (const auto& event : mMonitoringEvents) {
int indentation = (event.source == REQUEST) ? 15 : 30;
- String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s%s.%s: ",
+ String8 eventString = String8::format("f%d:%" PRId64 "ns:%*s%*s",
event.frameNumber, event.timestamp,
2, event.cameraId.c_str(),
indentation,
- event.source == REQUEST ? "REQ:" : "RES:",
+ event.source == REQUEST ? "REQ:" : "RES:");
+
+ if (!event.outputStreamIds.empty()) {
+ eventString += " output stream ids:";
+ for (const auto& id : event.outputStreamIds) {
+ eventString.appendFormat(" %d", id);
+ }
+ eventString += "\n";
+ vec.emplace_back(eventString.string());
+ continue;
+ }
+
+ if (event.inputStreamId != -1) {
+ eventString.appendFormat(" input stream id: %d\n", event.inputStreamId);
+ vec.emplace_back(eventString.string());
+ continue;
+ }
+
+ eventString += String8::format(
+ "%s.%s: ",
get_local_camera_metadata_section_name_vendor_id(event.tag, mVendorTagId),
get_local_camera_metadata_tag_name_vendor_id(event.tag, mVendorTagId));
- if (event.newData.size() == 0) {
- eventString += " (Removed)";
+
+ if (event.newData.empty()) {
+ eventString += " (Removed)\n";
} else {
- eventString += getEventDataString(event.newData.data(),
- event.tag,
- event.type,
- event.newData.size() / camera_metadata_type_size[event.type],
- indentation + 18,
- event.outputStreamIds,
- event.inputStreamId);
+ eventString += getEventDataString(
+ event.newData.data(), event.tag, event.type,
+ event.newData.size() / camera_metadata_type_size[event.type], indentation + 18);
}
vec.emplace_back(eventString.string());
}
@@ -285,13 +309,8 @@
#define CAMERA_METADATA_ENUM_STRING_MAX_SIZE 29
-String8 TagMonitor::getEventDataString(const uint8_t* data_ptr,
- uint32_t tag,
- int type,
- int count,
- int indentation,
- const std::unordered_set<int32_t>& outputStreamIds,
- int32_t inputStreamId) {
+String8 TagMonitor::getEventDataString(const uint8_t* data_ptr, uint32_t tag, int type, int count,
+ int indentation) {
static int values_per_line[NUM_TYPES] = {
[TYPE_BYTE] = 16,
[TYPE_INT32] = 8,
@@ -362,17 +381,7 @@
returnStr += "??? ";
}
}
- returnStr += "] ";
- if (!outputStreamIds.empty()) {
- returnStr += "output stream ids: ";
- for (const auto &id : outputStreamIds) {
- returnStr.appendFormat(" %d ", id);
- }
- }
- if (inputStreamId != -1) {
- returnStr.appendFormat("input stream id: %d", inputStreamId);
- }
- returnStr += "\n";
+ returnStr += "]\n";
}
return returnStr;
}
@@ -385,11 +394,12 @@
source(src),
frameNumber(frameNumber),
timestamp(timestamp),
+ cameraId(cameraId),
tag(value.tag),
type(value.type),
newData(value.data.u8, value.data.u8 + camera_metadata_type_size[value.type] * value.count),
- cameraId(cameraId), outputStreamIds(outputStreamIds), inputStreamId(inputStreamId) {
-}
+ outputStreamIds(outputStreamIds),
+ inputStreamId(inputStreamId) {}
TagMonitor::MonitorEvent::~MonitorEvent() {
}
diff --git a/services/camera/libcameraservice/utils/TagMonitor.h b/services/camera/libcameraservice/utils/TagMonitor.h
index 088d6fe..9ded15d 100644
--- a/services/camera/libcameraservice/utils/TagMonitor.h
+++ b/services/camera/libcameraservice/utils/TagMonitor.h
@@ -85,12 +85,8 @@
// function.
void dumpMonitoredTagEventsToVectorLocked(std::vector<std::string> &out);
- static String8 getEventDataString(const uint8_t *data_ptr,
- uint32_t tag, int type,
- int count,
- int indentation,
- const std::unordered_set<int32_t> &outputStreamIds,
- int32_t inputStreamId);
+ static String8 getEventDataString(const uint8_t* data_ptr, uint32_t tag, int type, int count,
+ int indentation);
void monitorSingleMetadata(TagMonitor::eventSource source, int64_t frameNumber,
nsecs_t timestamp, const std::string& cameraId, uint32_t tag,
@@ -128,12 +124,15 @@
eventSource source;
uint32_t frameNumber;
nsecs_t timestamp;
+ std::string cameraId;
uint32_t tag;
uint8_t type;
std::vector<uint8_t> newData;
- std::string cameraId;
+ // NOTE: We want to print changes to outputStreamIds and inputStreamId on their own lines.
+ // So any MonitorEvent where these fields are not the default value will have garbage
+ // values for all fields other than source, frameNumber, timestamp, and cameraId.
std::unordered_set<int32_t> outputStreamIds;
- int32_t inputStreamId = 1;
+ int32_t inputStreamId = -1;
};
// A ring buffer for tracking the last kMaxMonitorEvents metadata changes
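A minimal sketch of the event-recording scheme described by the NOTE above; the types below are simplified stand-ins for TagMonitor's internals rather than the actual implementation. Stream-id changes are recorded as dedicated events (an empty output-stream set and a -1 input stream id mean "not a stream-id event"), so they can later be dumped on their own lines, separately from metadata value changes.

#include <cstdint>
#include <unordered_set>
#include <vector>

struct StreamIdEventSketch {                      // stand-in for MonitorEvent
    int64_t frameNumber = 0;
    std::unordered_set<int32_t> outputStreamIds;  // non-empty => output-id change
    int32_t inputStreamId = -1;                   // != -1     => input-id change
};

struct StreamIdMonitorSketch {
    std::vector<StreamIdEventSketch> events;      // the real code uses a ring buffer
    std::unordered_set<int32_t> lastOutputIds;
    int32_t lastInputId = -1;

    // Called once per capture request; emits an event only when the ids change.
    void onRequest(int64_t frame, int32_t inputId,
                   const std::unordered_set<int32_t>& outputIds) {
        if (inputId != lastInputId) {
            events.push_back({frame, {}, inputId});
            lastInputId = inputId;
        }
        if (outputIds != lastOutputIds) {
            events.push_back({frame, outputIds, -1});
            lastOutputIds = outputIds;
        }
    }
};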
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 3f18b95..ea817ab 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -71,6 +71,8 @@
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAUDIO_OK;
copyFrom(request.getConstantConfiguration());
+ mRequestedDeviceId = getDeviceId();
+
mMmapClient.attributionSource = request.getAttributionSource();
// TODO b/182392769: use attribution source util
mMmapClient.attributionSource.uid = VALUE_OR_FATAL(
@@ -115,7 +117,7 @@
const audio_attributes_t attributes = getAudioAttributesFrom(this);
- mRequestedDeviceId = deviceId = getDeviceId();
+ deviceId = mRequestedDeviceId;
// Fill in config
config.format = audioFormat;
@@ -151,6 +153,10 @@
audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
// Open HAL stream. Set mMmapStream
+ ALOGD("%s trying to open MMAP stream with format=%#x, "
+ "sample_rate=%u, channel_mask=%#x, device=%d",
+ __func__, config.format, config.sample_rate,
+ config.channel_mask, deviceId);
status_t status = MmapStreamInterface::openMmapStream(streamDirection,
&attributes,
&config,
@@ -221,6 +227,9 @@
error:
close();
+ // restore original requests
+ setDeviceId(mRequestedDeviceId);
+ setSessionId(requestedSessionId);
return result;
}
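A minimal sketch of the open-and-restore pattern in the hunk above; the configuration holder and callback below are assumptions, not the real AAudioServiceEndpointMMAP. The endpoint remembers the requested device id before opening, and on error restores the requested device and session ids so a later retry does not start from values the HAL wrote back.

#include <cstdint>

struct EndpointConfigSketch {
    int32_t deviceId = 0;     // may be overwritten by the HAL on open
    int32_t sessionId = 0;    // may be overwritten by the HAL on open
};

static bool openWithRestoreSketch(EndpointConfigSketch& cfg,
                                  bool (*tryOpen)(EndpointConfigSketch&)) {
    const int32_t requestedDeviceId = cfg.deviceId;
    const int32_t requestedSessionId = cfg.sessionId;
    if (tryOpen(cfg)) {
        return true;                     // cfg now holds the actual device/session ids
    }
    cfg.deviceId = requestedDeviceId;    // restore original requests on error
    cfg.sessionId = requestedSessionId;
    return false;
}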
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 9f48f80..f4ee84f 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -435,7 +435,15 @@
}
}
if (isIdle_l() && AudioClock::getNanoseconds() >= standbyTime) {
- standby_l();
+ aaudio_result_t result = standby_l();
+ if (result != AAUDIO_OK) {
+ // If standby failed because the function is not implemented, there is no
+ // need to retry. Otherwise, retry standby later.
+ ALOGW("Failed to enter standby, error=%d", result);
+ standbyTime = result == AAUDIO_ERROR_UNIMPLEMENTED
+ ? std::numeric_limits<int64_t>::max()
+ : AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
+ }
}
if (command != nullptr) {
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index b2ba725..b5f8b90 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -320,7 +320,7 @@
}
virtual aaudio_result_t standby_l() REQUIRES(mLock) {
- return AAUDIO_ERROR_UNAVAILABLE;
+ return AAUDIO_ERROR_UNIMPLEMENTED;
}
class ExitStandbyParam : public AAudioCommandParam {
public:
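A minimal sketch of the retry policy introduced above; the enum, constant, and function below are illustrative assumptions rather than the real AAudio service code. An unimplemented standby means the stream type can never enter standby, so no further attempts are scheduled; any other failure is treated as transient and retried after the idle timeout.

#include <cstdint>
#include <limits>

enum class StandbyResultSketch { kOk, kUnimplemented, kTransientError };  // stand-in

constexpr int64_t kIdleTimeoutNanos = 3'000'000'000LL;  // hypothetical 3 s idle timeout

// Decide when to attempt standby next, given the result of the last attempt.
static int64_t nextStandbyAttemptNanos(StandbyResultSketch result, int64_t nowNanos) {
    switch (result) {
        case StandbyResultSketch::kUnimplemented:
            return std::numeric_limits<int64_t>::max();   // never retry
        case StandbyResultSketch::kOk:                    // entered standby normally
        case StandbyResultSketch::kTransientError:        // transient failure: retry later
        default:
            return nowNanos + kIdleTimeoutNanos;
    }
}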