Merge changes from topic "frame-drop-metrics"
* changes:
Add accurate onFrameRendered callbacks
Allow for reschedule-able messages
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 0d156a5..9174adf 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -9215,24 +9215,25 @@
* camera's crop region is set to maximum size, the FOV of the physical streams for the
* ultrawide lens will be the same as the logical stream, by making the crop region
* smaller than its active array size to compensate for the smaller focal length.</p>
- * <p>There are two ways for the application to capture RAW images from a logical camera
- * with RAW capability:</p>
+ * <p>For a logical camera, typically the underlying physical cameras have different RAW
+ * capabilities (such as resolution or CFA pattern). There are two ways for the
+ * application to capture RAW images from the logical camera:</p>
* <ul>
- * <li>Because the underlying physical cameras may have different RAW capabilities (such
- * as resolution or CFA pattern), to maintain backward compatibility, when a RAW stream
- * is configured, the camera device makes sure the default active physical camera remains
- * active and does not switch to other physical cameras. (One exception is that, if the
- * logical camera consists of identical image sensors and advertises multiple focalLength
- * due to different lenses, the camera device may generate RAW images from different
- * physical cameras based on the focalLength being set by the application.) This
- * backward-compatible approach usually results in loss of optical zoom, to telephoto
- * lens or to ultrawide lens.</li>
- * <li>Alternatively, to take advantage of the full zoomRatio range of the logical camera,
- * the application should use <a href="https://developer.android.com/reference/android/hardware/camera2/MultiResolutionImageReader.html">MultiResolutionImageReader</a>
- * to capture RAW images from the currently active physical camera. Because different
- * physical camera may have different RAW characteristics, the application needs to use
- * the characteristics and result metadata of the active physical camera for the
- * relevant RAW metadata.</li>
+ * <li>If the logical camera has RAW capability, the application can create and use RAW
+ * streams in the same way as before. In case a RAW stream is configured, to maintain
+ * backward compatibility, the camera device makes sure the default active physical
+ * camera remains active and does not switch to other physical cameras. (One exception
+ * is that, if the logical camera consists of identical image sensors and advertises
+ * multiple focalLength due to different lenses, the camera device may generate RAW
+ * images from different physical cameras based on the focalLength being set by the
+ * application.) This backward-compatible approach usually results in loss of optical
+ * zoom, to telephoto lens or to ultrawide lens.</li>
+ * <li>Alternatively, if supported by the device,
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/MultiResolutionImageReader.html">MultiResolutionImageReader</a>
+ * can be used to capture RAW images from one of the underlying physical cameras (
+ * depending on current zoom level). Because different physical cameras may have
+ * different RAW characteristics, the application needs to use the characteristics
+ * and result metadata of the active physical camera for the relevant RAW metadata.</li>
* </ul>
* <p>The capture request and result metadata tags required for backward compatible camera
* functionalities will be solely based on the logical camera capability. On the other
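For context on the two RAW capture paths described above, here is a minimal sketch of how an application could check, through the camera NDK, whether a camera advertises both the logical multi-camera and RAW capabilities before choosing a path. The tag and enum names come from this header; the `manager`/`cameraId` setup in the usage comment is assumed boilerplate, and the Java-side MultiResolutionImageReader path is not shown.

    #include <camera/NdkCameraManager.h>
    #include <camera/NdkCameraMetadata.h>
    #include <camera/NdkCameraMetadataTags.h>

    // Returns true if the characteristics list the given
    // ACAMERA_REQUEST_AVAILABLE_CAPABILITIES value.
    static bool hasCapability(const ACameraMetadata* chars, uint8_t capability) {
        ACameraMetadata_const_entry entry{};
        if (ACameraMetadata_getConstEntry(chars, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES,
                                          &entry) != ACAMERA_OK) {
            return false;
        }
        for (uint32_t i = 0; i < entry.count; ++i) {
            if (entry.data.u8[i] == capability) return true;
        }
        return false;
    }

    // Usage (assuming 'manager' and 'cameraId' are already obtained):
    //   ACameraMetadata* chars = nullptr;
    //   ACameraManager_getCameraCharacteristics(manager, cameraId, &chars);
    //   bool logical = hasCapability(chars,
    //       ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_LOGICAL_MULTI_CAMERA);
    //   bool raw = hasCapability(chars, ACAMERA_REQUEST_AVAILABLE_CAPABILITIES_RAW);
    //   ACameraMetadata_free(chars);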
diff --git a/drm/TEST_MAPPING b/drm/TEST_MAPPING
index 3642898..b2d4d6e 100644
--- a/drm/TEST_MAPPING
+++ b/drm/TEST_MAPPING
@@ -1,5 +1,5 @@
{
- "presubmit-large": [
+ "presubmit": [
// The following tests validate codec and drm path.
{
"name": "GtsMediaTestCases",
diff --git a/drm/libmediadrm/DrmHalAidl.cpp b/drm/libmediadrm/DrmHalAidl.cpp
index 1844acb..5ec7337 100644
--- a/drm/libmediadrm/DrmHalAidl.cpp
+++ b/drm/libmediadrm/DrmHalAidl.cpp
@@ -459,7 +459,7 @@
DrmStatus DrmHalAidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
-
+ if (mInitCheck == ERROR_UNSUPPORTED) return mInitCheck;
Uuid uuidAidl = DrmUtils::toAidlUuid(uuid);
std::string appPackageNameAidl = toStdString(appPackageName);
std::shared_ptr<IDrmPluginAidl> pluginAidl;
@@ -1216,7 +1216,7 @@
closeOpenSessions();
Mutex::Autolock autoLock(mLock);
- reportFrameworkMetrics(reportPluginMetrics());
+ if (mInitCheck == OK) reportFrameworkMetrics(reportPluginMetrics());
setListener(NULL);
mInitCheck = NO_INIT;
diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp
index 6010739..6106aa7 100644
--- a/drm/libmediadrm/DrmHalHidl.cpp
+++ b/drm/libmediadrm/DrmHalHidl.cpp
@@ -557,6 +557,7 @@
DrmStatus DrmHalHidl::createPlugin(const uint8_t uuid[16], const String8& appPackageName) {
Mutex::Autolock autoLock(mLock);
+ if (mInitCheck == ERROR_UNSUPPORTED) return mInitCheck;
for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
auto hResult = mFactories[i]->isCryptoSchemeSupported(uuid);
if (hResult.isOk() && hResult) {
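The two hunks above add the same guard to the AIDL and HIDL DRM HAL wrappers: if initialization already concluded with ERROR_UNSUPPORTED, createPlugin() returns immediately instead of touching factories or metrics that were never set up. A minimal sketch of the pattern in isolation, with illustrative class and member names rather than the real DrmHal types:

    #include <mutex>

    enum Status { OK = 0, NO_INIT = -1, ERROR_UNSUPPORTED = -2 };

    class PluginHost {
    public:
        Status createPlugin() {
            std::lock_guard<std::mutex> lock(mLock);
            // Bail out early if init already determined the HAL is unusable.
            if (mInitCheck == ERROR_UNSUPPORTED) return mInitCheck;
            // ... normal plugin creation path ...
            return OK;
        }
    private:
        std::mutex mLock;
        Status mInitCheck = NO_INIT;
    };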
diff --git a/drm/libmediadrm/DrmMetricsLogger.cpp b/drm/libmediadrm/DrmMetricsLogger.cpp
index 89b1dcc..bc004c8 100644
--- a/drm/libmediadrm/DrmMetricsLogger.cpp
+++ b/drm/libmediadrm/DrmMetricsLogger.cpp
@@ -41,6 +41,70 @@
DrmMetricsLogger::~DrmMetricsLogger() {}
+int MediaErrorToJavaError(status_t err) {
+#define STATUS_CASE(status) \
+ case status: \
+ return J##status
+
+ switch (err) {
+ STATUS_CASE(ERROR_DRM_UNKNOWN);
+ STATUS_CASE(ERROR_DRM_NO_LICENSE);
+ STATUS_CASE(ERROR_DRM_LICENSE_EXPIRED);
+ STATUS_CASE(ERROR_DRM_RESOURCE_BUSY);
+ STATUS_CASE(ERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION);
+ STATUS_CASE(ERROR_DRM_SESSION_NOT_OPENED);
+ STATUS_CASE(ERROR_DRM_CANNOT_HANDLE);
+ STATUS_CASE(ERROR_DRM_INSUFFICIENT_SECURITY);
+ STATUS_CASE(ERROR_DRM_FRAME_TOO_LARGE);
+ STATUS_CASE(ERROR_DRM_SESSION_LOST_STATE);
+ STATUS_CASE(ERROR_DRM_CERTIFICATE_MALFORMED);
+ STATUS_CASE(ERROR_DRM_CERTIFICATE_MISSING);
+ STATUS_CASE(ERROR_DRM_CRYPTO_LIBRARY);
+ STATUS_CASE(ERROR_DRM_GENERIC_OEM);
+ STATUS_CASE(ERROR_DRM_GENERIC_PLUGIN);
+ STATUS_CASE(ERROR_DRM_INIT_DATA);
+ STATUS_CASE(ERROR_DRM_KEY_NOT_LOADED);
+ STATUS_CASE(ERROR_DRM_LICENSE_PARSE);
+ STATUS_CASE(ERROR_DRM_LICENSE_POLICY);
+ STATUS_CASE(ERROR_DRM_LICENSE_RELEASE);
+ STATUS_CASE(ERROR_DRM_LICENSE_REQUEST_REJECTED);
+ STATUS_CASE(ERROR_DRM_LICENSE_RESTORE);
+ STATUS_CASE(ERROR_DRM_LICENSE_STATE);
+ STATUS_CASE(ERROR_DRM_MEDIA_FRAMEWORK);
+ STATUS_CASE(ERROR_DRM_PROVISIONING_CERTIFICATE);
+ STATUS_CASE(ERROR_DRM_PROVISIONING_CONFIG);
+ STATUS_CASE(ERROR_DRM_PROVISIONING_PARSE);
+ STATUS_CASE(ERROR_DRM_PROVISIONING_REQUEST_REJECTED);
+ STATUS_CASE(ERROR_DRM_PROVISIONING_RETRY);
+ STATUS_CASE(ERROR_DRM_RESOURCE_CONTENTION);
+ STATUS_CASE(ERROR_DRM_SECURE_STOP_RELEASE);
+ STATUS_CASE(ERROR_DRM_STORAGE_READ);
+ STATUS_CASE(ERROR_DRM_STORAGE_WRITE);
+ STATUS_CASE(ERROR_DRM_ZERO_SUBSAMPLES);
+#undef STATUS_CASE
+ }
+ return static_cast<int>(err);
+}
+
+int DrmPluginSecurityLevelToJavaSecurityLevel(DrmPlugin::SecurityLevel securityLevel) {
+#define STATUS_CASE(status) \
+ case DrmPlugin::k##status: \
+ return J##status
+
+ switch (securityLevel) {
+ STATUS_CASE(SecurityLevelUnknown);
+ STATUS_CASE(SecurityLevelSwSecureCrypto);
+ STATUS_CASE(SecurityLevelSwSecureDecode);
+ STATUS_CASE(SecurityLevelHwSecureCrypto);
+ STATUS_CASE(SecurityLevelHwSecureDecode);
+ STATUS_CASE(SecurityLevelHwSecureAll);
+ STATUS_CASE(SecurityLevelMax);
+#undef STATUS_CASE
+ }
+ return static_cast<int>(securityLevel);
+}
+
+
DrmStatus DrmMetricsLogger::initCheck() const {
DrmStatus status = mImpl->initCheck();
if (status != OK) {
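MediaErrorToJavaError() and DrmPluginSecurityLevelToJavaSecurityLevel() above use a local STATUS_CASE macro so that each native constant maps to its J-prefixed Java-side value without repeating the name, and unknown values fall through unchanged. A standalone sketch of the same technique with a made-up pair of enums:

    // Hypothetical native-side and Java-side constants, for illustration only.
    enum NativeError { ERROR_FOO = -100, ERROR_BAR = -101 };
    enum { JERROR_FOO = 1, JERROR_BAR = 2 };

    int nativeToJava(NativeError err) {
    #define STATUS_CASE(status) \
        case status:            \
            return J##status

        switch (err) {
            STATUS_CASE(ERROR_FOO);
            STATUS_CASE(ERROR_BAR);
    #undef STATUS_CASE
        }
        // Unknown values are passed through unchanged, matching the code above.
        return static_cast<int>(err);
    }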
@@ -63,6 +127,8 @@
DrmStatus DrmMetricsLogger::createPlugin(const uint8_t uuid[IDRM_UUID_SIZE],
const String8& appPackageName) {
std::memcpy(mUuid.data(), uuid, IDRM_UUID_SIZE);
+ mUuid[0] = betoh64(mUuid[0]);
+ mUuid[1] = betoh64(mUuid[1]);
if (kUuidSchemeMap.count(mUuid)) {
mScheme = kUuidSchemeMap.at(mUuid);
} else {
@@ -73,6 +139,10 @@
}
DrmStatus status = mImpl->createPlugin(uuid, appPackageName);
if (status == OK) {
+ String8 version8;
+ if (getPropertyString(String8("version"), version8) == OK) {
+ mVersion = version8.string();
+ }
reportMediaDrmCreated();
} else {
reportMediaDrmErrored(status, __func__);
@@ -101,6 +171,9 @@
if (getSecurityLevel(sessionId, &ctx.mActualSecurityLevel) != OK) {
ctx.mActualSecurityLevel = DrmPlugin::kSecurityLevelUnknown;
}
+ if (!mVersion.empty()) {
+ ctx.mVersion = mVersion;
+ }
{
const std::lock_guard<std::mutex> lock(mSessionMapMutex);
mSessionMap.insert({sessionKey, ctx});
@@ -460,10 +533,11 @@
void DrmMetricsLogger::reportMediaDrmCreated() const {
mediametrics_handle_t handle(mediametrics_create("mediadrm.created"));
mediametrics_setCString(handle, "scheme", mScheme.c_str());
- mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
- mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt64(handle, "uuid_msb", mUuid[0]);
+ mediametrics_setInt64(handle, "uuid_lsb", mUuid[1]);
mediametrics_setInt32(handle, "frontend", mFrontend);
mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
+ mediametrics_setCString(handle, "version", mVersion.c_str());
mediametrics_selfRecord(handle);
mediametrics_delete(handle);
}
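The reporting helpers in this file all follow the same create/set/selfRecord/delete sequence from the mediametrics C API (the same calls used in the hunk above). A minimal sketch with placeholder keys, assuming the libmediametrics C API header <media/MediaMetrics.h>:

    #include <string>

    #include <media/MediaMetrics.h>

    void reportExampleEvent(const std::string& scheme, int64_t uuidMsb) {
        // One handle per logged event; keys are free-form strings.
        mediametrics_handle_t handle = mediametrics_create("mediadrm.example");
        mediametrics_setCString(handle, "scheme", scheme.c_str());
        mediametrics_setInt64(handle, "uuid_msb", uuidMsb);
        mediametrics_selfRecord(handle);   // submit on behalf of the calling process
        mediametrics_delete(handle);       // handle is not reusable after delete
    }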
@@ -471,16 +545,19 @@
void DrmMetricsLogger::reportMediaDrmSessionOpened(const std::vector<uint8_t>& sessionId) const {
mediametrics_handle_t handle(mediametrics_create("mediadrm.session_opened"));
mediametrics_setCString(handle, "scheme", mScheme.c_str());
- mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
- mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt64(handle, "uuid_msb", mUuid[0]);
+ mediametrics_setInt64(handle, "uuid_lsb", mUuid[1]);
mediametrics_setInt32(handle, "frontend", mFrontend);
+ mediametrics_setCString(handle, "version", mVersion.c_str());
mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
const std::lock_guard<std::mutex> lock(mSessionMapMutex);
auto it = mSessionMap.find(sessionId);
if (it != mSessionMap.end()) {
mediametrics_setCString(handle, "session_nonce", it->second.mNonce.c_str());
- mediametrics_setInt64(handle, "requested_seucrity_level", it->second.mTargetSecurityLevel);
- mediametrics_setInt64(handle, "opened_seucrity_level", it->second.mActualSecurityLevel);
+ mediametrics_setInt32(handle, "requested_security_level",
+ DrmPluginSecurityLevelToJavaSecurityLevel(it->second.mTargetSecurityLevel));
+ mediametrics_setInt32(handle, "opened_security_level",
+ DrmPluginSecurityLevelToJavaSecurityLevel(it->second.mActualSecurityLevel));
}
mediametrics_selfRecord(handle);
mediametrics_delete(handle);
@@ -490,20 +567,22 @@
const std::vector<uint8_t>& sessionId) const {
mediametrics_handle_t handle(mediametrics_create("mediadrm.errored"));
mediametrics_setCString(handle, "scheme", mScheme.c_str());
- mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
- mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt64(handle, "uuid_msb", mUuid[0]);
+ mediametrics_setInt64(handle, "uuid_lsb", mUuid[1]);
mediametrics_setInt32(handle, "frontend", mFrontend);
+ mediametrics_setCString(handle, "version", mVersion.c_str());
mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
if (!sessionId.empty()) {
const std::lock_guard<std::mutex> lock(mSessionMapMutex);
auto it = mSessionMap.find(sessionId);
if (it != mSessionMap.end()) {
mediametrics_setCString(handle, "session_nonce", it->second.mNonce.c_str());
- mediametrics_setInt64(handle, "seucrity_level", it->second.mActualSecurityLevel);
+ mediametrics_setInt32(handle, "security_level",
+ DrmPluginSecurityLevelToJavaSecurityLevel(it->second.mActualSecurityLevel));
}
}
mediametrics_setCString(handle, "api", api);
- mediametrics_setInt32(handle, "error_code", error_code);
+ mediametrics_setInt32(handle, "error_code", MediaErrorToJavaError(error_code));
mediametrics_setInt32(handle, "cdm_err", error_code.getCdmErr());
mediametrics_setInt32(handle, "oem_err", error_code.getOemErr());
mediametrics_setInt32(handle, "error_context", error_code.getContext());
diff --git a/drm/libmediadrm/DrmSessionManager.cpp b/drm/libmediadrm/DrmSessionManager.cpp
index e31395d..301538f 100644
--- a/drm/libmediadrm/DrmSessionManager.cpp
+++ b/drm/libmediadrm/DrmSessionManager.cpp
@@ -34,6 +34,7 @@
namespace android {
using aidl::android::media::MediaResourceParcel;
+using aidl::android::media::ClientInfoParcel;
namespace {
void ResourceManagerServiceDied(void* cookie) {
@@ -137,7 +138,10 @@
static int64_t clientId = 0;
mSessionMap[toStdVec(sessionId)] = (SessionInfo){pid, uid, clientId};
- mService->addResource(pid, uid, clientId++, drm, toResourceVec(sessionId, INT64_MAX));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(pid),
+ .uid = static_cast<int32_t>(uid),
+ .id = clientId++};
+ mService->addResource(clientInfo, drm, toResourceVec(sessionId, INT64_MAX));
}
void DrmSessionManager::useSession(const Vector<uint8_t> &sessionId) {
@@ -150,7 +154,10 @@
}
auto info = it->second;
- mService->addResource(info.pid, info.uid, info.clientId, NULL, toResourceVec(sessionId, -1));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(info.pid),
+ .uid = static_cast<int32_t>(info.uid),
+ .id = info.clientId};
+ mService->addResource(clientInfo, NULL, toResourceVec(sessionId, -1));
}
void DrmSessionManager::removeSession(const Vector<uint8_t> &sessionId) {
@@ -164,7 +171,10 @@
auto info = it->second;
// removeClient instead of removeSession because each client has only one session
- mService->removeClient(info.pid, info.clientId);
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(info.pid),
+ .uid = static_cast<int32_t>(info.uid),
+ .id = info.clientId};
+ mService->removeClient(clientInfo);
mSessionMap.erase(it);
}
@@ -182,9 +192,13 @@
// cannot update mSessionMap because we do not know which sessionId is reclaimed;
// we rely on IResourceManagerClient to removeSession in reclaimResource
- Vector<uint8_t> dummy;
+ Vector<uint8_t> placeHolder;
bool success;
- ScopedAStatus status = service->reclaimResource(callingPid, toResourceVec(dummy, INT64_MAX), &success);
+ uid_t uid = AIBinder_getCallingUid();
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(callingPid),
+ .uid = static_cast<int32_t>(uid)};
+ ScopedAStatus status = service->reclaimResource(
+ clientInfo, toResourceVec(placeHolder, INT64_MAX), &success);
return status.isOk() && success;
}
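addResource/removeClient/reclaimResource now take a single ClientInfoParcel instead of separate pid/uid/client-id arguments. A sketch of the bundling step on its own; the struct below is a reduced stand-in for the AIDL-generated aidl::android::media::ClientInfoParcel, limited to the fields used in this file:

    #include <cstdint>
    #include <sys/types.h>

    // Illustrative stand-in for the AIDL-generated parcelable.
    struct ClientInfoParcel {
        int32_t pid = 0;
        int32_t uid = 0;
        int64_t id = 0;
    };

    ClientInfoParcel makeClientInfo(pid_t pid, uid_t uid, int64_t clientId) {
        // Designated initializers keep call sites readable and make it obvious
        // which identifier each value corresponds to.
        return ClientInfoParcel{.pid = static_cast<int32_t>(pid),
                                .uid = static_cast<int32_t>(uid),
                                .id = clientId};
    }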
diff --git a/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h b/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h
index f4e3c3e..7666f04 100644
--- a/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h
+++ b/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h
@@ -25,10 +25,58 @@
namespace android {
+enum {
+ JERROR_DRM_UNKNOWN = 0,
+ JERROR_DRM_NO_LICENSE = 1,
+ JERROR_DRM_LICENSE_EXPIRED = 2,
+ JERROR_DRM_RESOURCE_BUSY = 3,
+ JERROR_DRM_INSUFFICIENT_OUTPUT_PROTECTION = 4,
+ JERROR_DRM_SESSION_NOT_OPENED = 5,
+ JERROR_DRM_CANNOT_HANDLE = 6,
+ JERROR_DRM_INSUFFICIENT_SECURITY = 7,
+ JERROR_DRM_FRAME_TOO_LARGE = 8,
+ JERROR_DRM_SESSION_LOST_STATE = 9,
+ JERROR_DRM_CERTIFICATE_MALFORMED = 10,
+ JERROR_DRM_CERTIFICATE_MISSING = 11,
+ JERROR_DRM_CRYPTO_LIBRARY = 12,
+ JERROR_DRM_GENERIC_OEM = 13,
+ JERROR_DRM_GENERIC_PLUGIN = 14,
+ JERROR_DRM_INIT_DATA = 15,
+ JERROR_DRM_KEY_NOT_LOADED = 16,
+ JERROR_DRM_LICENSE_PARSE = 17,
+ JERROR_DRM_LICENSE_POLICY = 18,
+ JERROR_DRM_LICENSE_RELEASE = 19,
+ JERROR_DRM_LICENSE_REQUEST_REJECTED = 20,
+ JERROR_DRM_LICENSE_RESTORE = 21,
+ JERROR_DRM_LICENSE_STATE = 22,
+ JERROR_DRM_MEDIA_FRAMEWORK = 23,
+ JERROR_DRM_PROVISIONING_CERTIFICATE = 24,
+ JERROR_DRM_PROVISIONING_CONFIG = 25,
+ JERROR_DRM_PROVISIONING_PARSE = 26,
+ JERROR_DRM_PROVISIONING_REQUEST_REJECTED = 27,
+ JERROR_DRM_PROVISIONING_RETRY = 28,
+ JERROR_DRM_RESOURCE_CONTENTION = 29,
+ JERROR_DRM_SECURE_STOP_RELEASE = 30,
+ JERROR_DRM_STORAGE_READ = 31,
+ JERROR_DRM_STORAGE_WRITE = 32,
+ JERROR_DRM_ZERO_SUBSAMPLES = 33,
+};
+
+enum {
+ JSecurityLevelUnknown = 0,
+ JSecurityLevelSwSecureCrypto = 1,
+ JSecurityLevelSwSecureDecode = 2,
+ JSecurityLevelHwSecureCrypto = 3,
+ JSecurityLevelHwSecureDecode = 4,
+ JSecurityLevelHwSecureAll = 5,
+ JSecurityLevelMax = 6,
+};
+
struct SessionContext {
std::string mNonce;
- int64_t mTargetSecurityLevel;
+ DrmPlugin::SecurityLevel mTargetSecurityLevel;
DrmPlugin::SecurityLevel mActualSecurityLevel;
+ std::string mVersion;
};
class DrmMetricsLogger : public IDrm {
@@ -161,6 +209,7 @@
std::array<int64_t, 2> mUuid;
std::string mObjNonce;
std::string mScheme;
+ std::string mVersion;
std::map<std::vector<uint8_t>, SessionContext> mSessionMap;
mutable std::mutex mSessionMapMutex;
IDrmFrontend mFrontend;
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 94cf743..ba20b95 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -37,6 +37,7 @@
#include <ctime>
#include <deque>
#include <endian.h>
+#include <inttypes.h>
#include <iterator>
#include <mutex>
#include <string>
@@ -105,9 +106,9 @@
void LogToBuffer(android_LogPriority level, const uint8_t uuid[16], const char *fmt, Args... args) {
uint64_t uuid2[2] = {};
std::memcpy(uuid2, uuid, sizeof(uuid2));
- std::string uuidFmt("uuid=[%lx %lx] ");
+ std::string uuidFmt("uuid=[%" PRIx64 " %" PRIx64 "] ");
uuidFmt += fmt;
- LogToBuffer(level, uuidFmt.c_str(), htobe64(uuid2[0]), htobe64(uuid2[1]), args...);
+ LogToBuffer(level, uuidFmt.c_str(), betoh64(uuid2[0]), betoh64(uuid2[1]), args...);
}
#ifndef LOG2BE
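The "%lx" to "%" PRIx64 "" change matters because uint64_t is not unsigned long on 32-bit builds; the <inttypes.h> macros select the right length modifier for either word size. A minimal illustration:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        uint64_t msb = 0x0123456789abcdefULL;
        uint64_t lsb = 0xfedcba9876543210ULL;
        // Portable across 32-bit and 64-bit targets, unlike a hard-coded "%lx".
        std::printf("uuid=[%" PRIx64 " %" PRIx64 "]\n", msb, lsb);
        return 0;
    }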
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index a22ec19..9aa896c 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -34,12 +34,15 @@
}
],
"file_patterns": ["(?i)drm|crypto"]
- }
- ],
-
- "imports": [
+ },
{
- "path": "frameworks/av/drm/mediadrm/plugins"
+ "name": "CtsMediaDrmFrameworkTestCases",
+ "options" : [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ }
+ ],
+ "file_patterns": ["(?i)drm|crypto"]
}
],
diff --git a/media/audioaidlconversion/AidlConversionCppNdk.cpp b/media/audioaidlconversion/AidlConversionCppNdk.cpp
index b0852f5..46bb6f1 100644
--- a/media/audioaidlconversion/AidlConversionCppNdk.cpp
+++ b/media/audioaidlconversion/AidlConversionCppNdk.cpp
@@ -2184,6 +2184,11 @@
audio_port_device_ext legacy{};
RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
aidl.device, &legacy.type, legacy.address));
+ legacy.encapsulation_modes = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationMode_mask(aidl.encapsulationModes));
+ legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
+ aidl2legacy_AudioEncapsulationMetadataType_mask(
+ aidl.encapsulationMetadataTypes));
return legacy;
}
@@ -2192,6 +2197,10 @@
AudioPortDeviceExt aidl;
aidl.device = VALUE_OR_RETURN(
legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ aidl.encapsulationModes = VALUE_OR_RETURN(
+ legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
+ aidl.encapsulationMetadataTypes = VALUE_OR_RETURN(
+ legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
return aidl;
}
@@ -2699,6 +2708,10 @@
return AUDIO_LATENCY_MODE_FREE;
case AudioLatencyMode::LOW:
return AUDIO_LATENCY_MODE_LOW;
+ case AudioLatencyMode::DYNAMIC_SPATIAL_AUDIO_SOFTWARE:
+ return AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE;
+ case AudioLatencyMode::DYNAMIC_SPATIAL_AUDIO_HARDWARE:
+ return AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE;
}
return unexpected(BAD_VALUE);
}
@@ -2709,6 +2722,10 @@
return AudioLatencyMode::FREE;
case AUDIO_LATENCY_MODE_LOW:
return AudioLatencyMode::LOW;
+ case AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_SOFTWARE:
+ return AudioLatencyMode::DYNAMIC_SPATIAL_AUDIO_SOFTWARE;
+ case AUDIO_LATENCY_MODE_DYNAMIC_SPATIAL_AUDIO_HARDWARE:
+ return AudioLatencyMode::DYNAMIC_SPATIAL_AUDIO_HARDWARE;
}
return unexpected(BAD_VALUE);
}
diff --git a/media/audioaidlconversion/AidlConversionEffect.cpp b/media/audioaidlconversion/AidlConversionEffect.cpp
index 2df97d1..c053a5d 100644
--- a/media/audioaidlconversion/AidlConversionEffect.cpp
+++ b/media/audioaidlconversion/AidlConversionEffect.cpp
@@ -14,12 +14,16 @@
* limitations under the License.
*/
+#include <cstdint>
+#include <inttypes.h>
#include <utility>
#define LOG_TAG "AidlConversionEffect"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <aidl/android/hardware/audio/effect/DefaultExtension.h>
+#include <aidl/android/hardware/audio/effect/VendorExtension.h>
#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionEffect.h>
@@ -32,16 +36,22 @@
using ::aidl::android::hardware::audio::effect::AcousticEchoCanceler;
using ::aidl::android::hardware::audio::effect::AutomaticGainControlV2;
using ::aidl::android::hardware::audio::effect::BassBoost;
+using ::aidl::android::hardware::audio::effect::DefaultExtension;
using ::aidl::android::hardware::audio::effect::Descriptor;
using ::aidl::android::hardware::audio::effect::Downmix;
using ::aidl::android::hardware::audio::effect::DynamicsProcessing;
using ::aidl::android::hardware::audio::effect::Flags;
using ::aidl::android::hardware::audio::effect::Parameter;
using ::aidl::android::hardware::audio::effect::PresetReverb;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
+using ::aidl::android::hardware::audio::effect::Visualizer;
using ::aidl::android::media::audio::common::AudioDeviceDescription;
using ::android::BAD_VALUE;
+using ::android::OK;
using ::android::base::unexpected;
+using ::android::effect::utils::EffectParamReader;
+using ::android::effect::utils::EffectParamWriter;
////////////////////////////////////////////////////////////////////////////////////////////////////
// Converters
@@ -349,5 +359,116 @@
return static_cast<int32_t>(aidl);
}
+ConversionResult<uint32_t> aidl2legacy_Parameter_Visualizer_ScalingMode_uint32(
+ Visualizer::ScalingMode aidl) {
+ switch (aidl) {
+ case Visualizer::ScalingMode::NORMALIZED: {
+ return 0;
+ }
+ case Visualizer::ScalingMode::AS_PLAYED: {
+ return 1;
+ }
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Visualizer::ScalingMode> legacy2aidl_Parameter_Visualizer_uint32_ScalingMode(
+ uint32_t legacy) {
+ if (legacy == 0) {
+ return Visualizer::ScalingMode::NORMALIZED;
+ } else if (legacy == 1) {
+ return Visualizer::ScalingMode::AS_PLAYED;
+ } else {
+ return unexpected(BAD_VALUE);
+ }
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_Visualizer_MeasurementMode_uint32(
+ Visualizer::MeasurementMode aidl) {
+ switch (aidl) {
+ case Visualizer::MeasurementMode::NONE: {
+ return 0;
+ }
+ case Visualizer::MeasurementMode::PEAK_RMS: {
+ return 1;
+ }
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Visualizer::MeasurementMode>
+legacy2aidl_Parameter_Visualizer_uint32_MeasurementMode(uint32_t legacy) {
+ if (legacy == 0) {
+ return Visualizer::MeasurementMode::NONE;
+ } else if (legacy == 1) {
+ return Visualizer::MeasurementMode::PEAK_RMS;
+ } else {
+ return unexpected(BAD_VALUE);
+ }
+}
+
+/**
+ * Copy the parameter area of effect_param_t to DefaultExtension::bytes.
+ */
+ConversionResult<VendorExtension> legacy2aidl_EffectParameterReader_Param_VendorExtension(
+ EffectParamReader& param) {
+ size_t len = param.getParameterSize();
+ DefaultExtension defaultExt;
+ defaultExt.bytes.resize(len);
+ RETURN_IF_ERROR(param.readFromParameter(defaultExt.bytes.data(), len));
+
+ VendorExtension ext;
+ ext.extension.setParcelable(defaultExt);
+ return ext;
+}
+
+/**
+ * Copy the data area of effect_param_t to DefaultExtension::bytes.
+ */
+ConversionResult<VendorExtension> legacy2aidl_EffectParameterReader_Data_VendorExtension(
+ EffectParamReader& param) {
+ size_t len = param.getValueSize();
+ DefaultExtension defaultExt;
+ defaultExt.bytes.resize(len);
+ RETURN_IF_ERROR(param.readFromValue(defaultExt.bytes.data(), len));
+
+ VendorExtension ext;
+ ext.extension.setParcelable(defaultExt);
+ return ext;
+}
+
+/**
+ * Copy DefaultExtension::bytes to the data area of effect_param_t.
+ */
+ConversionResult<status_t> aidl2legacy_VendorExtension_EffectParameterWriter_Data(
+ EffectParamWriter& param, VendorExtension ext) {
+ std::optional<DefaultExtension> defaultExt;
+ RETURN_IF_ERROR(ext.extension.getParcelable(&defaultExt));
+ if (!defaultExt.has_value()) {
+ return unexpected(BAD_VALUE);
+ }
+
+ RETURN_IF_ERROR(param.writeToValue(defaultExt->bytes.data(), defaultExt->bytes.size()));
+
+ return OK;
+}
+
+ConversionResult<Parameter> legacy2aidl_EffectParameterReader_ParameterExtension(
+ EffectParamReader& param) {
+ VendorExtension ext =
+ VALUE_OR_RETURN(legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ return UNION_MAKE(Parameter, specific, UNION_MAKE(Parameter::Specific, vendorEffect, ext));
+}
+
+ConversionResult<::android::status_t> aidl2legacy_ParameterExtension_EffectParameterWriter(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl,
+ EffectParamWriter& legacy) {
+ VendorExtension ext = VALUE_OR_RETURN(
+ (::aidl::android::getParameterSpecific<Parameter, VendorExtension,
+ Parameter::Specific::vendorEffect>(aidl)));
+ return VALUE_OR_RETURN_STATUS(
+ aidl2legacy_VendorExtension_EffectParameterWriter_Data(legacy, ext));
+}
+
} // namespace android
} // aidl
diff --git a/media/audioaidlconversion/AidlConversionNdk.cpp b/media/audioaidlconversion/AidlConversionNdk.cpp
index 7c63339..71c547c 100644
--- a/media/audioaidlconversion/AidlConversionNdk.cpp
+++ b/media/audioaidlconversion/AidlConversionNdk.cpp
@@ -14,14 +14,18 @@
* limitations under the License.
*/
+#include <sstream>
#include <utility>
+#include <system/audio.h>
#define LOG_TAG "AidlConversionNdk"
//#define LOG_NDEBUG 0
#include <utils/Log.h>
+#include <utils/Errors.h>
#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionNdk.h>
+#include <Utils.h>
////////////////////////////////////////////////////////////////////////////////////////////////////
// AIDL NDK backend to legacy audio data structure conversion utilities.
@@ -29,6 +33,51 @@
namespace aidl {
namespace android {
+using hardware::audio::common::PlaybackTrackMetadata;
+using hardware::audio::common::RecordTrackMetadata;
+using ::android::BAD_VALUE;
+using ::android::OK;
+
+namespace {
+
+::android::status_t combineString(
+ const std::vector<std::string>& v, char separator, std::string* result) {
+ std::ostringstream oss;
+ for (const auto& s : v) {
+ if (oss.tellp() > 0) {
+ oss << separator;
+ }
+ if (s.find(separator) == std::string::npos) {
+ oss << s;
+ } else {
+ ALOGE("%s: string \"%s\" contains separator character \"%c\"",
+ __func__, s.c_str(), separator);
+ return BAD_VALUE;
+ }
+ }
+ *result = oss.str();
+ return OK;
+}
+
+std::vector<std::string> splitString(const std::string& s, char separator) {
+ std::istringstream iss(s);
+ std::string t;
+ std::vector<std::string> result;
+ while (std::getline(iss, t, separator)) {
+ result.push_back(std::move(t));
+ }
+ return result;
+}
+
+std::vector<std::string> filterOutNonVendorTags(const std::vector<std::string>& tags) {
+ std::vector<std::string> result;
+ std::copy_if(tags.begin(), tags.end(), std::back_inserter(result),
+ ::aidl::android::hardware::audio::common::maybeVendorExtension);
+ return result;
+}
+
+} // namespace
+
// buffer_provider_t is not supported thus skipped
ConversionResult<buffer_config_t> aidl2legacy_AudioConfigBase_buffer_config_t(
const media::audio::common::AudioConfigBase& aidl, bool isInput) {
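The combineString()/splitString() helpers above (de)serialize the vendor tag list into the single legacy tags string; combineString rejects input that already contains the separator so the round trip stays unambiguous. A self-contained sketch of the same pair, independent of the audio types and using ';' as an example separator:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Join, failing (returning false) if any element contains the separator.
    bool join(const std::vector<std::string>& v, char sep, std::string* out) {
        std::ostringstream oss;
        for (const auto& s : v) {
            if (s.find(sep) != std::string::npos) return false;
            if (oss.tellp() > 0) oss << sep;
            oss << s;
        }
        *out = oss.str();
        return true;
    }

    std::vector<std::string> split(const std::string& s, char sep) {
        std::istringstream iss(s);
        std::vector<std::string> out;
        for (std::string t; std::getline(iss, t, sep);) out.push_back(t);
        return out;
    }

    int main() {
        std::string joined;
        if (join({"VX_GOOGLE_42", "VX_VENDOR_FOO"}, ';', &joined)) {
            for (const auto& t : split(joined, ';')) std::cout << t << '\n';
        }
        return 0;
    }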
@@ -68,5 +117,79 @@
return aidl;
}
+::android::status_t aidl2legacy_AudioAttributesTags(
+ const std::vector<std::string>& aidl, char* legacy) {
+ std::string aidlTags;
+ RETURN_STATUS_IF_ERROR(combineString(
+ filterOutNonVendorTags(aidl), AUDIO_ATTRIBUTES_TAGS_SEPARATOR, &aidlTags));
+ RETURN_STATUS_IF_ERROR(aidl2legacy_string(aidlTags, legacy, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE));
+ return OK;
+}
+
+ConversionResult<std::vector<std::string>> legacy2aidl_AudioAttributesTags(const char* legacy) {
+ std::string legacyTags = VALUE_OR_RETURN(legacy2aidl_string(
+ legacy, AUDIO_ATTRIBUTES_TAGS_MAX_SIZE));
+ return filterOutNonVendorTags(splitString(legacyTags, AUDIO_ATTRIBUTES_TAGS_SEPARATOR));
+}
+
+ConversionResult<playback_track_metadata_v7>
+aidl2legacy_PlaybackTrackMetadata_playback_track_metadata_v7(const PlaybackTrackMetadata& aidl) {
+ playback_track_metadata_v7 legacy;
+ legacy.base.usage = VALUE_OR_RETURN(aidl2legacy_AudioUsage_audio_usage_t(aidl.usage));
+ legacy.base.content_type = VALUE_OR_RETURN(aidl2legacy_AudioContentType_audio_content_type_t(
+ aidl.contentType));
+ legacy.base.gain = aidl.gain;
+ legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, false /*isInput*/));
+ RETURN_IF_ERROR(aidl2legacy_AudioAttributesTags(aidl.tags, legacy.tags));
+ return legacy;
+}
+
+ConversionResult<PlaybackTrackMetadata>
+legacy2aidl_playback_track_metadata_v7_PlaybackTrackMetadata(
+ const playback_track_metadata_v7& legacy) {
+ PlaybackTrackMetadata aidl;
+ aidl.usage = VALUE_OR_RETURN(legacy2aidl_audio_usage_t_AudioUsage(legacy.base.usage));
+ aidl.contentType = VALUE_OR_RETURN(legacy2aidl_audio_content_type_t_AudioContentType(
+ legacy.base.content_type));
+ aidl.gain = legacy.base.gain;
+ aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ legacy.channel_mask, false /*isInput*/));
+ aidl.tags = VALUE_OR_RETURN(legacy2aidl_AudioAttributesTags(legacy.tags));
+ return aidl;
+}
+
+ConversionResult<record_track_metadata_v7>
+aidl2legacy_RecordTrackMetadata_record_track_metadata_v7(const RecordTrackMetadata& aidl) {
+ record_track_metadata_v7 legacy;
+ legacy.base.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(aidl.source));
+ legacy.base.gain = aidl.gain;
+ if (aidl.destinationDevice.has_value()) {
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(aidl.destinationDevice.value(),
+ &legacy.base.dest_device, legacy.base.dest_device_address));
+ } else {
+ legacy.base.dest_device = AUDIO_DEVICE_NONE;
+ }
+ legacy.channel_mask = VALUE_OR_RETURN(aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask, true /*isInput*/));
+ RETURN_IF_ERROR(aidl2legacy_AudioAttributesTags(aidl.tags, legacy.tags));
+ return legacy;
+}
+
+ConversionResult<RecordTrackMetadata>
+legacy2aidl_record_track_metadata_v7_RecordTrackMetadata(const record_track_metadata_v7& legacy) {
+ RecordTrackMetadata aidl;
+ aidl.source = VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.base.source));
+ aidl.gain = legacy.base.gain;
+ if (legacy.base.dest_device != AUDIO_DEVICE_NONE) {
+ aidl.destinationDevice = VALUE_OR_RETURN(legacy2aidl_audio_device_AudioDevice(
+ legacy.base.dest_device, legacy.base.dest_device_address));
+ }
+ aidl.channelMask = VALUE_OR_RETURN(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ legacy.channel_mask, true /*isInput*/));
+ aidl.tags = VALUE_OR_RETURN(legacy2aidl_AudioAttributesTags(legacy.tags));
+ return aidl;
+}
+
} // namespace android
} // aidl
diff --git a/media/audioaidlconversion/Android.bp b/media/audioaidlconversion/Android.bp
index c0024ef..bdb3a2c 100644
--- a/media/audioaidlconversion/Android.bp
+++ b/media/audioaidlconversion/Android.bp
@@ -135,12 +135,16 @@
],
defaults: [
"audio_aidl_conversion_common_default",
+ "latest_android_hardware_audio_common_ndk_shared",
"latest_android_media_audio_common_types_ndk_shared",
],
shared_libs: [
"libbinder_ndk",
"libbase",
],
+ static_libs: [
+ "libaudioaidlcommon",
+ ],
cflags: [
"-DBACKEND_NDK",
],
diff --git a/media/audioaidlconversion/TEST_MAPPING b/media/audioaidlconversion/TEST_MAPPING
new file mode 100644
index 0000000..a0c9759
--- /dev/null
+++ b/media/audioaidlconversion/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "audio_aidl_ndk_conversion_tests"
+ }
+ ]
+}
diff --git a/media/audioaidlconversion/include/media/AidlConversionCppNdk.h b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
index abf0231..e1daf31 100644
--- a/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
+++ b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
@@ -127,6 +127,11 @@
ConversionResult<media::audio::common::AudioChannelLayout>
legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+audio_channel_mask_t aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ int aidlLayout, bool isInput);
+int legacy2aidl_audio_channel_mask_t_bits_AudioChannelLayout_layout(
+ audio_channel_mask_t legacy, bool isInput);
+
enum class AudioPortDirection {
INPUT, OUTPUT
};
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
index 83aa614..5e245a7 100644
--- a/media/audioaidlconversion/include/media/AidlConversionEffect.h
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -26,6 +26,7 @@
#include <hardware/audio_effect.h>
#include <media/AidlConversionUtil.h>
#include <system/audio_effect.h>
+#include <system/audio_effects/audio_effects_utils.h>
#include <aidl/android/hardware/audio/effect/IEffect.h>
@@ -45,19 +46,39 @@
return VALUE_OR_RETURN((unionGetField<T, field>(spec)));
}
-#define GET_PARAMETER_SPECIFIC_FIELD(u, specific, tag, field, fieldType) \
- getParameterSpecificField<std::decay_t<decltype(u)>, specific, \
- aidl::android::hardware::audio::effect::Parameter::Specific::tag, \
- specific::field, fieldType>(u)
+#define GET_PARAMETER_SPECIFIC_FIELD(_u, _effect, _tag, _field, _fieldType) \
+ getParameterSpecificField<std::decay_t<decltype(_u)>, _effect, \
+ aidl::android::hardware::audio::effect::Parameter::Specific::_tag, \
+ _effect::_field, _fieldType>(_u)
-#define MAKE_SPECIFIC_PARAMETER(spec, tag, field, value) \
- UNION_MAKE(aidl::android::hardware::audio::effect::Parameter, specific, \
- UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Specific, tag, \
- UNION_MAKE(spec, field, value)))
+#define MAKE_SPECIFIC_PARAMETER(_spec, _tag, _field, _value) \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter, specific, \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Specific, _tag, \
+ UNION_MAKE(_spec, _field, _value)))
-#define MAKE_SPECIFIC_PARAMETER_ID(spec, tag, field) \
- UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Id, tag, \
- UNION_MAKE(spec::Id, commonTag, field))
+#define MAKE_SPECIFIC_PARAMETER_ID(_spec, _tag, _field) \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Id, _tag, \
+ UNION_MAKE(_spec::Id, commonTag, _field))
+
+#define MAKE_EXTENSION_PARAMETER_ID(_effect, _tag, _field) \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Id, _tag, \
+ UNION_MAKE(_effect::Id, vendorExtensionTag, _field))
+
+#define VENDOR_EXTENSION_GET_AND_RETURN(_effect, _tag, _param) \
+ { \
+ aidl::android::hardware::audio::effect::VendorExtension _extId = VALUE_OR_RETURN_STATUS( \
+ aidl::android::legacy2aidl_EffectParameterReader_Param_VendorExtension(_param)); \
+ aidl::android::hardware::audio::effect::Parameter::Id _id = \
+ MAKE_EXTENSION_PARAMETER_ID(_effect, _tag##Tag, _extId); \
+ aidl::android::hardware::audio::effect::Parameter _aidlParam; \
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(_id, &_aidlParam))); \
+ aidl::android::hardware::audio::effect::VendorExtension _ext = \
+ VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD( \
+ _aidlParam, _effect, _tag, _effect::vendor, VendorExtension)); \
+ return VALUE_OR_RETURN_STATUS( \
+ aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(_aidlParam, \
+ _param)); \
+ }
ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(
::aidl::android::hardware::audio::effect::Flags::Type type);
@@ -126,5 +147,36 @@
ConversionResult<int32_t> aidl2legacy_DynamicsProcessing_ResolutionPreference_int32(
::aidl::android::hardware::audio::effect::DynamicsProcessing::ResolutionPreference aidl);
+ConversionResult<uint32_t> aidl2legacy_Parameter_Visualizer_ScalingMode_uint32(
+ ::aidl::android::hardware::audio::effect::Visualizer::ScalingMode aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Visualizer::ScalingMode>
+legacy2aidl_Parameter_Visualizer_uint32_ScalingMode(uint32_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_Visualizer_MeasurementMode_uint32(
+ ::aidl::android::hardware::audio::effect::Visualizer::MeasurementMode aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Visualizer::MeasurementMode>
+legacy2aidl_Parameter_Visualizer_uint32_MeasurementMode(uint32_t legacy);
+
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_EffectParameterReader_ParameterExtension(
+ ::android::effect::utils::EffectParamReader& param);
+ConversionResult<::android::status_t> aidl2legacy_ParameterExtension_EffectParameterWriter(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl,
+ ::android::effect::utils::EffectParamWriter& legacy);
+
+ConversionResult<::aidl::android::hardware::audio::effect::VendorExtension>
+legacy2aidl_EffectParameterReader_Param_VendorExtension(
+ ::android::effect::utils::EffectParamReader& param);
+ConversionResult<::aidl::android::hardware::audio::effect::VendorExtension>
+legacy2aidl_EffectParameterReader_Data_VendorExtension(
+ ::android::effect::utils::EffectParamReader& param);
+
+ConversionResult<::android::status_t> aidl2legacy_VendorExtension_EffectParameterWriter_Data(
+ ::android::effect::utils::EffectParamWriter& param,
+ ::aidl::android::hardware::audio::effect::VendorExtension ext);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_EffectParameterReader_ParameterExtension(
+ ::android::effect::utils::EffectParamReader& param);
+
} // namespace android
} // namespace aidl
diff --git a/media/audioaidlconversion/include/media/AidlConversionNdk.h b/media/audioaidlconversion/include/media/AidlConversionNdk.h
index 98a7d41..e92f1a9 100644
--- a/media/audioaidlconversion/include/media/AidlConversionNdk.h
+++ b/media/audioaidlconversion/include/media/AidlConversionNdk.h
@@ -16,17 +16,20 @@
#pragma once
-#include <android/binder_auto_utils.h>
-#include <android/binder_manager.h>
-#include <android/binder_process.h>
-
/**
- * Can only handle conversion between AIDL (NDK backend) and legacy type.
+ * Can only handle conversion between AIDL (NDK backend) and legacy types.
*/
+
+#include <string>
+#include <vector>
+
#include <hardware/audio_effect.h>
-#include <media/AidlConversionUtil.h>
#include <system/audio_effect.h>
+
+#include <aidl/android/hardware/audio/common/PlaybackTrackMetadata.h>
+#include <aidl/android/hardware/audio/common/RecordTrackMetadata.h>
#include <aidl/android/media/audio/common/AudioConfig.h>
+#include <media/AidlConversionUtil.h>
namespace aidl {
namespace android {
@@ -36,5 +39,22 @@
ConversionResult<media::audio::common::AudioConfigBase> legacy2aidl_buffer_config_t_AudioConfigBase(
const buffer_config_t& legacy, bool isInput);
+::android::status_t aidl2legacy_AudioAttributesTags(
+ const std::vector<std::string>& aidl, char* legacy);
+ConversionResult<std::vector<std::string>> legacy2aidl_AudioAttributesTags(const char* legacy);
+
+ConversionResult<playback_track_metadata_v7>
+aidl2legacy_PlaybackTrackMetadata_playback_track_metadata_v7(
+ const hardware::audio::common::PlaybackTrackMetadata& aidl);
+ConversionResult<hardware::audio::common::PlaybackTrackMetadata>
+legacy2aidl_playback_track_metadata_v7_PlaybackTrackMetadata(
+ const playback_track_metadata_v7& legacy);
+
+ConversionResult<record_track_metadata_v7>
+aidl2legacy_RecordTrackMetadata_record_track_metadata_v7(
+ const hardware::audio::common::RecordTrackMetadata& aidl);
+ConversionResult<hardware::audio::common::RecordTrackMetadata>
+legacy2aidl_record_track_metadata_v7_RecordTrackMetadata(const record_track_metadata_v7& legacy);
+
} // namespace android
} // namespace aidl
diff --git a/media/audioaidlconversion/tests/Android.bp b/media/audioaidlconversion/tests/Android.bp
new file mode 100644
index 0000000..de7c8a2
--- /dev/null
+++ b/media/audioaidlconversion/tests/Android.bp
@@ -0,0 +1,46 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_defaults {
+ name: "libaudio_aidl_conversion_tests_defaults",
+ test_suites: ["device-tests"],
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ sanitize: {
+ misc_undefined: [
+ "unsigned-integer-overflow",
+ "signed-integer-overflow",
+ ],
+ },
+}
+
+cc_test {
+ name: "audio_aidl_ndk_conversion_tests",
+
+ defaults: [
+ "latest_android_media_audio_common_types_ndk_static",
+ "latest_android_hardware_audio_common_ndk_static",
+ "libaudio_aidl_conversion_tests_defaults",
+ ],
+ srcs: ["audio_aidl_ndk_conversion_tests.cpp"],
+ shared_libs: [
+ "libbinder",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+ static_libs: [
+ "libaudio_aidl_conversion_common_ndk",
+ ],
+ cflags: [
+ "-DBACKEND_NDK",
+ ],
+}
diff --git a/media/audioaidlconversion/tests/audio_aidl_ndk_conversion_tests.cpp b/media/audioaidlconversion/tests/audio_aidl_ndk_conversion_tests.cpp
new file mode 100644
index 0000000..c505e60
--- /dev/null
+++ b/media/audioaidlconversion/tests/audio_aidl_ndk_conversion_tests.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <iostream>
+#include <type_traits>
+
+#include <gtest/gtest.h>
+
+#include <media/AidlConversionNdk.h>
+
+namespace {
+template<typename> struct mf_traits {};
+template<class T, class U> struct mf_traits<U T::*> {
+ using member_type = U;
+};
+} // namespace
+
+// Provide value printers for types generated from AIDL
+// They need to be in the same namespace as the types we intend to print
+namespace aidl::android::hardware::audio::common {
+ template <typename P>
+ std::enable_if_t<std::is_function_v<typename mf_traits<decltype(&P::toString)>::member_type>,
+ std::ostream&> operator<<(std::ostream& os, const P& p) {
+ return os << p.toString();
+ }
+ template <typename E>
+ std::enable_if_t<std::is_enum_v<E>, std::ostream&> operator<<(std::ostream& os, const E& e) {
+ return os << toString(e);
+ }
+} // namespace aidl::android::hardware::audio::common
+
+using aidl::android::hardware::audio::common::PlaybackTrackMetadata;
+using aidl::android::hardware::audio::common::RecordTrackMetadata;
+using aidl::android::media::audio::common::AudioSource;
+using aidl::android::media::audio::common::AudioUsage;
+using namespace aidl::android; // for conversion functions
+
+TEST(AudioPlaybackTrackMetadata, Aidl2Legacy2Aidl) {
+ const PlaybackTrackMetadata initial{ .usage = AudioUsage::UNKNOWN };
+ auto conv = aidl2legacy_PlaybackTrackMetadata_playback_track_metadata_v7(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_playback_track_metadata_v7_PlaybackTrackMetadata(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+
+TEST(AudioPlaybackTrackMetadata, NonVendorTags) {
+ PlaybackTrackMetadata initial{ .usage = AudioUsage::UNKNOWN };
+ initial.tags.emplace_back("random string"); // Must be filtered out.
+ initial.tags.emplace_back("VX_GOOGLE_42");
+ auto conv = aidl2legacy_PlaybackTrackMetadata_playback_track_metadata_v7(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_playback_track_metadata_v7_PlaybackTrackMetadata(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_EQ(1, convBack.value().tags.size());
+ EXPECT_EQ(initial.tags[1], convBack.value().tags[0]);
+}
+
+TEST(AudioRecordTrackMetadata, Aidl2Legacy2Aidl) {
+ const RecordTrackMetadata initial{ .source = AudioSource::DEFAULT };
+ auto conv = aidl2legacy_RecordTrackMetadata_record_track_metadata_v7(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_record_track_metadata_v7_RecordTrackMetadata(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+}
+
+TEST(AudioRecordTrackMetadata, NonVendorTags) {
+ RecordTrackMetadata initial{ .source = AudioSource::DEFAULT };
+ initial.tags.emplace_back("random string"); // Must be filtered out.
+ initial.tags.emplace_back("VX_GOOGLE_42");
+ auto conv = aidl2legacy_RecordTrackMetadata_record_track_metadata_v7(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_record_track_metadata_v7_RecordTrackMetadata(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ ASSERT_EQ(1, convBack.value().tags.size());
+ EXPECT_EQ(initial.tags[1], convBack.value().tags[0]);
+}
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 90bb054..8a894f3 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -8,17 +8,6 @@
],
"presubmit-large": [
{
- "name": "CtsMediaMiscTestCases",
- "options": [
- {
- "include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
- }
- ]
- },
- {
"name": "CtsMediaAudioTestCases",
"options": [
{
@@ -35,50 +24,6 @@
"exclude-filter": "android.media.audio.cts.AudioRecordTest"
}
]
- },
- {
- "name": "CtsMediaDecoderTestCases",
- "options": [
- {
- "include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
- }
- ]
- },
- {
- "name": "CtsMediaEncoderTestCases",
- "options": [
- {
- "include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
- }
- ]
- },
- {
- "name": "CtsMediaCodecTestCases",
- "options": [
- {
- "include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
- }
- ]
- },
- {
- "name": "CtsMediaPlayerTestCases",
- "options": [
- {
- "include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
- }
- ]
}
]
}
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index 6ff3dbc..417b261 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -2503,7 +2503,8 @@
* Note: This parameter allows a decoder to ignore the video peek machinery and
* to revert to its preferred behavior.
*/
-typedef C2StreamParam<C2Tuning, C2EasyEnum<C2PlatformConfig::tunnel_peek_mode_t>,
+typedef C2StreamParam<C2Tuning,
+ C2SimpleValueStruct<C2EasyEnum<C2PlatformConfig::tunnel_peek_mode_t>>,
kParamIndexTunnelPeekMode> C2StreamTunnelPeekModeTuning;
constexpr char C2_PARAMKEY_TUNNEL_PEEK_MODE[] =
"output.tunnel-peek-mode";
diff --git a/media/codec2/vndk/internal/C2BlockInternal.h b/media/codec2/vndk/internal/C2BlockInternal.h
index c510fca..6bcad4a 100644
--- a/media/codec2/vndk/internal/C2BlockInternal.h
+++ b/media/codec2/vndk/internal/C2BlockInternal.h
@@ -238,7 +238,7 @@
* - Local migration on blockpool side will be done automatically by
* blockpool.
* - Before attachBuffer(), BeginAttachBlockToBufferQueue() should be called
- * to test eligiblity.
+ * to test eligibility.
* - After attachBuffer() is called, EndAttachBlockToBufferQueue() should
* be called. This will set "held" status to true. If it returned
* false, cancelBuffer() should be called.
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 3a5ba78..b32667e 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -724,11 +724,6 @@
aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(aidl));
legacy.hw_module = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
- legacy.encapsulation_modes = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
- legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
- aidl2legacy_AudioEncapsulationMetadataType_mask(
- aidlSys.encapsulationMetadataTypes));
return legacy;
}
@@ -738,10 +733,6 @@
*aidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy));
aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
- legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
- aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
- legacy2aidl_AudioEncapsulationMetadataType_mask(legacy.encapsulation_metadata_types));
return OK;
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
index 0f5a9b6..24ec230 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortDeviceExtSys.aidl
@@ -22,8 +22,4 @@
parcelable AudioPortDeviceExtSys {
/** Module the device is attached to. Interpreted as audio_module_handle_t. */
int hwModule;
- /** Bitmask, indexed by AudioEncapsulationMode. */
- int encapsulationModes;
- /** Bitmask, indexed by AudioEncapsulationMetadataType. */
- int encapsulationMetadataTypes;
}
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 4185b5f..9ffc75b 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -186,12 +186,12 @@
deviceExt.encodedFormats = VALUE_OR_RETURN_STATUS(
convertContainer<std::vector<media::audio::common::AudioFormatDescription>>(
mEncodedFormats, legacy2aidl_audio_format_t_AudioFormatDescription));
+ deviceExt.encapsulationModes = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
+ deviceExt.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
UNION_SET(parcelable->hal.ext, device, deviceExt);
media::AudioPortDeviceExtSys deviceSys;
- deviceSys.encapsulationModes = VALUE_OR_RETURN_STATUS(
- legacy2aidl_AudioEncapsulationMode_mask(mEncapsulationModes));
- deviceSys.encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
- legacy2aidl_AudioEncapsulationMetadataType_mask(mEncapsulationMetadataTypes));
UNION_SET(parcelable->sys.ext, device, deviceSys);
return OK;
}
@@ -214,12 +214,12 @@
mEncodedFormats = VALUE_OR_RETURN_STATUS(
convertContainer<FormatVector>(deviceExt.encodedFormats,
aidl2legacy_AudioFormatDescription_audio_format_t));
+ mEncapsulationModes = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioEncapsulationMode_mask(deviceExt.encapsulationModes));
+ mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioEncapsulationMetadataType_mask(deviceExt.encapsulationMetadataTypes));
media::AudioPortDeviceExtSys deviceSys = VALUE_OR_RETURN_STATUS(
UNION_GET(parcelable.sys.ext, device));
- mEncapsulationModes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMode_mask(deviceSys.encapsulationModes));
- mEncapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioEncapsulationMetadataType_mask(deviceSys.encapsulationMetadataTypes));
return OK;
}
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index 3f19219..aea31a0 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -256,6 +256,7 @@
"EffectBufferHalAidl.cpp",
"EffectHalAidl.cpp",
"effectsAidlConversion/AidlConversionAec.cpp",
+ "effectsAidlConversion/AidlConversionAgc1.cpp",
"effectsAidlConversion/AidlConversionAgc2.cpp",
"effectsAidlConversion/AidlConversionBassBoost.cpp",
"effectsAidlConversion/AidlConversionDownmix.cpp",
diff --git a/media/libaudiohal/impl/DeviceHalAidl.cpp b/media/libaudiohal/impl/DeviceHalAidl.cpp
index 32ebe36..7d5c3b8 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalAidl.cpp
@@ -45,18 +45,20 @@
using aidl::android::media::audio::common::AudioPort;
using aidl::android::media::audio::common::AudioPortConfig;
using aidl::android::media::audio::common::AudioPortDeviceExt;
+using aidl::android::media::audio::common::AudioPortMixExt;
+using aidl::android::media::audio::common::AudioPortMixExtUseCase;
using aidl::android::media::audio::common::AudioPortExt;
using aidl::android::media::audio::common::AudioSource;
using aidl::android::media::audio::common::Int;
using aidl::android::media::audio::common::Float;
+using aidl::android::hardware::audio::common::getFrameSizeInBytes;
+using aidl::android::hardware::audio::common::isBitPositionFlagSet;
+using aidl::android::hardware::audio::common::makeBitPositionFlagMask;
using aidl::android::hardware::audio::common::RecordTrackMetadata;
using aidl::android::hardware::audio::core::AudioPatch;
using aidl::android::hardware::audio::core::IModule;
using aidl::android::hardware::audio::core::ITelephony;
using aidl::android::hardware::audio::core::StreamDescriptor;
-using android::hardware::audio::common::getFrameSizeInBytes;
-using android::hardware::audio::common::isBitPositionFlagSet;
-using android::hardware::audio::common::makeBitPositionFlagMask;
namespace android {
@@ -248,13 +250,14 @@
::aidl::android::legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
AudioDevice aidlDevice;
aidlDevice.type.type = AudioDeviceType::IN_DEFAULT;
+ AudioSource aidlSource = AudioSource::DEFAULT;
AudioIoFlags aidlFlags = AudioIoFlags::make<AudioIoFlags::Tag::input>(0);
AudioPortConfig mixPortConfig;
Cleanups cleanups;
audio_config writableConfig = *config;
int32_t nominalLatency;
- RETURN_STATUS_IF_ERROR(prepareToOpenStream(0 /*handle*/, aidlDevice, aidlFlags, &writableConfig,
- &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(0 /*handle*/, aidlDevice, aidlFlags, aidlSource,
+ &writableConfig, &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
*size = aidlConfig.frameCount *
getFrameSizeInBytes(aidlConfig.base.format, aidlConfig.base.channelMask);
// Do not disarm cleanups to release temporary port configs.
@@ -263,7 +266,7 @@
status_t DeviceHalAidl::prepareToOpenStream(
int32_t aidlHandle, const AudioDevice& aidlDevice, const AudioIoFlags& aidlFlags,
- struct audio_config* config,
+ AudioSource aidlSource, struct audio_config* config,
Cleanups* cleanups, AudioConfig* aidlConfig, AudioPortConfig* mixPortConfig,
int32_t* nominalLatency) {
const bool isInput = aidlFlags.getTag() == AudioIoFlags::Tag::input;
@@ -275,7 +278,7 @@
if (created) {
cleanups->emplace_front(this, &DeviceHalAidl::resetPortConfig, devicePortConfig.id);
}
- RETURN_STATUS_IF_ERROR(findOrCreatePortConfig(*aidlConfig, aidlFlags, aidlHandle,
+ RETURN_STATUS_IF_ERROR(findOrCreatePortConfig(*aidlConfig, aidlFlags, aidlHandle, aidlSource,
mixPortConfig, &created));
if (created) {
cleanups->emplace_front(this, &DeviceHalAidl::resetPortConfig, mixPortConfig->id);
@@ -441,8 +444,9 @@
AudioPortConfig mixPortConfig;
Cleanups cleanups;
int32_t nominalLatency;
- RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags, config,
- &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags,
+ AudioSource::SYS_RESERVED_INVALID /*only needed for input*/,
+ config, &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
::aidl::android::hardware::audio::core::IModule::OpenOutputStreamArguments args;
args.portConfigId = mixPortConfig.id;
const bool isOffload = isBitPositionFlagSet(
@@ -460,7 +464,7 @@
args.eventCallback = eventCb;
::aidl::android::hardware::audio::core::IModule::OpenOutputStreamReturn ret;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openOutputStream(args, &ret)));
- StreamContextAidl context(ret.desc);
+ StreamContextAidl context(ret.desc, isOffload);
if (!context.isValid()) {
ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
__func__, ret.desc.toString().c_str());
@@ -505,8 +509,8 @@
AudioPortConfig mixPortConfig;
Cleanups cleanups;
int32_t nominalLatency;
- RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags, config,
- &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags, aidlSource,
+ config, &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
::aidl::android::hardware::audio::core::IModule::OpenInputStreamArguments args;
args.portConfigId = mixPortConfig.id;
RecordTrackMetadata aidlTrackMetadata{
@@ -520,7 +524,7 @@
args.bufferSizeFrames = aidlConfig.frameCount;
::aidl::android::hardware::audio::core::IModule::OpenInputStreamReturn ret;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openInputStream(args, &ret)));
- StreamContextAidl context(ret.desc);
+ StreamContextAidl context(ret.desc, false /*isAsynchronous*/);
if (!context.isValid()) {
ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
__func__, ret.desc.toString().c_str());
@@ -549,8 +553,19 @@
sources == nullptr || sinks == nullptr || patch == nullptr) {
return BAD_VALUE;
}
- // Note that the patch handle (*patch) is provided by the framework.
- // In tests it's possible that its value is AUDIO_PATCH_HANDLE_NONE.
+ // When the patch handle (*patch) is AUDIO_PATCH_HANDLE_NONE, it means
+ // the framework wants to create a new patch. The handle has to be generated
+ // by the HAL. Since handles generated this way can only be unique within
+ // a HAL module, the framework generates a globally unique handle and maps
+ // it onto the <HAL module, patch handle> pair.
+ // When the patch handle is set, it means the framework intends to update
+ // an existing patch.
+ //
+ // This mirrors the behavior of the HAL module, with the only difference
+ // that the HAL module uses `int32_t` for patch IDs. The following assert ensures
+ // that both the framework and the HAL use the same value for "no ID":
+ static_assert(AUDIO_PATCH_HANDLE_NONE == 0);
+ int32_t halPatchId = static_cast<int32_t>(*patch);
// Upon conversion, mix port configs contain audio configuration, while
// device port configs contain device address. This data is used to find
@@ -573,17 +588,12 @@
sinks[i], isInput, 0)));
}
Cleanups cleanups;
- auto existingPatchIt = mPatches.end();
- auto fwkHandlesIt = *patch != AUDIO_PATCH_HANDLE_NONE ?
- mFwkHandles.find(*patch) : mFwkHandles.end();
+ auto existingPatchIt = halPatchId != 0 ? mPatches.find(halPatchId) : mPatches.end();
AudioPatch aidlPatch;
- if (fwkHandlesIt != mFwkHandles.end()) {
- existingPatchIt = mPatches.find(fwkHandlesIt->second);
- if (existingPatchIt != mPatches.end()) {
- aidlPatch = existingPatchIt->second;
- aidlPatch.sourcePortConfigIds.clear();
- aidlPatch.sinkPortConfigIds.clear();
- }
+ if (existingPatchIt != mPatches.end()) {
+ aidlPatch = existingPatchIt->second;
+ aidlPatch.sourcePortConfigIds.clear();
+ aidlPatch.sinkPortConfigIds.clear();
}
ALOGD("%s: sources: %s, sinks: %s",
__func__, ::android::internal::ToString(aidlSources).c_str(),
@@ -611,20 +621,8 @@
bool created = false;
RETURN_STATUS_IF_ERROR(findOrCreatePatch(aidlPatch, &aidlPatch, &created));
// Since no cleanup of the patch is needed, 'created' is ignored.
- if (fwkHandlesIt != mFwkHandles.end()) {
- fwkHandlesIt->second = aidlPatch.id;
- // Patch handle (*patch) stays the same.
- } else {
- if (*patch == AUDIO_PATCH_HANDLE_NONE) {
- // This isn't good as the module can't provide a handle which is really unique.
- // However, this situation should only happen in tests.
- *patch = aidlPatch.id;
- LOG_ALWAYS_FATAL_IF(mFwkHandles.count(*patch) > 0,
- "%s: patch id %d clashes with another framework patch handle",
- __func__, *patch);
- }
- mFwkHandles.emplace(*patch, aidlPatch.id);
- }
+ halPatchId = aidlPatch.id;
+ *patch = static_cast<audio_patch_handle_t>(halPatchId);
}
cleanups.disarmAll();
return OK;
@@ -634,12 +632,18 @@
ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
TIME_CHECK();
if (!mModule) return NO_INIT;
- auto idMapIt = mFwkHandles.find(patch);
- if (idMapIt == mFwkHandles.end()) {
+ static_assert(AUDIO_PATCH_HANDLE_NONE == 0);
+ if (patch == AUDIO_PATCH_HANDLE_NONE) {
return BAD_VALUE;
}
- RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->resetAudioPatch(idMapIt->second)));
- mFwkHandles.erase(idMapIt);
+ int32_t halPatchId = static_cast<int32_t>(patch);
+ auto patchIt = mPatches.find(halPatchId);
+ if (patchIt == mPatches.end()) {
+ ALOGE("%s: patch with id %d not found", __func__, halPatchId);
+ return BAD_VALUE;
+ }
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->resetAudioPatch(halPatchId)));
+ mPatches.erase(patchIt);
return OK;
}
@@ -744,22 +748,27 @@
return p.ext.get<AudioPortExt::Tag::device>().device == device;
}
-status_t DeviceHalAidl::createPortConfig(const AudioPortConfig& requestedPortConfig,
- AudioPortConfig* appliedPortConfig) {
+status_t DeviceHalAidl::createPortConfig(
+ const AudioPortConfig& requestedPortConfig, PortConfigs::iterator* result) {
TIME_CHECK();
+ AudioPortConfig appliedPortConfig;
bool applied = false;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->setAudioPortConfig(
- requestedPortConfig, appliedPortConfig, &applied)));
+ requestedPortConfig, &appliedPortConfig, &applied)));
if (!applied) {
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->setAudioPortConfig(
- *appliedPortConfig, appliedPortConfig, &applied)));
+ appliedPortConfig, &appliedPortConfig, &applied)));
if (!applied) {
ALOGE("%s: module %s did not apply suggested config %s",
- __func__, mInstance.c_str(), appliedPortConfig->toString().c_str());
+ __func__, mInstance.c_str(), appliedPortConfig.toString().c_str());
return NO_INIT;
}
}
- mPortConfigs.emplace(appliedPortConfig->id, *appliedPortConfig);
+ auto id = appliedPortConfig.id;
+ auto [it, inserted] = mPortConfigs.emplace(std::move(id), std::move(appliedPortConfig));
+ LOG_ALWAYS_FATAL_IF(!inserted, "%s: port config with id %d already exists",
+ __func__, it->first);
+ *result = it;
return OK;
}
@@ -806,10 +815,7 @@
}
AudioPortConfig requestedPortConfig;
requestedPortConfig.portId = portsIt->first;
- AudioPortConfig appliedPortConfig;
- RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &appliedPortConfig));
- portConfigIt = mPortConfigs.insert(
- mPortConfigs.end(), std::make_pair(appliedPortConfig.id, appliedPortConfig));
+ RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &portConfigIt));
*created = true;
} else {
*created = false;
@@ -820,24 +826,46 @@
status_t DeviceHalAidl::findOrCreatePortConfig(
const AudioConfig& config, const std::optional<AudioIoFlags>& flags, int32_t ioHandle,
- AudioPortConfig* portConfig, bool* created) {
+ AudioSource source, AudioPortConfig* portConfig, bool* created) {
+ // These flags get removed one by one in this order when retrying port finding.
+ static const std::vector<AudioInputFlags> kOptionalInputFlags{
+ AudioInputFlags::FAST, AudioInputFlags::RAW };
auto portConfigIt = findPortConfig(config, flags, ioHandle);
if (portConfigIt == mPortConfigs.end() && flags.has_value()) {
- auto portsIt = findPort(config, flags.value());
+ auto optionalInputFlagsIt = kOptionalInputFlags.begin();
+ AudioIoFlags matchFlags = flags.value();
+ auto portsIt = findPort(config, matchFlags);
+ while (portsIt == mPorts.end() && matchFlags.getTag() == AudioIoFlags::Tag::input
+ && optionalInputFlagsIt != kOptionalInputFlags.end()) {
+ if (!isBitPositionFlagSet(
+ matchFlags.get<AudioIoFlags::Tag::input>(), *optionalInputFlagsIt)) {
+ ++optionalInputFlagsIt;
+ continue;
+ }
+ matchFlags.set<AudioIoFlags::Tag::input>(matchFlags.get<AudioIoFlags::Tag::input>() &
+ ~makeBitPositionFlagMask(*optionalInputFlagsIt++));
+ portsIt = findPort(config, matchFlags);
+ ALOGI("%s: mix port for config %s, flags %s was not found in the module %s, "
+ "retried with flags %s", __func__, config.toString().c_str(),
+ flags.value().toString().c_str(), mInstance.c_str(),
+ matchFlags.toString().c_str());
+ }
if (portsIt == mPorts.end()) {
ALOGE("%s: mix port for config %s, flags %s is not found in the module %s",
- __func__, config.toString().c_str(), flags.value().toString().c_str(),
+ __func__, config.toString().c_str(), matchFlags.toString().c_str(),
mInstance.c_str());
return BAD_VALUE;
}
AudioPortConfig requestedPortConfig;
requestedPortConfig.portId = portsIt->first;
setPortConfigFromConfig(&requestedPortConfig, config);
- AudioPortConfig appliedPortConfig;
- RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &appliedPortConfig));
- appliedPortConfig.ext.get<AudioPortExt::Tag::mix>().handle = ioHandle;
- portConfigIt = mPortConfigs.insert(
- mPortConfigs.end(), std::make_pair(appliedPortConfig.id, appliedPortConfig));
+ requestedPortConfig.ext = AudioPortMixExt{ .handle = ioHandle };
+ if (matchFlags.getTag() == AudioIoFlags::Tag::input
+ && source != AudioSource::SYS_RESERVED_INVALID) {
+ requestedPortConfig.ext.get<AudioPortExt::Tag::mix>().usecase =
+ AudioPortMixExtUseCase::make<AudioPortMixExtUseCase::Tag::source>(source);
+ }
+ RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &portConfigIt));
*created = true;
} else if (!flags.has_value()) {
ALOGW("%s: mix port config for %s, handle %d not found in the module %s, "
@@ -864,8 +892,12 @@
}
AudioConfig config;
setConfigFromPortConfig(&config, requestedPortConfig);
+ AudioSource source = requestedPortConfig.ext.get<Tag::mix>().usecase.getTag() ==
+ AudioPortMixExtUseCase::Tag::source ?
+ requestedPortConfig.ext.get<Tag::mix>().usecase.
+ get<AudioPortMixExtUseCase::Tag::source>() : AudioSource::SYS_RESERVED_INVALID;
return findOrCreatePortConfig(config, requestedPortConfig.flags,
- requestedPortConfig.ext.get<Tag::mix>().handle, portConfig, created);
+ requestedPortConfig.ext.get<Tag::mix>().handle, source, portConfig, created);
} else if (requestedPortConfig.ext.getTag() == Tag::device) {
return findOrCreatePortConfig(
requestedPortConfig.ext.get<Tag::device>().device, portConfig, created);
@@ -899,12 +931,10 @@
DeviceHalAidl::Ports::iterator DeviceHalAidl::findPort(
const AudioConfig& config, const AudioIoFlags& flags) {
- using Tag = AudioPortExt::Tag;
- AudioIoFlags matchFlags = flags;
auto matcher = [&](const auto& pair) {
const auto& p = pair.second;
- return p.ext.getTag() == Tag::mix &&
- p.flags == matchFlags &&
+ return p.ext.getTag() == AudioPortExt::Tag::mix &&
+ p.flags == flags &&
std::find_if(p.profiles.begin(), p.profiles.end(),
[&](const auto& prof) {
return prof.format == config.base.format &&
@@ -913,15 +943,7 @@
std::find(prof.sampleRates.begin(), prof.sampleRates.end(),
config.base.sampleRate) != prof.sampleRates.end();
}) != p.profiles.end(); };
- auto it = std::find_if(mPorts.begin(), mPorts.end(), matcher);
- if (it == mPorts.end() && flags.getTag() == AudioIoFlags::Tag::input &&
- isBitPositionFlagSet(flags.get<AudioIoFlags::Tag::input>(), AudioInputFlags::FAST)) {
- // "Fast" input is not a mandatory flag, try without it.
- matchFlags.set<AudioIoFlags::Tag::input>(flags.get<AudioIoFlags::Tag::input>() &
- ~makeBitPositionFlagMask(AudioInputFlags::FAST));
- it = std::find_if(mPorts.begin(), mPorts.end(), matcher);
- }
- return it;
+ return std::find_if(mPorts.begin(), mPorts.end(), matcher);
}
DeviceHalAidl::PortConfigs::iterator DeviceHalAidl::findPortConfig(const AudioDevice& device) {
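For reference, the createAudioPatch change above reduces to a find-or-update keyed directly by the HAL patch id, now that the separate framework handle map is gone. A minimal sketch, assuming a simplified PatchStub type in place of the AIDL AudioPatch:

    #include <cstdint>
    #include <map>

    struct PatchStub { int32_t id = 0; };             // stand-in for AudioPatch
    using Patches = std::map<int32_t, PatchStub>;      // keyed by HAL patch id

    // Handle 0 (== AUDIO_PATCH_HANDLE_NONE) requests a new patch; any other value
    // must refer to an existing patch that is being reconfigured.
    PatchStub startPatchUpdate(const Patches& patches, int32_t halPatchId) {
        if (halPatchId != 0) {
            if (auto it = patches.find(halPatchId); it != patches.end()) {
                return it->second;                     // reuse; port config ids are cleared next
            }
        }
        return PatchStub{};                            // a new patch gets its id from the HAL
    }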
diff --git a/media/libaudiohal/impl/DeviceHalAidl.h b/media/libaudiohal/impl/DeviceHalAidl.h
index 6f16daf..04afa68 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.h
+++ b/media/libaudiohal/impl/DeviceHalAidl.h
@@ -179,7 +179,7 @@
const ::aidl::android::media::audio::common::AudioPortConfig& p);
status_t createPortConfig(
const ::aidl::android::media::audio::common::AudioPortConfig& requestedPortConfig,
- ::aidl::android::media::audio::common::AudioPortConfig* appliedPortConfig);
+ PortConfigs::iterator* result);
status_t findOrCreatePatch(
const std::set<int32_t>& sourcePortConfigIds,
const std::set<int32_t>& sinkPortConfigIds,
@@ -195,6 +195,7 @@
const ::aidl::android::media::audio::common::AudioConfig& config,
const std::optional<::aidl::android::media::audio::common::AudioIoFlags>& flags,
int32_t ioHandle,
+ ::aidl::android::media::audio::common::AudioSource aidlSource,
::aidl::android::media::audio::common::AudioPortConfig* portConfig, bool* created);
status_t findOrCreatePortConfig(
const ::aidl::android::media::audio::common::AudioPortConfig& requestedPortConfig,
@@ -218,6 +219,7 @@
int32_t aidlHandle,
const ::aidl::android::media::audio::common::AudioDevice& aidlDevice,
const ::aidl::android::media::audio::common::AudioIoFlags& aidlFlags,
+ ::aidl::android::media::audio::common::AudioSource aidlSource,
struct audio_config* config,
Cleanups* cleanups,
::aidl::android::media::audio::common::AudioConfig* aidlConfig,
@@ -248,7 +250,6 @@
int32_t mDefaultOutputPortId = -1;
PortConfigs mPortConfigs;
Patches mPatches;
- std::map<audio_patch_handle_t, int32_t /*patch ID*/> mFwkHandles;
std::mutex mLock;
std::map<void*, Callbacks> mCallbacks GUARDED_BY(mLock);
};
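The findOrCreatePortConfig change in DeviceHalAidl.cpp retries the mix port search after clearing optional input flags (FAST, then RAW) one at a time. A hedged sketch of that bit-peeling loop over a plain integer mask; the bit positions and the findPort predicate are illustrative, not the real AIDL values:

    #include <cstdint>
    #include <functional>
    #include <vector>

    // Flag bits that may be dropped when no port matches, in drop order (illustrative values).
    static const std::vector<int> kOptionalFlagBits = {0 /*FAST*/, 1 /*RAW*/};

    // Relax the flags until the lookup succeeds or there is nothing left to drop;
    // returns the mask used for the last attempt.
    uint32_t relaxFlagsUntilFound(uint32_t flags, const std::function<bool(uint32_t)>& findPort) {
        auto bit = kOptionalFlagBits.begin();
        while (!findPort(flags) && bit != kOptionalFlagBits.end()) {
            if ((flags & (1u << *bit)) == 0) { ++bit; continue; }   // flag not set, skip it
            flags &= ~(1u << *bit++);                               // drop it and retry
        }
        return flags;
    }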
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.cpp b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
index 5af8e24..a701852 100644
--- a/media/libaudiohal/impl/EffectBufferHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.cpp
@@ -14,26 +14,39 @@
* limitations under the License.
*/
+#include <algorithm>
+#include <cstdint>
+#include <cstring>
+#include <sys/mman.h>
#define LOG_TAG "EffectBufferHalAidl"
//#define LOG_NDEBUG 0
+#include <cutils/ashmem.h>
#include <utils/Log.h>
#include "EffectBufferHalAidl.h"
+using ndk::ScopedFileDescriptor;
+
namespace android {
namespace effect {
// static
status_t EffectBufferHalAidl::allocate(size_t size, sp<EffectBufferHalInterface>* buffer) {
- ALOGE("%s not implemented yet %zu %p", __func__, size, buffer);
return mirror(nullptr, size, buffer);
}
status_t EffectBufferHalAidl::mirror(void* external, size_t size,
sp<EffectBufferHalInterface>* buffer) {
- // buffer->setExternalData(external);
- ALOGW("%s not implemented yet %p %zu %p", __func__, external, size, buffer);
+ sp<EffectBufferHalAidl> tempBuffer = new EffectBufferHalAidl(size);
+ status_t status = tempBuffer.get()->init();
+ if (status != OK) {
+ ALOGE("%s init failed %d", __func__, status);
+ return status;
+ }
+
+ tempBuffer->setExternalData(external);
+ *buffer = tempBuffer;
return OK;
}
@@ -48,7 +61,22 @@
}
status_t EffectBufferHalAidl::init() {
- ALOGW("%s not implemented yet", __func__);
+ int fd = ashmem_create_region("audioEffectAidl", mBufferSize);
+ if (fd < 0) {
+ ALOGE("%s create ashmem failed %d", __func__, fd);
+ return fd;
+ }
+
+ ScopedFileDescriptor tempFd(fd);
+ mAudioBuffer.raw = mmap(nullptr /* address */, mBufferSize /* length */, PROT_READ | PROT_WRITE,
+ MAP_SHARED, fd, 0 /* offset */);
+ if (mAudioBuffer.raw == MAP_FAILED) {
+ ALOGE("mmap failed for fd %d", fd);
+ mAudioBuffer.raw = nullptr;
+ return INVALID_OPERATION;
+ }
+
+ mMemory = {std::move(tempFd), static_cast<int64_t>(mBufferSize)};
return OK;
}
@@ -76,11 +104,26 @@
}
void EffectBufferHalAidl::update() {
- ALOGW("%s not implemented yet", __func__);
+ update(mBufferSize);
}
void EffectBufferHalAidl::commit() {
- ALOGW("%s not implemented yet", __func__);
+ commit(mBufferSize);
+}
+
+void EffectBufferHalAidl::copy(void* dst, const void* src, size_t n) const {
+ if (!dst || !src) {
+ return;
+ }
+ std::memcpy(dst, src, std::min(n, mBufferSize));
+}
+
+void EffectBufferHalAidl::update(size_t n) {
+ copy(mAudioBuffer.raw, mExternalData, n);
+}
+
+void EffectBufferHalAidl::commit(size_t n) {
+ copy(mExternalData, mAudioBuffer.raw, n);
}
} // namespace effect
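EffectBufferHalAidl::init() above backs the effect buffer with an ashmem region that is also mapped locally. A minimal sketch of that allocate-and-map step, assuming the caller keeps the fd alive for the Ashmem parcelable, as the real code does with ScopedFileDescriptor:

    #include <cstddef>
    #include <sys/mman.h>
    #include <unistd.h>
    #include <cutils/ashmem.h>

    // Creates an ashmem region and maps it read/write; returns nullptr on failure.
    // The fd is handed back through 'outFd' so the caller can wrap it into the Ashmem parcelable.
    void* mapSharedEffectBuffer(size_t sizeBytes, int* outFd) {
        int fd = ashmem_create_region("audioEffectAidl", sizeBytes);
        if (fd < 0) return nullptr;
        void* addr = mmap(nullptr, sizeBytes, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /*offset*/);
        if (addr == MAP_FAILED) {
            close(fd);
            return nullptr;
        }
        *outFd = fd;
        return addr;
    }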
diff --git a/media/libaudiohal/impl/EffectBufferHalAidl.h b/media/libaudiohal/impl/EffectBufferHalAidl.h
index f488708..035314b 100644
--- a/media/libaudiohal/impl/EffectBufferHalAidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalAidl.h
@@ -16,6 +16,8 @@
#pragma once
+#include <aidl/android/hardware/common/Ashmem.h>
+
#include <media/audiohal/EffectBufferHalInterface.h>
#include <system/audio_effect.h>
@@ -44,16 +46,18 @@
private:
friend class EffectBufferHalInterface;
+ // buffer size in bytes
const size_t mBufferSize;
bool mFrameCountChanged;
void* mExternalData;
+ aidl::android::hardware::common::Ashmem mMemory;
audio_buffer_t mAudioBuffer;
// Can not be constructed directly by clients.
explicit EffectBufferHalAidl(size_t size);
~EffectBufferHalAidl();
-
+ void copy(void* dst, const void* src, size_t n) const;
status_t init();
};
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
index 7e25b04..519b871 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
@@ -24,6 +24,7 @@
#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionNdk.h>
#include <media/AidlConversionEffect.h>
+#include <system/audio_effects/effect_visualizer.h>
#include <utils/Log.h>
@@ -35,6 +36,7 @@
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::CommandId;
using ::aidl::android::hardware::audio::effect::Descriptor;
+using ::aidl::android::hardware::audio::effect::Flags;
using ::aidl::android::hardware::audio::effect::Parameter;
using ::aidl::android::media::audio::common::AudioDeviceDescription;
using ::aidl::android::media::audio::common::AudioMode;
@@ -59,12 +61,18 @@
{EFFECT_CMD_SET_INPUT_DEVICE, &EffectConversionHelperAidl::handleSetDevice},
{EFFECT_CMD_SET_VOLUME, &EffectConversionHelperAidl::handleSetVolume},
{EFFECT_CMD_OFFLOAD, &EffectConversionHelperAidl::handleSetOffload},
- {EFFECT_CMD_FIRST_PROPRIETARY, &EffectConversionHelperAidl::handleFirstPriority}};
+ // Only the visualizer supports these commands; they reuse EFFECT_CMD_FIRST_PROPRIETARY
+ {VISUALIZER_CMD_CAPTURE, &EffectConversionHelperAidl::handleVisualizerCapture},
+ {VISUALIZER_CMD_MEASURE, &EffectConversionHelperAidl::handleVisualizerMeasure}};
EffectConversionHelperAidl::EffectConversionHelperAidl(
std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
int32_t sessionId, int32_t ioId, const Descriptor& desc)
- : mSessionId(sessionId), mIoId(ioId), mDesc(desc), mEffect(std::move(effect)) {
+ : mSessionId(sessionId),
+ mIoId(ioId),
+ mDesc(desc),
+ mEffect(std::move(effect)),
+ mIsInputStream(mDesc.common.flags.type == Flags::Type::PRE_PROC) {
mCommon.session = sessionId;
mCommon.ioHandle = ioId;
mCommon.input = mCommon.output = kDefaultAudioConfig;
@@ -136,16 +144,32 @@
return ret;
}
-status_t EffectConversionHelperAidl::handleSetConfig(uint32_t cmdSize,
- const void* pCmdData __unused,
+status_t EffectConversionHelperAidl::handleSetConfig(uint32_t cmdSize, const void* pCmdData,
uint32_t* replySize, void* pReplyData) {
if (!replySize || *replySize != sizeof(int) || !pReplyData ||
cmdSize != sizeof(effect_config_t)) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
return BAD_VALUE;
}
- // TODO: need to implement setConfig with setParameter(common)
- return *static_cast<int32_t*>(pReplyData) = OK;
+ effect_config_t* config = (effect_config_t*)pCmdData;
+ Parameter::Common aidlCommon = {
+ .session = mSessionId,
+ .ioHandle = mIoId,
+ .input = {.base = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_buffer_config_t_AudioConfigBase(
+ config->inputCfg, mIsInputStream))},
+ .output = {.base = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_buffer_config_t_AudioConfigBase(
+ config->outputCfg, mIsInputStream))}};
+
+ Parameter aidlParam = UNION_MAKE(Parameter, common, aidlCommon);
+
+ status_t ret = statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+ EffectParamWriter writer(*(effect_param_t*)pReplyData);
+ writer.setStatus(ret);
+ return ret;
}
status_t EffectConversionHelperAidl::handleGetConfig(uint32_t cmdSize __unused,
@@ -250,17 +274,17 @@
return *static_cast<int32_t*>(pReplyData) = OK;
}
status_t EffectConversionHelperAidl::handleSetVolume(uint32_t cmdSize, const void* pCmdData,
- uint32_t* replySize, void* pReplyData) {
- if (cmdSize != 2 * sizeof(uint32_t) || !pCmdData || !replySize || !pReplyData) {
- ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
- pReplyData);
+ uint32_t* replySize __unused,
+ void* pReplyData __unused) {
+ if (cmdSize != 2 * sizeof(uint32_t) || !pCmdData) {
+ ALOGE("%s parameter invalid %u %p", __func__, cmdSize, pCmdData);
return BAD_VALUE;
}
Parameter::VolumeStereo volume = {.left = (float)(*(uint32_t*)pCmdData) / (1 << 24),
.right = (float)(*(uint32_t*)pCmdData + 1) / (1 << 24)};
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
mEffect->setParameter(Parameter::make<Parameter::volumeStereo>(volume))));
- return *static_cast<int32_t*>(pReplyData) = OK;
+ return OK;
}
status_t EffectConversionHelperAidl::handleSetOffload(uint32_t cmdSize, const void* pCmdData,
@@ -274,16 +298,44 @@
return *static_cast<int32_t*>(pReplyData) = OK;
}
-status_t EffectConversionHelperAidl::handleFirstPriority(uint32_t cmdSize __unused,
- const void* pCmdData __unused,
- uint32_t* replySize, void* pReplyData) {
+status_t EffectConversionHelperAidl::handleVisualizerCapture(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize,
+ void* pReplyData) {
if (!replySize || !pReplyData) {
ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
return BAD_VALUE;
}
- // TODO to be implemented
- return OK;
+ const auto& uuid = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_AudioUuid_audio_uuid_t(mDesc.common.id.type));
+ if (0 != memcmp(&uuid, SL_IID_VISUALIZATION, sizeof(effect_uuid_t))) {
+ ALOGE("%s visualizer command not supported by %s", __func__,
+ mDesc.common.id.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ return visualizerCapture(replySize, pReplyData);
+}
+
+status_t EffectConversionHelperAidl::handleVisualizerMeasure(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize,
+ void* pReplyData) {
+ if (!replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ const auto& uuid = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_AudioUuid_audio_uuid_t(mDesc.common.id.type));
+ if (0 != memcmp(&uuid, SL_IID_VISUALIZATION, sizeof(effect_uuid_t))) {
+ ALOGE("%s visualizer command not supported by %s", __func__,
+ mDesc.common.id.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ return visualizerMeasure(replySize, pReplyData);
}
} // namespace effect
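handleSetVolume above converts the legacy stereo volume, delivered as unsigned 8.24 fixed point, into the floats used by Parameter::VolumeStereo. The same arithmetic as a standalone helper (a sketch, not an existing API):

    #include <cstdint>

    // Legacy effect volume is unsigned 8.24 fixed point: 1 << 24 represents unity gain (1.0f).
    inline float q8_24ToFloat(uint32_t v) {
        return static_cast<float>(v) / static_cast<float>(1 << 24);
    }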
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.h b/media/libaudiohal/impl/EffectConversionHelperAidl.h
index 94435c6..54df1b8 100644
--- a/media/libaudiohal/impl/EffectConversionHelperAidl.h
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.h
@@ -30,12 +30,18 @@
status_t handleCommand(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData, uint32_t* replySize,
void* pReplyData);
virtual ~EffectConversionHelperAidl() {}
+ const ::aidl::android::hardware::audio::effect::IEffect::OpenEffectReturn&
+ getEffectReturnParam() const {
+ return mOpenReturn;
+ }
protected:
const int32_t mSessionId;
const int32_t mIoId;
const ::aidl::android::hardware::audio::effect::Descriptor mDesc;
const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> mEffect;
+ // whether the effect is instantiated on an input stream
+ const bool mIsInputStream;
::aidl::android::hardware::audio::effect::IEffect::OpenEffectReturn mOpenReturn;
::aidl::android::hardware::audio::effect::Parameter::Common mCommon;
@@ -92,12 +98,20 @@
void* pReplyData);
status_t handleSetOffload(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
void* pReplyData);
- status_t handleFirstPriority(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
- void* pReplyData);
+ status_t handleVisualizerCapture(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleVisualizerMeasure(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
// implemented by conversion of each effect
virtual status_t setParameter(utils::EffectParamReader& param) = 0;
virtual status_t getParameter(utils::EffectParamWriter& param) = 0;
+ virtual status_t visualizerCapture(uint32_t* replySize __unused, void* pReplyData __unused) {
+ return BAD_VALUE;
+ }
+ virtual status_t visualizerMeasure(uint32_t* replySize __unused, void* pReplyData __unused) {
+ return BAD_VALUE;
+ }
};
} // namespace effect
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
index 8fa301a..3e843c6 100644
--- a/media/libaudiohal/impl/EffectHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <cstddef>
#define LOG_TAG "EffectHalAidl"
//#define LOG_NDEBUG 0
@@ -34,6 +35,7 @@
#include <aidl/android/hardware/audio/effect/IEffect.h>
#include "effectsAidlConversion/AidlConversionAec.h"
+#include "effectsAidlConversion/AidlConversionAgc1.h"
#include "effectsAidlConversion/AidlConversionAgc2.h"
#include "effectsAidlConversion/AidlConversionBassBoost.h"
#include "effectsAidlConversion/AidlConversionDownmix.h"
@@ -88,6 +90,9 @@
if (typeUuid == kAcousticEchoCancelerTypeUUID) {
mConversion =
std::make_unique<android::effect::AidlConversionAec>(effect, sessionId, ioId, desc);
+ } else if (typeUuid == kAutomaticGainControl1TypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionAgc1>(effect, sessionId, ioId,
+ desc);
} else if (typeUuid == kAutomaticGainControl2TypeUUID) {
mConversion = std::make_unique<android::effect::AidlConversionAgc2>(effect, sessionId, ioId,
desc);
@@ -136,24 +141,51 @@
}
status_t EffectHalAidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (buffer == nullptr) {
- return BAD_VALUE;
- }
- ALOGW("%s not implemented yet", __func__);
+ mInBuffer = buffer;
return OK;
}
status_t EffectHalAidl::setOutBuffer(const sp<EffectBufferHalInterface>& buffer) {
- if (buffer == nullptr) {
- return BAD_VALUE;
- }
- ALOGW("%s not implemented yet", __func__);
+ mOutBuffer = buffer;
return OK;
}
+
+// write to input FMQ here, wait for statusMQ STATUS_OK, and read from output FMQ
status_t EffectHalAidl::process() {
- ALOGW("%s not implemented yet", __func__);
- // write to input FMQ here, and wait for statusMQ STATUS_OK
+ size_t available = mInputQ->availableToWrite();
+ size_t floatsToWrite = std::min(available, mInBuffer->getSize() / sizeof(float));
+ if (floatsToWrite == 0) {
+ ALOGW("%s not able to write, floats in buffer %zu, space in FMQ %zu", __func__,
+ mInBuffer->getSize() / sizeof(float), available);
+ return INVALID_OPERATION;
+ }
+ if (!mInputQ->write((float*)mInBuffer->ptr(), floatsToWrite)) {
+ ALOGW("%s failed to write %zu into inputQ", __func__, floatsToWrite);
+ return INVALID_OPERATION;
+ }
+
+ IEffect::Status retStatus{};
+ if (!mStatusQ->readBlocking(&retStatus, 1) || retStatus.status != OK ||
+ (size_t)retStatus.fmqConsumed != floatsToWrite || retStatus.fmqProduced == 0) {
+ ALOGW("%s read status failed: %s", __func__, retStatus.toString().c_str());
+ return INVALID_OPERATION;
+ }
+
+ available = mOutputQ->availableToRead();
+ size_t floatsToRead = std::min(available, mOutBuffer->getSize() / sizeof(float));
+ if (floatsToRead == 0) {
+ ALOGW("%s not able to read, buffer space %zu, floats in FMQ %zu", __func__,
+ mOutBuffer->getSize() / sizeof(float), available);
+ return INVALID_OPERATION;
+ }
+ if (!mOutputQ->read((float*)mOutBuffer->ptr(), floatsToRead)) {
+ ALOGW("%s failed to read %zu from outputQ", __func__, floatsToRead);
+ return INVALID_OPERATION;
+ }
+
+ ALOGD("%s %s consumed %zu produced %zu", __func__, mDesc.common.name.c_str(), floatsToWrite,
+ floatsToRead);
return OK;
}
@@ -165,14 +197,32 @@
status_t EffectHalAidl::command(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
uint32_t* replySize, void* pReplyData) {
- return mConversion
- ? mConversion->handleCommand(cmdCode, cmdSize, pCmdData, replySize, pReplyData)
- : INVALID_OPERATION;
+ TIME_CHECK();
+ if (!mConversion) {
+ ALOGE("%s can not handle command %d when conversion not exist", __func__, cmdCode);
+ return INVALID_OPERATION;
+ }
+
+ status_t ret = mConversion->handleCommand(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
+ // update FMQs after the effect has been opened successfully
+ if (ret == OK && cmdCode == EFFECT_CMD_INIT) {
+ const auto& retParam = mConversion->getEffectReturnParam();
+ mStatusQ = std::make_unique<StatusMQ>(retParam.statusMQ);
+ mInputQ = std::make_unique<DataMQ>(retParam.inputDataMQ);
+ mOutputQ = std::make_unique<DataMQ>(retParam.outputDataMQ);
+ if (!mStatusQ->isValid() || !mInputQ->isValid() || !mOutputQ->isValid()) {
+ ALOGE("%s return with invalid FMQ", __func__);
+ return NO_INIT;
+ }
+ }
+
+ return ret;
}
status_t EffectHalAidl::getDescriptor(effect_descriptor_t* pDescriptor) {
- ALOGW("%s %p", __func__, pDescriptor);
+ TIME_CHECK();
if (pDescriptor == nullptr) {
+ ALOGE("%s null descriptor pointer", __func__);
return BAD_VALUE;
}
Descriptor aidlDesc;
@@ -184,12 +234,13 @@
}
status_t EffectHalAidl::close() {
+ TIME_CHECK();
return statusTFromBinderStatus(mEffect->close());
}
status_t EffectHalAidl::dump(int fd) {
- ALOGW("%s not implemented yet, fd %d", __func__, fd);
- return OK;
+ TIME_CHECK();
+ return mEffect->dump(fd, nullptr, 0);
}
} // namespace effect
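EffectHalAidl::process() above runs a single burst over the three message queues: write what fits into the input FMQ, block on the status FMQ, then read what fits from the output FMQ. A hedged sketch of that sequence against a generic queue interface shaped like AidlMessageQueue (write, readBlocking, read, availableTo*); error logging is omitted:

    #include <algorithm>
    #include <cstddef>

    template <typename Status, typename DataQ, typename StatusQ>
    bool burstOnce(DataQ& inQ, StatusQ& statusQ, DataQ& outQ,
                   const float* inBuf, size_t inFloats, float* outBuf, size_t outFloats) {
        const size_t toWrite = std::min(inQ.availableToWrite(), inFloats);
        if (toWrite == 0 || !inQ.write(inBuf, toWrite)) return false;
        Status status{};
        if (!statusQ.readBlocking(&status, 1) ||
                static_cast<size_t>(status.fmqConsumed) != toWrite) {
            return false;                              // the HAL did not consume the burst
        }
        const size_t toRead = std::min(outQ.availableToRead(), outFloats);
        return toRead != 0 && outQ.read(outBuf, toRead);
    }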
diff --git a/media/libaudiohal/impl/EffectHalAidl.h b/media/libaudiohal/impl/EffectHalAidl.h
index 83b644b..194150d 100644
--- a/media/libaudiohal/impl/EffectHalAidl.h
+++ b/media/libaudiohal/impl/EffectHalAidl.h
@@ -16,11 +16,13 @@
#pragma once
+#include <memory>
+
#include <aidl/android/hardware/audio/effect/IEffect.h>
#include <aidl/android/hardware/audio/effect/IFactory.h>
+#include <fmq/AidlMessageQueue.h>
#include <media/audiohal/EffectHalInterface.h>
#include <system/audio_effect.h>
-#include <memory>
#include "EffectConversionHelperAidl.h"
@@ -29,6 +31,12 @@
class EffectHalAidl : public EffectHalInterface {
public:
+ using StatusMQ = ::android::AidlMessageQueue<
+ ::aidl::android::hardware::audio::effect::IEffect::Status,
+ ::aidl::android::hardware::common::fmq::SynchronizedReadWrite>;
+ using DataMQ = ::android::AidlMessageQueue<
+ float, ::aidl::android::hardware::common::fmq::SynchronizedReadWrite>;
+
// Set the input buffer.
status_t setInBuffer(const sp<EffectBufferHalInterface>& buffer) override;
@@ -63,6 +71,9 @@
return mEffect;
}
+ // for TIME_CHECK
+ const std::string getClassName() const { return "EffectHalAidl"; }
+
private:
friend class sp<EffectHalAidl>;
@@ -73,6 +84,8 @@
const int32_t mIoId;
const ::aidl::android::hardware::audio::effect::Descriptor mDesc;
std::unique_ptr<EffectConversionHelperAidl> mConversion;
+ std::unique_ptr<StatusMQ> mStatusQ;
+ std::unique_ptr<DataMQ> mInputQ, mOutputQ;
sp<EffectBufferHalInterface> mInBuffer, mOutBuffer;
effect_config_t mConfig;
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index ed952a3..7ecdbd2 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -107,13 +107,13 @@
}
status_t EffectHalHidl::process() {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS));
}
status_t EffectHalHidl::processReverse() {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
return processImpl(static_cast<uint32_t>(MessageQueueFlagBits::REQUEST_PROCESS_REVERSE));
}
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 0aae87b..b418b6c 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -20,7 +20,6 @@
#define LOG_TAG "EffectsFactoryHalAidl"
//#define LOG_NDEBUG 0
-#include <aidl/android/hardware/audio/effect/IFactory.h>
#include <error/expected_utils.h>
#include <android/binder_manager.h>
#include <media/AidlConversionCppNdk.h>
@@ -139,20 +138,18 @@
}
status_t EffectsFactoryHalAidl::dumpEffects(int fd) {
- ALOGE("%s not implemented yet, fd %d", __func__, fd);
- return INVALID_OPERATION;
+ // TODO: add proxy dump here because AIDL service EffectFactory doesn't have proxy handle
+ return mFactory->dump(fd, nullptr, 0);
}
status_t EffectsFactoryHalAidl::allocateBuffer(size_t size, sp<EffectBufferHalInterface>* buffer) {
ALOGI("%s size %zu buffer %p", __func__, size, buffer);
- // Buffer doesn't allocated here for AIDL, instead each effect open will return I/O data FMQ.
return EffectBufferHalAidl::allocate(size, buffer);
}
status_t EffectsFactoryHalAidl::mirrorBuffer(void* external, size_t size,
sp<EffectBufferHalInterface>* buffer) {
ALOGI("%s extern %p size %zu buffer %p", __func__, external, size, buffer);
- // TODO: implement with FMQ
return EffectBufferHalAidl::mirror(external, size, buffer);
}
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.h b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
index 1e85da9..9c3643b 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.h
@@ -20,6 +20,7 @@
#include <memory>
#include <mutex>
+#include <aidl/android/hardware/audio/effect/IFactory.h>
#include <android-base/thread_annotations.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <system/thread_defs.h>
@@ -59,6 +60,9 @@
detail::AudioHalVersionInfo getHalVersion() const override;
+ // for TIME_CHECK
+ const std::string getClassName() const { return "EffectsFactoryHalAidl"; }
+
private:
std::mutex mLock;
const std::shared_ptr<IFactory> mFactory;
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
index 757215d..98c655e 100644
--- a/media/libaudiohal/impl/StreamHalAidl.cpp
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -21,16 +21,25 @@
#include <cstdint>
#include <audio_utils/clock.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionUtil.h>
+#include <media/AudioParameter.h>
#include <mediautils/TimeCheck.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include "DeviceHalAidl.h"
#include "StreamHalAidl.h"
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::common::PlaybackTrackMetadata;
+using ::aidl::android::hardware::audio::common::RecordTrackMetadata;
using ::aidl::android::hardware::audio::core::IStreamCommon;
using ::aidl::android::hardware::audio::core::IStreamIn;
using ::aidl::android::hardware::audio::core::IStreamOut;
using ::aidl::android::hardware::audio::core::StreamDescriptor;
+using ::aidl::android::legacy2aidl_audio_channel_mask_t_AudioChannelLayout;
namespace android {
@@ -259,7 +268,7 @@
status_t StreamHalAidl::transfer(void *buffer, size_t bytes, size_t *transferred) {
ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (!mStream || mContext.getDataMQ() == nullptr) return NO_INIT;
mWorkerTid.store(gettid(), std::memory_order_release);
// Switch the stream into an active state if needed.
@@ -323,6 +332,26 @@
if (mIsInput) {
return sendCommand(makeHalCommand<HalCommand::Tag::burst>(0), reply);
} else {
+ if (mContext.isAsynchronous()) {
+ // Handle pause-flush-resume sequence. 'flush' from PAUSED goes to
+ // IDLE. We move here from IDLE to ACTIVE (same as 'start' from PAUSED).
+ const auto state = getState();
+ if (state == StreamDescriptor::State::IDLE) {
+ StreamDescriptor::Reply localReply{};
+ StreamDescriptor::Reply* innerReply = reply ?: &localReply;
+ if (status_t status =
+ sendCommand(makeHalCommand<HalCommand::Tag::burst>(0), innerReply);
+ status != OK) {
+ return status;
+ }
+ if (innerReply->state != StreamDescriptor::State::ACTIVE) {
+ ALOGE("%s: unexpected stream state: %s (expected ACTIVE)",
+ __func__, toString(innerReply->state).c_str());
+ return INVALID_OPERATION;
+ }
+ return OK;
+ }
+ }
return sendCommand(makeHalCommand<HalCommand::Tag::start>(), reply);
}
}
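The IDLE special case above only applies to asynchronous (offload) streams: after a pause-flush sequence they sit in IDLE, so resuming must issue a zero-length burst instead of 'start'. A one-line sketch of that decision, with a hypothetical State enum standing in for StreamDescriptor::State:

    enum class State { IDLE, PAUSED, ACTIVE };   // stand-in for StreamDescriptor::State

    // Asynchronous streams flushed while paused end up in IDLE; resume them with burst(0).
    bool resumeNeedsZeroLengthBurst(bool isAsynchronous, State state) {
        return isAsynchronous && state == State::IDLE;
    }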
@@ -334,7 +363,8 @@
return sendCommand(makeHalCommand<HalCommand::Tag::drain>(
mIsInput ? StreamDescriptor::DrainMode::DRAIN_UNSPECIFIED :
earlyNotify ? StreamDescriptor::DrainMode::DRAIN_EARLY_NOTIFY :
- StreamDescriptor::DrainMode::DRAIN_ALL), reply);
+ StreamDescriptor::DrainMode::DRAIN_ALL), reply,
+ true /*safeFromNonWorkerThread*/);
}
status_t StreamHalAidl::flush(StreamDescriptor::Reply* reply) {
@@ -404,7 +434,7 @@
const ::aidl::android::hardware::audio::core::StreamDescriptor::Command &command,
::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply,
bool safeFromNonWorkerThread) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (!safeFromNonWorkerThread) {
const pid_t workerTid = mWorkerTid.load(std::memory_order_acquire);
LOG_ALWAYS_FATAL_IF(workerTid != gettid(),
@@ -455,12 +485,29 @@
return OK;
}
+// static
+::aidl::ConversionResult<::aidl::android::hardware::audio::common::SourceMetadata>
+StreamOutHalAidl::legacy2aidl_SourceMetadata(const StreamOutHalInterface::SourceMetadata& legacy) {
+ ::aidl::android::hardware::audio::common::SourceMetadata aidl;
+ aidl.tracks = VALUE_OR_RETURN(
+ ::aidl::android::convertContainer<std::vector<PlaybackTrackMetadata>>(
+ legacy.tracks,
+ ::aidl::android::legacy2aidl_playback_track_metadata_v7_PlaybackTrackMetadata));
+ return aidl;
+}
+
StreamOutHalAidl::StreamOutHalAidl(
const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
const std::shared_ptr<IStreamOut>& stream, const sp<CallbackBroker>& callbackBroker)
: StreamHalAidl("StreamOutHalAidl", false /*isInput*/, config, nominalLatency,
std::move(context), getStreamCommon(stream)),
- mStream(stream), mCallbackBroker(callbackBroker) {}
+ mStream(stream), mCallbackBroker(callbackBroker) {
+ // Initialize the offload metadata
+ mOffloadMetadata.sampleRate = static_cast<int32_t>(config.sample_rate);
+ mOffloadMetadata.channelMask = VALUE_OR_FATAL(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(config.channel_mask, false));
+ mOffloadMetadata.averageBitRatePerSecond = static_cast<int32_t>(config.offload_info.bit_rate);
+}
StreamOutHalAidl::~StreamOutHalAidl() {
if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
@@ -468,15 +515,27 @@
}
}
+status_t StreamOutHalAidl::setParameters(const String8& kvPairs) {
+ if (!mStream) return NO_INIT;
+
+ AudioParameter parameters(kvPairs);
+ ALOGD("%s parameters: %s", __func__, parameters.toString().c_str());
+
+ if (status_t status = filterAndUpdateOffloadMetadata(parameters); status != OK) {
+ ALOGW("%s filtering or updating offload metadata failed: %d", __func__, status);
+ }
+
+ return StreamHalAidl::setParameters(parameters.toString());
+}
+
status_t StreamOutHalAidl::getLatency(uint32_t *latency) {
return StreamHalAidl::getLatency(latency);
}
-status_t StreamOutHalAidl::setVolume(float left __unused, float right __unused) {
+status_t StreamOutHalAidl::setVolume(float left, float right) {
TIME_CHECK();
if (!mStream) return NO_INIT;
- ALOGE("%s not implemented yet", __func__);
- return OK;
+ return statusTFromBinderStatus(mStream->setHwVolume({left, right}));
}
status_t StreamOutHalAidl::selectPresentation(int presentationId __unused, int programId __unused) {
@@ -514,6 +573,10 @@
status_t StreamOutHalAidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
TIME_CHECK();
if (!mStream) return NO_INIT;
+ if (!mContext.isAsynchronous()) {
+ ALOGE("%s: the callback is intended for asynchronous streams only", __func__);
+ return INVALID_OPERATION;
+ }
if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
if (auto cb = callback.promote(); cb != nullptr) {
broker->setStreamOutCallback(this, cb);
@@ -577,11 +640,12 @@
}
status_t StreamOutHalAidl::updateSourceMetadata(
- const StreamOutHalInterface::SourceMetadata& sourceMetadata __unused) {
+ const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
TIME_CHECK();
if (!mStream) return NO_INIT;
- ALOGE("%s not implemented yet", __func__);
- return OK;
+ ::aidl::android::hardware::audio::common::SourceMetadata aidlMetadata =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_SourceMetadata(sourceMetadata));
+ return statusTFromBinderStatus(mStream->updateMetadata(aidlMetadata));
}
status_t StreamOutHalAidl::getDualMonoMode(audio_dual_mono_mode_t* mode __unused) {
@@ -617,7 +681,7 @@
TIME_CHECK();
if (!mStream) return NO_INIT;
ALOGE("%s not implemented yet", __func__);
- return OK;
+ return BAD_VALUE;
}
status_t StreamOutHalAidl::setPlaybackRateParameters(
@@ -625,7 +689,7 @@
TIME_CHECK();
if (!mStream) return NO_INIT;
ALOGE("%s not implemented yet", __func__);
- return OK;
+ return BAD_VALUE;
}
status_t StreamOutHalAidl::setEventCallback(
@@ -667,6 +731,89 @@
return StreamHalAidl::exit();
}
+status_t StreamOutHalAidl::filterAndUpdateOffloadMetadata(AudioParameter &parameters) {
+ TIME_CHECK();
+
+ if (parameters.containsKey(String8(AudioParameter::keyOffloadCodecAverageBitRate)) ||
+ parameters.containsKey(String8(AudioParameter::keyOffloadCodecSampleRate)) ||
+ parameters.containsKey(String8(AudioParameter::keyOffloadCodecChannels)) ||
+ parameters.containsKey(String8(AudioParameter::keyOffloadCodecDelaySamples)) ||
+ parameters.containsKey(String8(AudioParameter::keyOffloadCodecPaddingSamples))) {
+ int value = 0;
+ if (parameters.getInt(String8(AudioParameter::keyOffloadCodecAverageBitRate), value)
+ == NO_ERROR) {
+ if (value <= 0) {
+ return BAD_VALUE;
+ }
+ mOffloadMetadata.averageBitRatePerSecond = value;
+ parameters.remove(String8(AudioParameter::keyOffloadCodecAverageBitRate));
+ }
+
+ if (parameters.getInt(String8(AudioParameter::keyOffloadCodecSampleRate), value)
+ == NO_ERROR) {
+ if (value <= 0) {
+ return BAD_VALUE;
+ }
+ mOffloadMetadata.sampleRate = value;
+ parameters.remove(String8(AudioParameter::keyOffloadCodecSampleRate));
+ }
+
+ if (parameters.getInt(String8(AudioParameter::keyOffloadCodecChannels), value)
+ == NO_ERROR) {
+ if (value <= 0) {
+ return BAD_VALUE;
+ }
+ audio_channel_mask_t channel_mask =
+ audio_channel_out_mask_from_count(static_cast<uint32_t>(value));
+ if (channel_mask == AUDIO_CHANNEL_INVALID) {
+ return BAD_VALUE;
+ }
+ mOffloadMetadata.channelMask =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ channel_mask, false));
+ parameters.remove(String8(AudioParameter::keyOffloadCodecChannels));
+ }
+
+ // The legacy keys are misnamed. The delay and padding are expressed in frames.
+ if (parameters.getInt(String8(AudioParameter::keyOffloadCodecDelaySamples), value)
+ == NO_ERROR) {
+ if (value < 0) {
+ return BAD_VALUE;
+ }
+ mOffloadMetadata.delayFrames = value;
+ parameters.remove(String8(AudioParameter::keyOffloadCodecDelaySamples));
+ }
+
+ if (parameters.getInt(String8(AudioParameter::keyOffloadCodecPaddingSamples), value)
+ == NO_ERROR) {
+ if (value < 0) {
+ return BAD_VALUE;
+ }
+ mOffloadMetadata.paddingFrames = value;
+ parameters.remove(String8(AudioParameter::keyOffloadCodecPaddingSamples));
+ }
+
+ ALOGD("%s set offload metadata %s", __func__, mOffloadMetadata.toString().c_str());
+ status_t status = statusTFromBinderStatus(mStream->updateOffloadMetadata(mOffloadMetadata));
+ if (status != OK) {
+ ALOGE("%s: updateOffloadMetadata failed %d", __func__, status);
+ return status;
+ }
+ }
+ return OK;
+}
+
+// static
+::aidl::ConversionResult<::aidl::android::hardware::audio::common::SinkMetadata>
+StreamInHalAidl::legacy2aidl_SinkMetadata(const StreamInHalInterface::SinkMetadata& legacy) {
+ ::aidl::android::hardware::audio::common::SinkMetadata aidl;
+ aidl.tracks = VALUE_OR_RETURN(
+ ::aidl::android::convertContainer<std::vector<RecordTrackMetadata>>(
+ legacy.tracks,
+ ::aidl::android::legacy2aidl_record_track_metadata_v7_RecordTrackMetadata));
+ return aidl;
+}
+
StreamInHalAidl::StreamInHalAidl(
const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
const std::shared_ptr<IStreamIn>& stream)
@@ -716,11 +863,12 @@
}
status_t StreamInHalAidl::updateSinkMetadata(
- const StreamInHalInterface::SinkMetadata& sinkMetadata __unused) {
+ const StreamInHalInterface::SinkMetadata& sinkMetadata) {
TIME_CHECK();
if (!mStream) return NO_INIT;
- ALOGE("%s not implemented yet", __func__);
- return OK;
+ ::aidl::android::hardware::audio::common::SinkMetadata aidlMetadata =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_SinkMetadata(sinkMetadata));
+ return statusTFromBinderStatus(mStream->updateMetadata(aidlMetadata));
}
status_t StreamInHalAidl::setPreferredMicrophoneDirection(
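filterAndUpdateOffloadMetadata above consumes the legacy offload codec keys; the channel-count key, for instance, is validated and converted to an output channel mask before the AIDL conversion. A minimal sketch of that step using audio_channel_out_mask_from_count from system/audio.h:

    #include <system/audio.h>

    // Mirrors the BAD_VALUE checks in the change above: non-positive or unsupported
    // channel counts yield AUDIO_CHANNEL_INVALID.
    audio_channel_mask_t offloadChannelMaskFromCount(int channelCount) {
        if (channelCount <= 0) return AUDIO_CHANNEL_INVALID;
        return audio_channel_out_mask_from_count(static_cast<uint32_t>(channelCount));
    }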
diff --git a/media/libaudiohal/impl/StreamHalAidl.h b/media/libaudiohal/impl/StreamHalAidl.h
index f43c8e2..e3cae77 100644
--- a/media/libaudiohal/impl/StreamHalAidl.h
+++ b/media/libaudiohal/impl/StreamHalAidl.h
@@ -21,16 +21,20 @@
#include <mutex>
#include <string_view>
+#include <aidl/android/hardware/audio/common/AudioOffloadMetadata.h>
#include <aidl/android/hardware/audio/core/BpStreamCommon.h>
#include <aidl/android/hardware/audio/core/BpStreamIn.h>
#include <aidl/android/hardware/audio/core/BpStreamOut.h>
#include <fmq/AidlMessageQueue.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>
+#include <media/AudioParameter.h>
#include "ConversionHelperAidl.h"
#include "StreamPowerLog.h"
+using ::aidl::android::hardware::audio::common::AudioOffloadMetadata;
+
namespace android {
class StreamContextAidl {
@@ -43,24 +47,28 @@
::aidl::android::hardware::common::fmq::SynchronizedReadWrite> DataMQ;
explicit StreamContextAidl(
- const ::aidl::android::hardware::audio::core::StreamDescriptor& descriptor)
+ const ::aidl::android::hardware::audio::core::StreamDescriptor& descriptor,
+ bool isAsynchronous)
: mFrameSizeBytes(descriptor.frameSizeBytes),
mCommandMQ(new CommandMQ(descriptor.command)),
mReplyMQ(new ReplyMQ(descriptor.reply)),
mBufferSizeFrames(descriptor.bufferSizeFrames),
- mDataMQ(maybeCreateDataMQ(descriptor)) {}
+ mDataMQ(maybeCreateDataMQ(descriptor)),
+ mIsAsynchronous(isAsynchronous) {}
StreamContextAidl(StreamContextAidl&& other) :
mFrameSizeBytes(other.mFrameSizeBytes),
mCommandMQ(std::move(other.mCommandMQ)),
mReplyMQ(std::move(other.mReplyMQ)),
mBufferSizeFrames(other.mBufferSizeFrames),
- mDataMQ(std::move(other.mDataMQ)) {}
+ mDataMQ(std::move(other.mDataMQ)),
+ mIsAsynchronous(other.mIsAsynchronous) {}
StreamContextAidl& operator=(StreamContextAidl&& other) {
mFrameSizeBytes = other.mFrameSizeBytes;
mCommandMQ = std::move(other.mCommandMQ);
mReplyMQ = std::move(other.mReplyMQ);
mBufferSizeFrames = other.mBufferSizeFrames;
mDataMQ = std::move(other.mDataMQ);
+ mIsAsynchronous = other.mIsAsynchronous;
return *this;
}
bool isValid() const {
@@ -78,6 +86,7 @@
DataMQ* getDataMQ() const { return mDataMQ.get(); }
size_t getFrameSizeBytes() const { return mFrameSizeBytes; }
ReplyMQ* getReplyMQ() const { return mReplyMQ.get(); }
+ bool isAsynchronous() const { return mIsAsynchronous; }
private:
static std::unique_ptr<DataMQ> maybeCreateDataMQ(
@@ -94,6 +103,7 @@
std::unique_ptr<ReplyMQ> mReplyMQ;
size_t mBufferSizeFrames;
std::unique_ptr<DataMQ> mDataMQ;
+ bool mIsAsynchronous;
};
class StreamHalAidl : public virtual StreamHalInterface, public ConversionHelperAidl {
@@ -224,6 +234,9 @@
class StreamOutHalAidl : public StreamOutHalInterface, public StreamHalAidl {
public:
+ // Extract the output stream parameters and set them via dedicated AIDL APIs.
+ status_t setParameters(const String8& kvPairs) override;
+
// Return the audio hardware driver estimated latency in milliseconds.
status_t getLatency(uint32_t *latency) override;
@@ -300,9 +313,14 @@
private:
friend class sp<StreamOutHalAidl>;
+ static ::aidl::ConversionResult<::aidl::android::hardware::audio::common::SourceMetadata>
+ legacy2aidl_SourceMetadata(const StreamOutHalInterface::SourceMetadata& legacy);
+
const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamOut> mStream;
const wp<CallbackBroker> mCallbackBroker;
+ AudioOffloadMetadata mOffloadMetadata;
+
// Can not be constructed directly by clients.
StreamOutHalAidl(
const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
@@ -310,6 +328,10 @@
const sp<CallbackBroker>& callbackBroker);
~StreamOutHalAidl() override;
+
+ // Filter and update the offload metadata. The parameters which are related to the offload
+ // metadata will be removed after filtering.
+ status_t filterAndUpdateOffloadMetadata(AudioParameter &parameters);
};
class StreamInHalAidl : public StreamInHalInterface, public StreamHalAidl {
@@ -343,6 +365,9 @@
private:
friend class sp<StreamInHalAidl>;
+ static ::aidl::ConversionResult<::aidl::android::hardware::audio::common::SinkMetadata>
+ legacy2aidl_SinkMetadata(const StreamInHalInterface::SinkMetadata& legacy);
+
const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamIn> mStream;
// Can not be constructed directly by clients.
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 07c6df5..192790c 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -441,7 +441,7 @@
#endif
status_t StreamOutHalHidl::write(const void *buffer, size_t bytes, size_t *written) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (mStream == 0) return NO_INIT;
*written = 0;
@@ -587,7 +587,7 @@
}
status_t StreamOutHalHidl::getRenderPosition(uint32_t *dspFrames) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (mStream == 0) return NO_INIT;
Result retval;
Return<void> ret = mStream->getRenderPosition(
@@ -668,7 +668,7 @@
}
status_t StreamOutHalHidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (mStream == 0) return NO_INIT;
if (mWriterClient == gettid() && mCommandMQ) {
return callWriterThread(
@@ -1012,7 +1012,7 @@
}
status_t StreamInHalHidl::read(void *buffer, size_t bytes, size_t *read) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (mStream == 0) return NO_INIT;
*read = 0;
@@ -1146,7 +1146,7 @@
}
status_t StreamInHalHidl::getCapturePosition(int64_t *frames, int64_t *time) {
- // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ // TIME_CHECK(); // TODO(b/243839867) reenable only when optimized.
if (mStream == 0) return NO_INIT;
if (mReaderClient == gettid() && mCommandMQ) {
ReadParameters params;
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
index 15768b3..0bc23f9 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
@@ -33,9 +33,11 @@
namespace android {
namespace effect {
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::AcousticEchoCanceler;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -64,8 +66,13 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for a vendor extension, copy the data area into DefaultExtension; the parameter type is ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(AcousticEchoCanceler, acousticEchoCanceler, vendor,
+ ext);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->setParameter(aidlParam)));
+ break;
}
}
@@ -73,7 +80,7 @@
}
status_t AidlConversionAec::getParameter(EffectParamWriter& param) {
- uint32_t type = 0, value = 0;
+ uint32_t type = 0;
if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
OK != param.readFromParameter(&type)) {
param.setStatus(BAD_VALUE);
@@ -85,29 +92,30 @@
case AEC_PARAM_ECHO_DELAY:
FALLTHROUGH_INTENDED;
case AEC_PARAM_PROPERTIES: {
+ int32_t delay = 0;
Parameter::Id id =
MAKE_SPECIFIC_PARAMETER_ID(AcousticEchoCanceler, acousticEchoCancelerTag,
AcousticEchoCanceler::echoDelayUs);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
- value = VALUE_OR_RETURN_STATUS(
+ delay = VALUE_OR_RETURN_STATUS(
aidl::android::aidl2legacy_Parameter_aec_uint32_echoDelay(aidlParam));
- break;
+ return param.writeToValue(&delay);
}
case AEC_PARAM_MOBILE_MODE: {
+ int32_t mode = 0;
Parameter::Id id =
MAKE_SPECIFIC_PARAMETER_ID(AcousticEchoCanceler, acousticEchoCancelerTag,
AcousticEchoCanceler::mobileMode);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
- value = VALUE_OR_RETURN_STATUS(
+ mode = VALUE_OR_RETURN_STATUS(
aidl::android::aidl2legacy_Parameter_aec_uint32_mobileMode(aidlParam));
- break;
+ return param.writeToValue(&mode);
}
- default:
- // use vendor extension implementation
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ default: {
+ // use the vendor extension implementation; the first 32 bits (param type) are not passed to the HAL
+ VENDOR_EXTENSION_GET_AND_RETURN(AcousticEchoCanceler, acousticEchoCanceler, param);
+ }
}
- return param.writeToValue(&value);
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp
new file mode 100644
index 0000000..8c19612
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.cpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionAgc1"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_agc.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionAgc1.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::getParameterSpecificField;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::AutomaticGainControlV1;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionAgc1::setParameterLevel(EffectParamReader& param) {
+ int16_t level;
+ RETURN_STATUS_IF_ERROR(param.readFromValue(&level));
+ Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV1, automaticGainControlV1,
+ targetPeakLevelDbFs, level);
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionAgc1::setParameterGain(EffectParamReader& param) {
+ int16_t gain;
+ RETURN_STATUS_IF_ERROR(param.readFromValue(&gain));
+ Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV1, automaticGainControlV1,
+ maxCompressionGainDb, gain);
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionAgc1::setParameterLimiterEnable(EffectParamReader& param) {
+ bool enable;
+ RETURN_STATUS_IF_ERROR(param.readFromValue(&enable));
+ Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV1, automaticGainControlV1,
+ enableLimiter, enable);
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionAgc1::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ switch (type) {
+ case AGC_PARAM_TARGET_LEVEL: {
+ return setParameterLevel(param);
+ }
+ case AGC_PARAM_COMP_GAIN: {
+ return setParameterGain(param);
+ }
+ case AGC_PARAM_LIMITER_ENA: {
+ return setParameterLimiterEnable(param);
+ }
+ case AGC_PARAM_PROPERTIES: {
+ RETURN_STATUS_IF_ERROR(setParameterLevel(param));
+ RETURN_STATUS_IF_ERROR(setParameterGain(param));
+ RETURN_STATUS_IF_ERROR(setParameterLimiterEnable(param));
+ return OK;
+ }
+ default: {
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV1,
+ automaticGainControlV1, vendor, ext);
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+ }
+ }
+}
+
+status_t AidlConversionAgc1::getParameterLevel(EffectParamWriter& param) {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControlV1, automaticGainControlV1Tag,
+ AutomaticGainControlV1::targetPeakLevelDbFs);
+ Parameter aidlParam;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ int32_t level = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, AutomaticGainControlV1, automaticGainControlV1,
+ AutomaticGainControlV1::targetPeakLevelDbFs, int32_t));
+ return param.writeToValue(&level);
+}
+
+status_t AidlConversionAgc1::getParameterGain(EffectParamWriter& param) {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControlV1, automaticGainControlV1Tag,
+ AutomaticGainControlV1::maxCompressionGainDb);
+ Parameter aidlParam;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ int32_t gain = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, AutomaticGainControlV1, automaticGainControlV1,
+ AutomaticGainControlV1::maxCompressionGainDb, int32_t));
+ return param.writeToValue(&gain);
+}
+
+status_t AidlConversionAgc1::getParameterLimiterEnable(EffectParamWriter& param) {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControlV1, automaticGainControlV1Tag,
+ AutomaticGainControlV1::enableLimiter);
+ Parameter aidlParam;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ bool enable = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, AutomaticGainControlV1, automaticGainControlV1,
+ AutomaticGainControlV1::enableLimiter, bool));
+ return param.writeToValue(&enable);
+}
+
+status_t AidlConversionAgc1::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ switch (type) {
+ case AGC_PARAM_TARGET_LEVEL: {
+ return getParameterLevel(param);
+ }
+ case AGC_PARAM_COMP_GAIN: {
+ return getParameterGain(param);
+ }
+ case AGC_PARAM_LIMITER_ENA: {
+ return getParameterLimiterEnable(param);
+ }
+ case AGC_PARAM_PROPERTIES: {
+ RETURN_STATUS_IF_ERROR(getParameterLevel(param));
+ RETURN_STATUS_IF_ERROR(getParameterGain(param));
+ RETURN_STATUS_IF_ERROR(getParameterLimiterEnable(param));
+ return OK;
+ }
+ default: {
+ VENDOR_EXTENSION_GET_AND_RETURN(AutomaticGainControlV1, automaticGainControlV1, param);
+ }
+ }
+}
+
+} // namespace effect
+} // namespace android
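
The AGC_PARAM_PROPERTIES branches above work by letting the three single-field helpers consume the packed legacy properties in order: target level, compression gain, then the limiter flag, each read with readFromValue(). A minimal sketch of that value-area packing follows; the struct name and helper are illustrative (the real layout is defined by the legacy AGC header), shown here only to make the sequential reads explicit.

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Illustrative packing of the AGC "properties" value area consumed above.
    struct AgcPropertiesSketch {
        int16_t targetLevelDbFs;    // read by setParameterLevel()
        int16_t compressionGainDb;  // read by setParameterGain()
        uint8_t limiterEnabled;     // read by setParameterLimiterEnable()
    } __attribute__((packed));

    inline std::vector<uint8_t> packAgcProperties(int16_t level, int16_t gain, bool limiter) {
        AgcPropertiesSketch p{level, gain, static_cast<uint8_t>(limiter)};
        std::vector<uint8_t> value(sizeof(p));
        std::memcpy(value.data(), &p, sizeof(p));
        return value;  // pairs with AGC_PARAM_PROPERTIES in the parameter area
    }
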
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.h
new file mode 100644
index 0000000..b0509fd
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc1.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionAgc1 : public EffectConversionHelperAidl {
+ public:
+ AidlConversionAgc1(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionAgc1() {}
+
+ private:
+ status_t setParameterLevel(utils::EffectParamReader& param);
+ status_t setParameterGain(utils::EffectParamReader& param);
+ status_t setParameterLimiterEnable(utils::EffectParamReader& param);
+ status_t setParameter(utils::EffectParamReader& param) override;
+
+ status_t getParameterLevel(utils::EffectParamWriter& param);
+ status_t getParameterGain(utils::EffectParamWriter& param);
+ status_t getParameterLimiterEnable(utils::EffectParamWriter& param);
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
index b736936..82cea3d 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
@@ -33,9 +33,11 @@
namespace android {
namespace effect {
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::AutomaticGainControlV2;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -65,8 +67,12 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(AutomaticGainControlV2, automaticGainControlV2,
+ vendor, ext);
+ break;
}
}
@@ -110,8 +116,7 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(AutomaticGainControlV2, automaticGainControlV2, param);
}
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
index 9ec593f..1cf2c73 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
@@ -35,10 +35,12 @@
namespace effect {
using ::aidl::android::convertIntegral;
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::BassBoost;
using ::aidl::android::hardware::audio::effect::Parameter;
using ::aidl::android::hardware::audio::effect::Range;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -63,8 +65,11 @@
return BAD_VALUE;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(BassBoost, bassBoost, vendor, ext);
+ break;
}
}
@@ -98,8 +103,7 @@
return param.writeToValue(&value);
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(BassBoost, bassBoost, param);
}
}
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
index 17cedf7..7bee37b 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
@@ -34,9 +34,11 @@
namespace android {
namespace effect {
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::Downmix;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -57,8 +59,10 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Downmix, downmix, vendor, ext);
}
}
@@ -83,8 +87,7 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(Downmix, downmix, param);
}
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
index 4555c9f..d8fea47 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
@@ -27,7 +27,7 @@
#include <media/audiohal/AudioEffectUuid.h>
#include <system/audio_effect.h>
#include <system/audio_effects/effect_dynamicsprocessing.h>
-
+#include <Utils.h>
#include <utils/Log.h>
#include "AidlConversionDynamicsProcessing.h"
@@ -36,30 +36,26 @@
namespace effect {
using ::aidl::android::convertIntegral;
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::Capability;
using ::aidl::android::hardware::audio::effect::DynamicsProcessing;
using ::aidl::android::hardware::audio::effect::Parameter;
using ::aidl::android::hardware::audio::effect::toString;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
status_t AidlConversionDp::setParameter(EffectParamReader& param) {
uint32_t type = 0;
- if (OK != param.readFromParameter(&type)) {
- ALOGE("%s invalid param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&type));
Parameter aidlParam;
switch (type) {
case DP_PARAM_INPUT_GAIN: {
DynamicsProcessing::InputGain inputGainAidl;
- if (OK != param.readFromParameter(&inputGainAidl.channel) ||
- OK != param.readFromValue(&inputGainAidl.gainDb)) {
- ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&inputGainAidl.channel));
+ RETURN_STATUS_IF_ERROR(param.readFromValue(&inputGainAidl.gainDb));
aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, inputGain,
{inputGainAidl});
break;
@@ -122,8 +118,12 @@
break;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam =
+ MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, vendor, ext);
+ break;
}
}
@@ -132,17 +132,12 @@
status_t AidlConversionDp::getParameter(EffectParamWriter& param) {
uint32_t type = 0;
- if (OK != param.readFromParameter(&type)) {
- ALOGE("%s invalid param %s", __func__, param.toString().c_str());
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&type));
Parameter aidlParam;
switch (type) {
case DP_PARAM_INPUT_GAIN: {
int32_t channel;
- if (OK != param.readFromParameter(&channel)) {
- ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
DynamicsProcessing::inputGain);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
@@ -161,11 +156,6 @@
return BAD_VALUE;
}
case DP_PARAM_ENGINE_ARCHITECTURE: {
- int32_t channel;
- if (OK != param.readFromParameter(&channel)) {
- ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
DynamicsProcessing::engineArchitecture);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
@@ -186,18 +176,15 @@
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.postEqStage.inUse));
int32_t limiterInUse =
VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.limiterInUse));
- if (OK != param.writeToValue(&resolution) ||
- OK != param.writeToValue(&engine.preferredProcessingDurationMs) ||
- OK != param.writeToValue(&preEqInUse) ||
- OK != param.writeToValue(&engine.preEqStage.bandCount) ||
- OK != param.writeToValue(&mbcInUse) ||
- OK != param.writeToValue(&engine.mbcStage.bandCount) ||
- OK != param.writeToValue(&postEqInUse) ||
- OK != param.writeToValue(&engine.postEqStage.bandCount) ||
- OK != param.writeToValue(&limiterInUse)) {
- ALOGE("%s invalid engineArchitecture %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&resolution));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&engine.preferredProcessingDurationMs));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&preEqInUse));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&engine.preEqStage.bandCount));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&mbcInUse));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&engine.mbcStage.bandCount));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&postEqInUse));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&engine.postEqStage.bandCount));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&limiterInUse));
mEngine = engine;
return OK;
}
@@ -223,19 +210,13 @@
return getLimiterConfig(param);
}
case DP_PARAM_GET_CHANNEL_COUNT: {
- uint32_t channel = VALUE_OR_RETURN_STATUS(
- aidl::android::aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- mCommon.input.base.channelMask, true /* input */));
- if (OK != param.writeToValue(&channel)) {
- ALOGE("%s write channel number %d to param failed %s", __func__, channel,
- param.toString().c_str());
- return BAD_VALUE;
- }
+ uint32_t channel = ::aidl::android::hardware::audio::common::getChannelCount(
+ mCommon.input.base.channelMask);
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&channel));
return OK;
}
default: {
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(DynamicsProcessing, dynamicsProcessing, param);
}
}
}
@@ -243,10 +224,9 @@
aidl::ConversionResult<DynamicsProcessing::ChannelConfig>
AidlConversionDp::readChannelConfigFromParam(EffectParamReader& param) {
int32_t enable, channel;
- if (OK != param.readFromParameter(&channel) || OK != param.readFromValue(&enable)) {
- ALOGE("%s invalid channel config param %s", __func__, param.toString().c_str());
- return ::android::base::unexpected(::android::BAD_VALUE);
- }
+ RETURN_IF_ERROR(param.readFromParameter(&channel));
+ RETURN_IF_ERROR(param.readFromValue(&enable));
+
return DynamicsProcessing::ChannelConfig(
{.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable)), .channel = channel});
}
@@ -255,14 +235,12 @@
AidlConversionDp::readEqBandConfigFromParam(EffectParamReader& param) {
DynamicsProcessing::EqBandConfig config;
int32_t enable;
- if (OK != param.readFromParameter(&config.channel) ||
- OK != param.readFromParameter(&config.band) ||
- OK != param.readFromValue(&enable) ||
- OK != param.readFromValue(&config.cutoffFrequencyHz) ||
- OK != param.readFromValue(&config.gainDb)) {
- ALOGE("%s invalid eq band param %s", __func__, param.toString().c_str());
- return ::android::base::unexpected(::android::BAD_VALUE);
- }
+ RETURN_IF_ERROR(param.readFromParameter(&config.channel));
+ RETURN_IF_ERROR(param.readFromParameter(&config.band));
+ RETURN_IF_ERROR(param.readFromValue(&enable));
+ RETURN_IF_ERROR(param.readFromValue(&config.cutoffFrequencyHz));
+ RETURN_IF_ERROR(param.readFromValue(&config.gainDb));
+
config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
return config;
}
@@ -271,22 +249,20 @@
AidlConversionDp::readMbcBandConfigFromParam(EffectParamReader& param) {
DynamicsProcessing::MbcBandConfig config;
int32_t enable;
- if (OK != param.readFromParameter(&config.channel) ||
- OK != param.readFromParameter(&config.band) ||
- OK != param.readFromValue(&enable) ||
- OK != param.readFromValue(&config.cutoffFrequencyHz) ||
- OK != param.readFromValue(&config.attackTimeMs) ||
- OK != param.readFromValue(&config.releaseTimeMs) ||
- OK != param.readFromValue(&config.ratio) ||
- OK != param.readFromValue(&config.thresholdDb) ||
- OK != param.readFromValue(&config.kneeWidthDb) ||
- OK != param.readFromValue(&config.noiseGateThresholdDb) ||
- OK != param.readFromValue(&config.expanderRatio) ||
- OK != param.readFromValue(&config.preGainDb) ||
- OK != param.readFromValue(&config.postGainDb)) {
- ALOGE("%s invalid mbc band config param %s", __func__, param.toString().c_str());
- return ::android::base::unexpected(::android::BAD_VALUE);
- }
+ RETURN_IF_ERROR(param.readFromParameter(&config.channel));
+ RETURN_IF_ERROR(param.readFromParameter(&config.band));
+ RETURN_IF_ERROR(param.readFromValue(&enable));
+ RETURN_IF_ERROR(param.readFromValue(&config.cutoffFrequencyHz));
+ RETURN_IF_ERROR(param.readFromValue(&config.attackTimeMs));
+ RETURN_IF_ERROR(param.readFromValue(&config.releaseTimeMs));
+ RETURN_IF_ERROR(param.readFromValue(&config.ratio));
+ RETURN_IF_ERROR(param.readFromValue(&config.thresholdDb));
+ RETURN_IF_ERROR(param.readFromValue(&config.kneeWidthDb));
+ RETURN_IF_ERROR(param.readFromValue(&config.noiseGateThresholdDb));
+ RETURN_IF_ERROR(param.readFromValue(&config.expanderRatio));
+ RETURN_IF_ERROR(param.readFromValue(&config.preGainDb));
+ RETURN_IF_ERROR(param.readFromValue(&config.postGainDb));
+
config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
return config;
}
@@ -295,18 +271,16 @@
AidlConversionDp::readLimiterConfigFromParam(EffectParamReader& param) {
DynamicsProcessing::LimiterConfig config;
int32_t enable, inUse;
- if (OK != param.readFromParameter(&config.channel) ||
- OK != param.readFromValue(&inUse) ||
- OK != param.readFromValue(&enable) ||
- OK != param.readFromValue(&config.linkGroup) ||
- OK != param.readFromValue(&config.attackTimeMs) ||
- OK != param.readFromValue(&config.releaseTimeMs) ||
- OK != param.readFromValue(&config.ratio) ||
- OK != param.readFromValue(&config.thresholdDb) ||
- OK != param.readFromValue(&config.postGainDb)) {
- ALOGE("%s invalid limiter config param %s", __func__, param.toString().c_str());
- return ::android::base::unexpected(::android::BAD_VALUE);
- }
+ RETURN_IF_ERROR(param.readFromParameter(&config.channel));
+ RETURN_IF_ERROR(param.readFromValue(&inUse));
+ RETURN_IF_ERROR(param.readFromValue(&enable));
+ RETURN_IF_ERROR(param.readFromValue(&config.linkGroup));
+ RETURN_IF_ERROR(param.readFromValue(&config.attackTimeMs));
+ RETURN_IF_ERROR(param.readFromValue(&config.releaseTimeMs));
+ RETURN_IF_ERROR(param.readFromValue(&config.ratio));
+ RETURN_IF_ERROR(param.readFromValue(&config.thresholdDb));
+ RETURN_IF_ERROR(param.readFromValue(&config.postGainDb));
+
config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
return config;
}
@@ -315,18 +289,15 @@
AidlConversionDp::readEngineArchitectureFromParam(EffectParamReader& param) {
DynamicsProcessing::EngineArchitecture engine;
int32_t variant, preEqInUse, mbcInUse, postEqInUse, limiterInUse;
- if (OK != param.readFromValue(&variant) &&
- OK != param.readFromValue(&engine.preferredProcessingDurationMs) &&
- OK != param.readFromValue(&preEqInUse) &&
- OK != param.readFromValue(&engine.preEqStage.bandCount) &&
- OK != param.readFromValue(&mbcInUse) &&
- OK != param.readFromValue(&engine.mbcStage.bandCount) &&
- OK != param.readFromValue(&postEqInUse) &&
- OK != param.readFromValue(&engine.postEqStage.bandCount) &&
- OK != param.readFromValue(&limiterInUse)) {
- ALOGE("%s invalid engineArchitecture %s", __func__, param.toString().c_str());
- return ::android::base::unexpected(::android::BAD_VALUE);
- }
+ RETURN_IF_ERROR(param.readFromValue(&variant));
+ RETURN_IF_ERROR(param.readFromValue(&engine.preferredProcessingDurationMs));
+ RETURN_IF_ERROR(param.readFromValue(&preEqInUse));
+ RETURN_IF_ERROR(param.readFromValue(&engine.preEqStage.bandCount));
+ RETURN_IF_ERROR(param.readFromValue(&mbcInUse));
+ RETURN_IF_ERROR(param.readFromValue(&engine.mbcStage.bandCount));
+ RETURN_IF_ERROR(param.readFromValue(&postEqInUse));
+ RETURN_IF_ERROR(param.readFromValue(&engine.postEqStage.bandCount));
+ RETURN_IF_ERROR(param.readFromValue(&limiterInUse));
engine.resolutionPreference = VALUE_OR_RETURN(
aidl::android::legacy2aidl_int32_DynamicsProcessing_ResolutionPreference(variant));
@@ -339,10 +310,7 @@
status_t AidlConversionDp::getChannelConfig(DynamicsProcessing::Tag tag, EffectParamWriter& param) {
int32_t channel;
- if (OK != param.readFromParameter(&channel)) {
- ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
Parameter aidlParam;
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag, tag);
@@ -384,13 +352,9 @@
for (const auto& ch : channels) {
if (ch.channel == channel) {
int32_t enable = ch.enable;
- if (OK != param.writeToValue(&inUse) ||
- OK != param.writeToValue(&enable) ||
- OK != param.writeToValue(&bandCount)) {
- ALOGE("%s failed to write into param value %s", __func__,
- param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&inUse));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&enable));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandCount));
return OK;
}
}
@@ -400,10 +364,8 @@
status_t AidlConversionDp::getEqBandConfig(DynamicsProcessing::Tag tag, EffectParamWriter& param) {
int32_t channel, band;
- if (OK != param.readFromParameter(&channel) || OK != param.readFromParameter(&band)) {
- ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&band));
Parameter aidlParam;
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag, tag);
@@ -425,12 +387,9 @@
for (const auto& bandIt : bands) {
if (bandIt.channel == channel && bandIt.band == band) {
int32_t enable = bandIt.enable;
- if (OK != param.writeToValue(&enable) ||
- OK != param.writeToValue(&bandIt.cutoffFrequencyHz) ||
- OK != param.writeToValue(&bandIt.gainDb)) {
- ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&enable));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.cutoffFrequencyHz));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.gainDb));
return OK;
}
}
@@ -440,10 +399,8 @@
status_t AidlConversionDp::getMbcBandConfig(EffectParamWriter& param) {
int32_t channel, band;
- if (OK != param.readFromParameter(&channel) || OK != param.readFromParameter(&band)) {
- ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&band));
Parameter aidlParam;
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
DynamicsProcessing::mbcBand);
@@ -457,20 +414,17 @@
for (const auto& bandIt : bands) {
if (bandIt.channel == channel && bandIt.band == band) {
int32_t enable = bandIt.enable;
- if (OK != param.writeToValue(&enable) ||
- OK != param.writeToValue(&bandIt.cutoffFrequencyHz) ||
- OK != param.writeToValue(&bandIt.attackTimeMs) ||
- OK != param.writeToValue(&bandIt.releaseTimeMs) ||
- OK != param.writeToValue(&bandIt.ratio) ||
- OK != param.writeToValue(&bandIt.thresholdDb) ||
- OK != param.writeToValue(&bandIt.kneeWidthDb) ||
- OK != param.writeToValue(&bandIt.noiseGateThresholdDb) ||
- OK != param.writeToValue(&bandIt.expanderRatio) ||
- OK != param.writeToValue(&bandIt.preGainDb) ||
- OK != param.writeToValue(&bandIt.postGainDb)) {
- ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&enable));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.cutoffFrequencyHz));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.attackTimeMs));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.releaseTimeMs));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.ratio));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.thresholdDb));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.kneeWidthDb));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.noiseGateThresholdDb));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.expanderRatio));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.preGainDb));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&bandIt.postGainDb));
return OK;
}
}
@@ -480,10 +434,7 @@
status_t AidlConversionDp::getLimiterConfig(EffectParamWriter& param) {
int32_t channel;
- if (OK != param.readFromParameter(&channel)) {
- ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
Parameter aidlParam;
Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
DynamicsProcessing::limiter);
@@ -498,17 +449,14 @@
if (config.channel == channel) {
int32_t inUse = mEngine.limiterInUse;
int32_t enable = config.enable;
- if (OK != param.writeToValue(&inUse) ||
- OK != param.writeToValue(&enable) ||
- OK != param.writeToValue(&config.linkGroup) ||
- OK != param.writeToValue(&config.attackTimeMs) ||
- OK != param.writeToValue(&config.releaseTimeMs) ||
- OK != param.writeToValue(&config.ratio) ||
- OK != param.writeToValue(&config.thresholdDb) ||
- OK != param.writeToValue(&config.postGainDb)) {
- ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&inUse));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&enable));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.linkGroup));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.attackTimeMs));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.releaseTimeMs));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.ratio));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.thresholdDb));
+ RETURN_STATUS_IF_ERROR(param.writeToValue(&config.postGainDb));
return OK;
}
}
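
The if-chains removed throughout this file are replaced by the RETURN_STATUS_IF_ERROR / RETURN_IF_ERROR macros from error/expected_utils.h, which evaluate each read or write once and return the first failing status (the old readEngineArchitectureFromParam joined its reads with &&, so a single failed read would not actually be reported). The macro below is a simplified stand-in, not the real definition, included only to show the early-return shape of the rewritten code.

    #include <utils/Errors.h>  // android::status_t, android::OK

    // Simplified stand-in for RETURN_STATUS_IF_ERROR (see error/expected_utils.h).
    #define SKETCH_RETURN_STATUS_IF_ERROR(expr)        \
        do {                                            \
            const ::android::status_t _st = (expr);     \
            if (_st != ::android::OK) return _st;       \
        } while (false)

    // Usage mirrors the rewritten readers/writers:
    //   SKETCH_RETURN_STATUS_IF_ERROR(param.readFromParameter(&channel));
    //   SKETCH_RETURN_STATUS_IF_ERROR(param.writeToValue(&inUse));
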
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
index 0544e3f..49e41a4 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
@@ -39,6 +39,7 @@
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::EnvironmentalReverb;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -166,7 +167,13 @@
break;
}
default: {
- // TODO: handle with vendor extension
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ Parameter aidlParam = MAKE_SPECIFIC_PARAMETER(EnvironmentalReverb,
+ environmentalReverb, vendor, ext);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->setParameter(aidlParam)));
+ break;
}
}
return OK;
@@ -240,8 +247,7 @@
break;
}
default: {
- // TODO: handle with vendor extension
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(EnvironmentalReverb, environmentalReverb, param);
}
}
return OK;
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
index a10d271..f1c2926 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
@@ -37,16 +37,16 @@
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::Equalizer;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::Range;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
+using ::android::base::unexpected;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
status_t AidlConversionEq::setParameter(EffectParamReader& param) {
uint32_t type;
- uint16_t value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
- OK != param.readFromParameter(&type) ||
- OK != param.readFromValue(&value)) {
+ if (OK != param.readFromParameter(&type)) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
return BAD_VALUE;
}
@@ -54,13 +54,18 @@
Parameter aidlParam;
switch (type) {
case EQ_PARAM_CUR_PRESET: {
+ uint16_t value = 0;
+ if (OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, preset, (int)value);
break;
}
case EQ_PARAM_BAND_LEVEL: {
int32_t band;
- uint16_t level;
- if (OK != param.readFromParameter(&band) || OK != param.readFromParameter(&level)) {
+ int16_t level;
+ if (OK != param.readFromParameter(&band) || OK != param.readFromValue(&level)) {
ALOGE("%s invalid bandLevel param %s", __func__, param.toString().c_str());
return BAD_VALUE;
}
@@ -69,13 +74,39 @@
break;
}
case EQ_PARAM_PROPERTIES: {
- // TODO: handle properties setting
+ int16_t num;
+ if (OK != param.readFromValue(&num)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ // set preset if it's valid
+ if (num >= 0) {
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, preset, (int)num);
+ break;
+ }
+ // set bandLevel if no preset was set
+ if (OK != param.readFromValue(&num)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ std::vector<Equalizer::BandLevel> bandLevels;
+ for (int i = 0; i < num; i++) {
+ Equalizer::BandLevel level({.index = i});
+ if (OK != param.readFromValue((uint16_t*)&level.levelMb)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ bandLevels.push_back(level);
+ }
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, bandLevels, bandLevels);
break;
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, vendor, ext);
+ break;
}
}
@@ -89,29 +120,187 @@
return aidlParam;
}
+aidl::ConversionResult<int32_t> AidlConversionEq::getParameterPreset() {
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::preset));
+ return VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(aidlParam, Equalizer, equalizer,
+ Equalizer::preset, int32_t));
+}
+
+aidl::ConversionResult<std::string> AidlConversionEq::getParameterPresetName(
+ EffectParamWriter& param) {
+ int32_t presetIdx;
+ if (OK != param.readFromParameter(&presetIdx)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return unexpected(BAD_VALUE);
+ }
+ Parameter aidlParam = VALUE_OR_RETURN(getAidlParameter(Equalizer::presets));
+ const auto& presets = VALUE_OR_RETURN(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::presets, std::vector<Equalizer::Preset>));
+ for (const auto& preset : presets) {
+ if (presetIdx == preset.index) {
+ return preset.name;
+ }
+ }
+ return unexpected(BAD_VALUE);
+}
+
status_t AidlConversionEq::getParameter(EffectParamWriter& param) {
- uint32_t type = 0, value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
- OK != param.readFromParameter(&type)) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
param.setStatus(BAD_VALUE);
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
return BAD_VALUE;
}
- Parameter aidlParam;
+
switch (type) {
case EQ_PARAM_NUM_BANDS: {
- aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandLevels));
- auto bandLevels = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandLevels));
+ const auto& bandLevels = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
aidlParam, Equalizer, equalizer, Equalizer::bandLevels,
std::vector<Equalizer::BandLevel>));
- uint32_t num = bandLevels.size();
+ uint16_t bands = bandLevels.size();
+ return param.writeToValue(&bands);
+ }
+ case EQ_PARAM_LEVEL_RANGE: {
+ const auto& ranges = mDesc.capability.range.get<Range::equalizer>();
+ for (const auto& r : ranges) {
+ if (r.min.getTag() == Equalizer::bandLevels &&
+ r.max.getTag() == Equalizer::bandLevels) {
+ const auto& aidlMin = r.min.get<Equalizer::bandLevels>();
+ const auto& aidlMax = r.max.get<Equalizer::bandLevels>();
+ int16_t min =
+ std::min_element(aidlMin.begin(), aidlMin.end(), [](auto& a, auto& b) {
+ return a.levelMb < b.levelMb;
+ })->levelMb;
+ int16_t max =
+ std::max_element(aidlMax.begin(), aidlMax.end(), [](auto& a, auto& b) {
+ return a.levelMb < b.levelMb;
+ })->levelMb;
+ return (OK == param.writeToValue(&min) && OK == param.writeToValue(&max))
+ ? OK
+ : BAD_VALUE;
+ }
+ }
+ break;
+ }
+ case EQ_PARAM_BAND_LEVEL: {
+ int32_t bandIdx;
+ if (OK != param.readFromParameter(&bandIdx)) {
+ break;
+ }
+
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandLevels));
+ const auto& bandLevels = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::bandLevels,
+ std::vector<Equalizer::BandLevel>));
+ for (const auto& band : bandLevels) {
+ if (band.index == bandIdx) {
+ return param.writeToValue((uint16_t *)&band.levelMb);
+ }
+ }
+ break;
+ }
+ case EQ_PARAM_CENTER_FREQ: {
+ int32_t index;
+ if (OK != param.readFromParameter(&index)) {
+ break;
+ }
+
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::centerFreqMh));
+ const auto& freqs = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::centerFreqMh, std::vector<int>));
+ if ((size_t)index >= freqs.size()) {
+ ALOGE("%s index %d exceed size %zu", __func__, index, freqs.size());
+ break;
+ }
+ return param.writeToValue(&freqs[index]);
+ }
+ case EQ_PARAM_BAND_FREQ_RANGE: {
+ int32_t index;
+ if (OK != param.readFromParameter(&index)) {
+ break;
+ }
+
+ Parameter aidlParam =
+ VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandFrequencies));
+ const auto& bands = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::bandFrequencies,
+ std::vector<Equalizer::BandFrequency>));
+ for (const auto& band : bands) {
+ if (band.index == index) {
+ return (OK == param.writeToValue(&band.minMh) &&
+ OK == param.writeToValue(&band.maxMh))
+ ? OK
+ : BAD_VALUE;
+ }
+ }
+ break;
+ }
+ case EQ_PARAM_GET_BAND: {
+ int32_t freq;
+ if (OK != param.readFromParameter(&freq)) {
+ break;
+ }
+
+ Parameter aidlParam =
+ VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandFrequencies));
+ const auto& bands = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::bandFrequencies,
+ std::vector<Equalizer::BandFrequency>));
+ for (const auto& band : bands) {
+ if (freq >= band.minMh && freq <= band.maxMh) {
+ return param.writeToValue((uint16_t*)&band.index);
+ }
+ }
+ break;
+ }
+ case EQ_PARAM_CUR_PRESET: {
+ int32_t preset = VALUE_OR_RETURN_STATUS(getParameterPreset());
+ return param.writeToValue((uint16_t*)&preset);
+ }
+ case EQ_PARAM_GET_NUM_OF_PRESETS: {
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::presets));
+ const auto& presets = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::presets,
+ std::vector<Equalizer::Preset>));
+ uint16_t num = presets.size();
return param.writeToValue(&num);
}
- default:
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ case EQ_PARAM_GET_PRESET_NAME: {
+ std::string name = VALUE_OR_RETURN_STATUS(getParameterPresetName(param));
+ return param.writeToValue(name.c_str(), name.length());
+ }
+ case EQ_PARAM_PROPERTIES: {
+ int32_t preset = VALUE_OR_RETURN_STATUS(getParameterPreset());
+ if (OK != param.writeToValue((uint16_t*)&preset)) {
+ break;
+ }
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandLevels));
+ std::vector<Equalizer::BandLevel> bandLevels =
+ VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::bandLevels,
+ std::vector<Equalizer::BandLevel>));
+ uint16_t bands = bandLevels.size();
+ if (OK != param.writeToValue(&bands)) {
+ break;
+ }
+ std::sort(bandLevels.begin(), bandLevels.end(),
+ [](const auto& a, const auto& b) { return a.index < b.index; });
+ for (const auto& level : bandLevels) {
+ if (status_t status = param.writeToValue((uint16_t*)&level.levelMb); status != OK) {
+ return status;
+ }
+ }
+ return OK;
+ }
+ default: {
+ VENDOR_EXTENSION_GET_AND_RETURN(Equalizer, equalizer, param);
+ }
}
- return param.writeToValue(&value);
+
+ param.setStatus(BAD_VALUE);
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
}
} // namespace effect
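
For reference, the EQ_PARAM_PROPERTIES reply written above is laid out as a 16-bit current preset, a 16-bit band count, then one 16-bit level in millibels per band in ascending band index (hence the sort). A hedged client-side parse of that value area, with illustrative names only, could look like:

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct EqPropertiesSketch {
        int16_t preset;
        std::vector<int16_t> bandLevelsMb;
    };

    // Parses the value area produced by the EQ_PARAM_PROPERTIES branch above.
    inline bool parseEqProperties(const uint8_t* value, size_t size, EqPropertiesSketch* out) {
        int16_t preset = 0, numBands = 0;
        if (size < sizeof(preset) + sizeof(numBands)) return false;
        std::memcpy(&preset, value, sizeof(preset));
        std::memcpy(&numBands, value + sizeof(preset), sizeof(numBands));
        if (numBands < 0 ||
            size < sizeof(preset) + sizeof(numBands) + numBands * sizeof(int16_t)) {
            return false;
        }
        out->preset = preset;
        out->bandLevelsMb.resize(numBands);
        std::memcpy(out->bandLevelsMb.data(), value + sizeof(preset) + sizeof(numBands),
                    numBands * sizeof(int16_t));
        return true;
    }
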
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h
index 0433965..2509c20 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h
@@ -35,6 +35,8 @@
status_t getParameter(utils::EffectParamWriter& param) override;
aidl::ConversionResult<::aidl::android::hardware::audio::effect::Parameter> getAidlParameter(
::aidl::android::hardware::audio::effect::Equalizer::Tag tag);
+ aidl::ConversionResult<int32_t> getParameterPreset();
+ aidl::ConversionResult<std::string> getParameterPresetName(utils::EffectParamWriter& param);
};
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
index 9575e7d..763e197 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
@@ -33,9 +33,11 @@
namespace android {
namespace effect {
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::HapticGenerator;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -76,9 +78,11 @@
break;
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(HapticGenerator, hapticGenerator, vendor, ext);
+ break;
}
}
@@ -86,8 +90,8 @@
}
-// No parameter to get for HapticGenerator
-status_t AidlConversionHapticGenerator::getParameter(EffectParamWriter& param __unused) {
- return OK;
+// HapticGenerator has no standard parameters to get; forward the request to the vendor extension
+status_t AidlConversionHapticGenerator::getParameter(EffectParamWriter& param) {
+ VENDOR_EXTENSION_GET_AND_RETURN(HapticGenerator, hapticGenerator, param);
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
index e3c898f..fa74a79 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
@@ -37,6 +37,7 @@
using ::aidl::android::getParameterSpecificField;
using ::aidl::android::hardware::audio::effect::LoudnessEnhancer;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -56,9 +57,11 @@
break;
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(LoudnessEnhancer, loudnessEnhancer, vendor, ext);
+ break;
}
}
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
@@ -84,9 +87,7 @@
return param.writeToValue(&gain);
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(LoudnessEnhancer, loudnessEnhancer, param);
}
}
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
index 69184cf..3d75e48 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
@@ -33,10 +33,11 @@
namespace android {
namespace effect {
-using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::getParameterSpecificField;
-using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::NoiseSuppression;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -61,9 +62,11 @@
break;
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(NoiseSuppression, noiseSuppression, vendor, ext);
+ break;
}
}
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
@@ -100,9 +103,7 @@
break;
}
default: {
- // TODO: implement vendor extension parameters
- ALOGW("%s unknown param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
+ VENDOR_EXTENSION_GET_AND_RETURN(NoiseSuppression, noiseSuppression, param);
}
}
return param.writeToValue(&value);
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
index 3e9bf4b..f04c118 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
@@ -38,6 +38,7 @@
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::Parameter;
using ::aidl::android::hardware::audio::effect::PresetReverb;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
@@ -59,7 +60,10 @@
aidlParam = MAKE_SPECIFIC_PARAMETER(PresetReverb, presetReverb, preset,
static_cast<PresetReverb::Presets>(value));
} else {
- // handle vendor extension
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(PresetReverb, presetReverb, vendor, ext);
}
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
@@ -86,6 +90,7 @@
value = static_cast<uint16_t>(aidlPreset);
} else {
// handle vendor extension
+ VENDOR_EXTENSION_GET_AND_RETURN(PresetReverb, presetReverb, param);
}
return param.writeToValue(&value);
}
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
index 1dac479..9a759d2 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
@@ -20,6 +20,8 @@
#define LOG_TAG "AidlConversionSpatializer"
//#define LOG_NDEBUG 0
+#include <aidl/android/hardware/audio/effect/DefaultExtension.h>
+#include <aidl/android/hardware/audio/effect/VendorExtension.h>
#include <error/expected_utils.h>
#include <media/AidlConversionNdk.h>
#include <media/AidlConversionEffect.h>
@@ -34,34 +36,37 @@
namespace effect {
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::DefaultExtension;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
status_t AidlConversionSpatializer::setParameter(EffectParamReader& param) {
- uint32_t type = 0;
- uint16_t value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
- OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
- ALOGE("%s invalid param %s", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
- Parameter aidlParam;
- // TODO
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_EffectParameterReader_ParameterExtension(param));
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
status_t AidlConversionSpatializer::getParameter(EffectParamWriter& param) {
- uint32_t type = 0, value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
- OK != param.readFromParameter(&type)) {
+ DefaultExtension defaultExt;
+ // read the parameter area into the DefaultExtension byte vector
+ defaultExt.bytes.resize(param.getParameterSize());
+ if (OK != param.readFromParameter(defaultExt.bytes.data(), param.getParameterSize())) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
param.setStatus(BAD_VALUE);
return BAD_VALUE;
}
- // TODO
- return param.writeToValue(&value);
+
+ VendorExtension idTag;
+ idTag.extension.setParcelable(defaultExt);
+ Parameter::Id id = UNION_MAKE(Parameter::Id, vendorEffectTag, idTag);
+ Parameter aidlParam;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ // copy the AIDL extension data back to effect_param_t
+ return VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(aidlParam,
+ param));
}
} // namespace effect
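
With this change the Spatializer conversion (and the generic vendor-extension conversion below) no longer interprets the legacy blob at all: setParameter wraps the whole effect_param_t into an AIDL Parameter via legacy2aidl_EffectParameterReader_ParameterExtension, and getParameter copies the returned extension bytes back with aidl2legacy_ParameterExtension_EffectParameterWriter. Conceptually the payload just rides through as opaque bytes inside DefaultExtension; the sketch below, with illustrative names only, shows that round trip.

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for DefaultExtension: an opaque byte carrier.
    struct DefaultExtensionSketch {
        std::vector<uint8_t> bytes;  // raw legacy parameter/value bytes, passed through as-is
    };

    inline DefaultExtensionSketch wrapLegacyBytes(const uint8_t* data, size_t size) {
        return DefaultExtensionSketch{std::vector<uint8_t>(data, data + size)};
    }

    inline size_t unwrapLegacyBytes(const DefaultExtensionSketch& ext, uint8_t* out, size_t cap) {
        const size_t n = std::min(ext.bytes.size(), cap);
        std::copy_n(ext.bytes.begin(), n, out);
        return n;  // the caller checks n against the expected vsize
    }
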
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
index 3baf72e..488d5cd 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
@@ -22,6 +22,7 @@
//#define LOG_NDEBUG 0
#include <aidl/android/hardware/audio/effect/DefaultExtension.h>
+#include <aidl/android/hardware/audio/effect/VendorExtension.h>
#include <error/expected_utils.h>
#include <media/AidlConversionNdk.h>
#include <media/AidlConversionEffect.h>
@@ -50,48 +51,21 @@
* pass down in Parameter as is.
*/
status_t AidlConversionVendorExtension::setParameter(EffectParamReader& param) {
- size_t len = param.getValueSize();
- DefaultExtension ext;
- ext.bytes.resize(len);
- if (OK != param.readFromValue(ext.bytes.data(), len)) {
- ALOGE("%s read value from param %s failed", __func__, param.toString().c_str());
- return BAD_VALUE;
- }
- VendorExtension effectParam;
- effectParam.extension.setParcelable(ext);
- Parameter aidlParam = UNION_MAKE(Parameter, specific,
- UNION_MAKE(Parameter::Specific, vendorEffect, effectParam));
+ Parameter aidlParam = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_EffectParameterReader_ParameterExtension(param));
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
status_t AidlConversionVendorExtension::getParameter(EffectParamWriter& param) {
- int32_t tag;
- if (OK != param.readFromParameter(&tag)) {
- ALOGE("%s invalid param %s", __func__, param.toString().c_str());
- param.setStatus(BAD_VALUE);
- return BAD_VALUE;
- }
-
+ VendorExtension extId = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Param_VendorExtension(param));
+ Parameter::Id id = UNION_MAKE(Parameter::Id, vendorEffectTag, extId);
Parameter aidlParam;
- Parameter::Id id = UNION_MAKE(Parameter::Id, vendorEffectTag, tag /* parameter tag */);
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
- VendorExtension effectParam = VALUE_OR_RETURN_STATUS(
- (::aidl::android::getParameterSpecific<Parameter, VendorExtension,
- Parameter::Specific::vendorEffect>(aidlParam)));
- std::optional<DefaultExtension> ext;
- if (STATUS_OK != effectParam.extension.getParcelable(&ext) || !ext.has_value()) {
- ALOGE("%s get extension parcelable failed", __func__);
- param.setStatus(BAD_VALUE);
- return BAD_VALUE;
- }
- const auto& extBytes = ext.value().bytes;
- if (param.getValueSize() < extBytes.size()) {
- ALOGE("%s extension return data %zu exceed vsize %zu", __func__, extBytes.size(),
- param.getValueSize());
- param.setStatus(BAD_VALUE);
- return BAD_VALUE;
- }
- return param.writeToValue(extBytes.data(), extBytes.size());
+ // copy the AIDL extension data back to effect_param_t
+ return VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_ParameterExtension_EffectParameterWriter(aidlParam,
+ param));
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
index 482114d..b34904b 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
@@ -21,10 +21,12 @@
//#define LOG_NDEBUG 0
#include <error/expected_utils.h>
+#include <media/AidlConversionCppNdk.h>
#include <media/AidlConversionNdk.h>
#include <media/AidlConversionEffect.h>
#include <media/audiohal/AudioEffectUuid.h>
-#include <system/audio_effects/effect_spatializer.h>
+#include <system/audio_effects/aidl_effects_utils.h>
+#include <system/audio_effects/effect_virtualizer.h>
#include <utils/Log.h>
@@ -34,34 +36,129 @@
namespace effect {
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::Range;
+using ::aidl::android::hardware::audio::effect::Virtualizer;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
+using ::aidl::android::media::audio::common::AudioDeviceDescription;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
status_t AidlConversionVirtualizer::setParameter(EffectParamReader& param) {
uint32_t type = 0;
- uint16_t value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
- OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ if (OK != param.readFromParameter(&type)) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
return BAD_VALUE;
}
Parameter aidlParam;
- // TODO
+ switch (type) {
+ case VIRTUALIZER_PARAM_STRENGTH: {
+ int16_t strength = 0;
+ if (OK != param.readFromValue(&strength)) {
+ ALOGE("%s invalid param %s for type %d", __func__, param.toString().c_str(), type);
+ return BAD_VALUE;
+ }
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Virtualizer, virtualizer, strengthPm, strength);
+ break;
+ }
+ case VIRTUALIZER_PARAM_FORCE_VIRTUALIZATION_MODE: {
+ audio_devices_t deviceType;
+ if (OK != param.readFromValue(&deviceType)) {
+ ALOGE("%s invalid param %s for type %d", __func__, param.toString().c_str(), type);
+ return BAD_VALUE;
+ }
+ AudioDeviceDescription deviceDesc = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ deviceType));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Virtualizer, virtualizer, device, deviceDesc);
+ break;
+ }
+ default: {
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Virtualizer, virtualizer, vendor, ext);
+ break;
+ }
+ }
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
status_t AidlConversionVirtualizer::getParameter(EffectParamWriter& param) {
- uint32_t type = 0, value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
- OK != param.readFromParameter(&type)) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
param.setStatus(BAD_VALUE);
return BAD_VALUE;
}
- // TODO
- return param.writeToValue(&value);
+ Parameter aidlParam;
+ switch (type) {
+ case VIRTUALIZER_PARAM_STRENGTH_SUPPORTED: {
+ // an invalid range indicates not setting support for this parameter
+ uint32_t support =
+ ::aidl::android::hardware::audio::effect::isRangeValid<Range::Tag::virtualizer>(
+ Virtualizer::strengthPm, mDesc.capability);
+ return param.writeToValue(&support);
+ }
+ case VIRTUALIZER_PARAM_STRENGTH: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Virtualizer, virtualizerTag,
+ Virtualizer::strengthPm);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ int16_t strength = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Virtualizer, virtualizer, Virtualizer::strengthPm, int32_t));
+ return param.writeToValue(&strength);
+ }
+ case VIRTUALIZER_PARAM_VIRTUAL_SPEAKER_ANGLES: {
+ audio_channel_mask_t mask;
+ audio_devices_t device;
+ if (OK != param.readFromParameter(&mask) || OK != param.readFromParameter(&device)) {
+ ALOGW("%s illegal param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Virtualizer::SpeakerAnglesPayload payload = {
+ .layout = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_channel_mask_t_AudioChannelLayout(
+ mask, false)),
+ .device = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_devices_t_AudioDeviceDescription(
+ device))};
+ Virtualizer::Id vId = UNION_MAKE(Virtualizer::Id, speakerAnglesPayload, payload);
+ Parameter::Id id = UNION_MAKE(Parameter::Id, virtualizerTag, vId);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ const auto& angles = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Virtualizer, virtualizer, Virtualizer::speakerAngles,
+ std::vector<Virtualizer::ChannelAngle>));
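+ // Write one (channel mask, azimuth, elevation) triple back for each reported channel angle.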
+ for (const auto& angle : angles) {
+ const audio_channel_mask_t chMask = ::aidl::android::
+ aidl2legacy_AudioChannelLayout_layout_audio_channel_mask_t_bits(
+ angle.channel, false);
+ ALOGW("%s aidl %d ch %d", __func__, angle.channel, chMask);
+ if (OK != param.writeToValue(&chMask) ||
+ OK != param.writeToValue(&angle.azimuthDegree) ||
+ OK != param.writeToValue(&angle.elevationDegree)) {
+ ALOGW("%s can't write angles to param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+ return OK;
+ }
+ case VIRTUALIZER_PARAM_VIRTUALIZATION_MODE: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Virtualizer, virtualizerTag,
+ Virtualizer::device);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ AudioDeviceDescription device = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, Virtualizer, virtualizer,
+ Virtualizer::device, AudioDeviceDescription));
+ const audio_devices_t deviceType = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_AudioDeviceDescription_audio_devices_t(device));
+ return param.writeToValue(&deviceType);
+ }
+ default: {
+ VENDOR_EXTENSION_GET_AND_RETURN(Virtualizer, virtualizer, param);
+ }
+ }
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
index 9ed601f..72659ed 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
@@ -14,6 +14,7 @@
* limitations under the License.
*/
+#include <cstddef>
#include <cstdint>
#include <cstring>
#include <optional>
@@ -33,35 +34,144 @@
namespace android {
namespace effect {
+using ::aidl::android::getParameterSpecificField;
using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::VendorExtension;
+using ::aidl::android::hardware::audio::effect::Visualizer;
using ::android::status_t;
using utils::EffectParamReader;
using utils::EffectParamWriter;
status_t AidlConversionVisualizer::setParameter(EffectParamReader& param) {
- uint32_t type = 0;
- uint16_t value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
return BAD_VALUE;
}
Parameter aidlParam;
- // TODO
+ switch (type) {
+ case VISUALIZER_PARAM_CAPTURE_SIZE: {
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, captureSamples, value);
+ break;
+ }
+ case VISUALIZER_PARAM_SCALING_MODE: {
+ Visualizer::ScalingMode mode = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_Parameter_Visualizer_uint32_ScalingMode(value));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, scalingMode, mode);
+ break;
+ }
+ case VISUALIZER_PARAM_LATENCY: {
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, latencyMs, value);
+ break;
+ }
+ case VISUALIZER_PARAM_MEASUREMENT_MODE: {
+ Visualizer::MeasurementMode mode = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_Parameter_Visualizer_uint32_MeasurementMode(value));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, measurementMode, mode);
+ break;
+ }
+ default: {
+ // for vendor extension, copy data area to the DefaultExtension, parameter ignored
+ VendorExtension ext = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_EffectParameterReader_Data_VendorExtension(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Visualizer, visualizer, vendor, ext);
+ break;
+ }
+ }
return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
}
status_t AidlConversionVisualizer::getParameter(EffectParamWriter& param) {
uint32_t type = 0, value = 0;
- if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(int32_t)) ||
OK != param.readFromParameter(&type)) {
ALOGE("%s invalid param %s", __func__, param.toString().c_str());
param.setStatus(BAD_VALUE);
return BAD_VALUE;
}
- // TODO
- return param.writeToValue(&value);
+ Parameter aidlParam;
+ switch (type) {
+ case VISUALIZER_PARAM_CAPTURE_SIZE: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag,
+ Visualizer::captureSamples);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Visualizer, visualizer, Visualizer::captureSamples, int32_t));
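+ // Cache the capture size so visualizerCapture() can validate the reply buffer size later.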
+ mCaptureSize = value;
+ return param.writeToValue(&value);
+ }
+ case VISUALIZER_PARAM_SCALING_MODE: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag,
+ Visualizer::scalingMode);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ Visualizer::ScalingMode mode = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, Visualizer, visualizer,
+ Visualizer::scalingMode, Visualizer::ScalingMode));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_Visualizer_ScalingMode_uint32(mode));
+ return param.writeToValue(&value);
+ }
+ case VISUALIZER_PARAM_LATENCY: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag,
+ Visualizer::latencyMs);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = (int32_t)VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Visualizer, visualizer, Visualizer::latencyMs, int32_t));
+ return param.writeToValue(&value);
+ }
+ case VISUALIZER_PARAM_MEASUREMENT_MODE: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag,
+ Visualizer::measurementMode);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ Visualizer::MeasurementMode mode = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Visualizer, visualizer, Visualizer::measurementMode,
+ Visualizer::MeasurementMode));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_Visualizer_MeasurementMode_uint32(mode));
+ return param.writeToValue(&value);
+ }
+ default: {
+ VENDOR_EXTENSION_GET_AND_RETURN(Visualizer, visualizer, param);
+ }
+ }
+}
+
+status_t AidlConversionVisualizer::visualizerCapture(uint32_t* replySize, void* pReplyData) {
+ if (!replySize || !pReplyData || *replySize != mCaptureSize) {
+ ALOGE("%s illegal param replySize %p pReplyData %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag,
+ Visualizer::captureSampleBuffer);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ const auto& samples = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, Visualizer, visualizer,
+ Visualizer::captureSampleBuffer, std::vector<uint8_t>));
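+ // Copy at most the requested number of bytes and report back how many were actually copied.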
+ size_t len = std::min((size_t)*replySize, samples.size());
+ *replySize = len;
+ std::memcpy(pReplyData, samples.data(), len);
+ return OK;
+}
+
+status_t AidlConversionVisualizer::visualizerMeasure(uint32_t* replySize, void* pReplyData) {
+ if (!replySize || !pReplyData || *replySize != 2 * sizeof(int32_t)) {
+ ALOGE("%s illegal param replySize %p pReplyData %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(Visualizer, visualizerTag, Visualizer::measurement);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ const auto& measure = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Visualizer, visualizer, Visualizer::measurement, Visualizer::Measurement));
+ int32_t* reply = (int32_t *) pReplyData;
+ *reply++ = measure.rms;
+ *reply = measure.peak;
+ return OK;
}
} // namespace effect
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h
index a7e4ea1..e380bc6 100644
--- a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h
@@ -32,8 +32,11 @@
~AidlConversionVisualizer() {}
private:
+ uint32_t mCaptureSize = 0;
status_t setParameter(utils::EffectParamReader& param) override;
status_t getParameter(utils::EffectParamWriter& param) override;
+ status_t visualizerCapture(uint32_t* replySize, void* pReplyData) override;
+ status_t visualizerMeasure(uint32_t* replySize, void* pReplyData) override;
};
} // namespace effect
diff --git a/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h b/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h
index b21e4c9..5d491a4 100644
--- a/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h
+++ b/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h
@@ -29,6 +29,12 @@
0x11e0,
0xbd61,
{0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// 0a8abfe0-654c-11e0-ba26-0002a5d5c51b
+static const AudioUuid kAutomaticGainControl1TypeUUID = {static_cast<int32_t>(0x0a8abfe0),
+ 0x654c,
+ 0x11e0,
+ 0xba26,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
// 0xae3c653b-be18-4ab8-8938-418f0a7f06ac
static const AudioUuid kAutomaticGainControl2TypeUUID = {static_cast<int32_t>(0xae3c653b),
0xbe18,
@@ -42,11 +48,11 @@
0xa0fc,
{0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
// fa81862a-588b-11ed-9b6a-0242ac120002
-static const AudioUuid kDownmixTypeUUID = {static_cast<int32_t>(0xfa81862a),
- 0x588b,
- 0x11ed,
- 0x9b6a,
- {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+static const AudioUuid kDownmixTypeUUID = {static_cast<int32_t>(0x381e49cc),
+ 0xa858,
+ 0x4aa2,
+ 0x87f6,
+ {0xe8, 0x38, 0x8e, 0x76, 0x01, 0xb2}};
// 7261676f-6d75-7369-6364-28e2fd3ac39e
static const AudioUuid kDynamicsProcessingTypeUUID = {static_cast<int32_t>(0x7261676f),
0x6d75,
@@ -101,12 +107,12 @@
0x11db,
0x8577,
{0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
-// fa819f3e-588b-11ed-9b6a-0242ac120002
-static const AudioUuid kVisualizerTypeUUID = {static_cast<int32_t>(0xfa819f3e),
- 0x588b,
- 0x11ed,
- 0x9b6a,
- {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+// e46b26a0-dddd-11db-8afd-0002a5d5c51b
+static const AudioUuid kVisualizerTypeUUID = {static_cast<int32_t>(0xe46b26a0),
+ 0xdddd,
+ 0x11db,
+ 0x8afd,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
// fa81a2b8-588b-11ed-9b6a-0242ac120002
static const AudioUuid kVolumeTypeUUID = {static_cast<int32_t>(0xfa81a2b8),
0x588b,
diff --git a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
index 71c7586..c076ccc 100644
--- a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
+++ b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
@@ -27,6 +27,7 @@
#include <media/audiohal/EffectsFactoryHalInterface.h>
#include <system/audio_effects/audio_effects_utils.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_agc.h>
#include <system/audio_effects/effect_agc2.h>
#include <system/audio_effects/effect_bassboost.h>
#include <system/audio_effects/effect_downmix.h>
@@ -157,6 +158,9 @@
std::make_tuple(FX_IID_AEC,
createEffectParamCombination(AEC_PARAM_ECHO_DELAY, 0xff /* echoDelayMs */,
sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(FX_IID_AGC,
+ createEffectParamCombination(AGC_PARAM_TARGET_LEVEL, 20 /* targetLevel */,
+ sizeof(int16_t) /* returnValueSize */)),
std::make_tuple(FX_IID_AGC2, createEffectParamCombination(
AGC2_PARAM_FIXED_DIGITAL_GAIN, 15 /* digitalGainDb */,
sizeof(int32_t) /* returnValueSize */)),
@@ -165,7 +169,7 @@
sizeof(int32_t) /* returnValueSize */)),
std::make_tuple(EFFECT_UIID_DOWNMIX,
createEffectParamCombination(DOWNMIX_PARAM_TYPE, DOWNMIX_TYPE_FOLD,
- sizeof(int32_t) /* returnValueSize */)),
+ sizeof(int16_t) /* returnValueSize */)),
std::make_tuple(SL_IID_DYNAMICSPROCESSING,
createEffectParamCombination(
std::array<uint32_t, 2>({DP_PARAM_INPUT_GAIN, 0 /* channel */}),
@@ -264,7 +268,8 @@
if (mCombination->valueSize) {
std::vector<uint8_t> response(mCombination->valueSize);
EXPECT_EQ(OK, parameterGet.readFromValue(response.data(), mCombination->valueSize))
- << parameterGet.toString();
+ << " try get valueSize " << mCombination->valueSize << " from "
+ << parameterGet.toString();
EXPECT_EQ(response, mExpectedValue);
}
}
diff --git a/media/libeffects/downmix/aidl/DownmixContext.cpp b/media/libeffects/downmix/aidl/DownmixContext.cpp
index 43bfeed..ac893d8 100644
--- a/media/libeffects/downmix/aidl/DownmixContext.cpp
+++ b/media/libeffects/downmix/aidl/DownmixContext.cpp
@@ -21,8 +21,8 @@
#include "DownmixContext.h"
using aidl::android::hardware::audio::effect::IEffect;
-using ::aidl::android::media::audio::common::AudioChannelLayout;
-using ::android::hardware::audio::common::getChannelCount;
+using aidl::android::hardware::audio::common::getChannelCount;
+using aidl::android::media::audio::common::AudioChannelLayout;
namespace aidl::android::hardware::audio::effect {
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
index 4af5fd8..ca578eb 100644
--- a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
@@ -221,7 +221,7 @@
EX_ILLEGAL_ARGUMENT, "setInputGainFailed");
return ndk::ScopedAStatus::ok();
}
- case DynamicsProcessing::vendorExtension: {
+ case DynamicsProcessing::vendor: {
LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagNotSupported");
@@ -301,7 +301,7 @@
mContext->getInputGain()));
return ndk::ScopedAStatus::ok();
}
- case DynamicsProcessing::vendorExtension: {
+ case DynamicsProcessing::vendor: {
LOG(ERROR) << __func__ << " wrong vendor tag in CommonTag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagInWrongId");
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
index 7978cc5..69ff522 100644
--- a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
@@ -64,6 +64,7 @@
RetCode DynamicsProcessingContext::setCommon(const Parameter::Common& common) {
mCommon = common;
init();
+ LOG(INFO) << __func__ << " " << common.toString();
return RetCode::SUCCESS;
}
@@ -287,8 +288,8 @@
void DynamicsProcessingContext::init() {
std::lock_guard lg(mMutex);
mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
- mChannelCount =
- ::android::hardware::audio::common::getChannelCount(mCommon.input.base.channelMask);
+ mChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
+ mCommon.input.base.channelMask);
}
dp_fx::DPChannel* DynamicsProcessingContext::getChannel_l(int channel) {
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
index 8ed579b..de44e05 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "AHAL_HapticGeneratorContext"
#include <Utils.h>
+#include <android-base/logging.h>
#include <android-base/parsedouble.h>
#include <android-base/properties.h>
@@ -193,9 +194,9 @@
mParams.mVibratorInfo.resonantFrequencyHz = DEFAULT_RESONANT_FREQUENCY;
mParams.mVibratorInfo.qFactor = DEFAULT_BSF_ZERO_Q;
- mParams.mAudioChannelCount = ::android::hardware::audio::common::getChannelCount(
+ mParams.mAudioChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
inputChMask, ~media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
- mParams.mHapticChannelCount = ::android::hardware::audio::common::getChannelCount(
+ mParams.mHapticChannelCount = ::aidl::android::hardware::audio::common::getChannelCount(
outputChMask, media::audio::common::AudioChannelLayout::LAYOUT_HAPTIC_AB);
LOG_ALWAYS_FATAL_IF(mParams.mHapticChannelCount > 2, "haptic channel count is too large");
for (size_t i = 0; i < mParams.mHapticChannelCount; ++i) {
diff --git a/media/libeffects/lvm/tests/Android.bp b/media/libeffects/lvm/tests/Android.bp
index de6d30b..0568fbd 100644
--- a/media/libeffects/lvm/tests/Android.bp
+++ b/media/libeffects/lvm/tests/Android.bp
@@ -14,10 +14,6 @@
defaults: [
"libeffects-test-defaults",
],
- // TODO(b/269868814)
- test_options: {
- unit_test: false,
- },
srcs: [
"EffectReverbTest.cpp",
],
diff --git a/media/libeffects/lvm/tests/EffectReverbTest.cpp b/media/libeffects/lvm/tests/EffectReverbTest.cpp
index 59453eb..aaac782 100644
--- a/media/libeffects/lvm/tests/EffectReverbTest.cpp
+++ b/media/libeffects/lvm/tests/EffectReverbTest.cpp
@@ -33,6 +33,27 @@
constexpr size_t kNumEffectUuids = std::size(kEffectUuids);
+static constexpr audio_channel_mask_t kChMasks[] = {
+ AUDIO_CHANNEL_OUT_MONO, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_CHANNEL_OUT_2POINT1, AUDIO_CHANNEL_OUT_5POINT1,
+ AUDIO_CHANNEL_OUT_7POINT1POINT4, AUDIO_CHANNEL_INDEX_MASK_23,
+ AUDIO_CHANNEL_OUT_22POINT2,
+};
+
+static constexpr size_t kNumChMasks = std::size(kChMasks);
+
+static constexpr size_t kSampleRates[] = {8000, 11025, 44100, 48000, 192000};
+
+static constexpr size_t kNumSampleRates = std::size(kSampleRates);
+
+static constexpr size_t kFrameCounts[] = {4, 512};
+
+static constexpr size_t kNumFrameCounts = std::size(kFrameCounts);
+
+static constexpr size_t kLoopCounts[] = {1, 4};
+
+static constexpr size_t kNumLoopCounts = std::size(kLoopCounts);
+
static bool isAuxMode(const effect_uuid_t* uuid) {
// Update this, if the order of effects in kEffectUuids is updated
return (uuid == &kEffectUuids[2] || uuid == &kEffectUuids[3]);
@@ -50,15 +71,15 @@
class SingleEffectTest : public ::testing::TestWithParam<SingleEffectTestParam> {
public:
SingleEffectTest()
- : mSampleRate(EffectTestHelper::kSampleRates[std::get<1>(GetParam())]),
- mFrameCount(EffectTestHelper::kFrameCounts[std::get<2>(GetParam())]),
- mLoopCount(EffectTestHelper::kLoopCounts[std::get<3>(GetParam())]),
+ : mSampleRate(kSampleRates[std::get<1>(GetParam())]),
+ mFrameCount(kFrameCounts[std::get<2>(GetParam())]),
+ mLoopCount(kLoopCounts[std::get<3>(GetParam())]),
mTotalFrameCount(mFrameCount * mLoopCount),
mUuid(&kEffectUuids[std::get<4>(GetParam())]),
mInChMask(isAuxMode(mUuid) ? AUDIO_CHANNEL_OUT_MONO
- : EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+ : kChMasks[std::get<0>(GetParam())]),
mInChannelCount(audio_channel_count_from_out_mask(mInChMask)),
- mOutChMask(EffectTestHelper::kChMasks[std::get<0>(GetParam())]),
+ mOutChMask(kChMasks[std::get<0>(GetParam())]),
mOutChannelCount(audio_channel_count_from_out_mask(mOutChMask)),
mPreset(kPresets[std::get<5>(GetParam())]) {}
@@ -100,10 +121,10 @@
INSTANTIATE_TEST_SUITE_P(
EffectReverbTestAll, SingleEffectTest,
- ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumChMasks),
- ::testing::Range(0, (int)EffectTestHelper::kNumSampleRates),
- ::testing::Range(0, (int)EffectTestHelper::kNumFrameCounts),
- ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
+ ::testing::Combine(::testing::Range(0, (int)kNumChMasks),
+ ::testing::Range(0, (int)kNumSampleRates),
+ ::testing::Range(0, (int)kNumFrameCounts),
+ ::testing::Range(0, (int)kNumLoopCounts),
::testing::Range(0, (int)kNumEffectUuids),
::testing::Range(0, (int)kNumPresets)));
@@ -112,9 +133,9 @@
: public ::testing::TestWithParam<SingleEffectComparisonTestParam> {
public:
SingleEffectComparisonTest()
- : mSampleRate(EffectTestHelper::kSampleRates[std::get<0>(GetParam())]),
- mFrameCount(EffectTestHelper::kFrameCounts[std::get<1>(GetParam())]),
- mLoopCount(EffectTestHelper::kLoopCounts[std::get<2>(GetParam())]),
+ : mSampleRate(kSampleRates[std::get<0>(GetParam())]),
+ mFrameCount(kFrameCounts[std::get<1>(GetParam())]),
+ mLoopCount(kLoopCounts[std::get<2>(GetParam())]),
mTotalFrameCount(mFrameCount * mLoopCount),
mUuid(&kEffectUuids[std::get<3>(GetParam())]),
mPreset(kPresets[std::get<4>(GetParam())]) {}
@@ -173,7 +194,7 @@
std::vector<int16_t> monoRefI16(mTotalFrameCount);
memcpy_to_i16_from_float(monoRefI16.data(), monoOutput.data(), mTotalFrameCount);
- for (size_t outChMask : EffectTestHelper::kChMasks) {
+ for (size_t outChMask : kChMasks) {
size_t outChannelCount = audio_channel_count_from_out_mask(outChMask);
size_t inChMask = isAuxMode(mUuid) ? AUDIO_CHANNEL_OUT_MONO : outChMask;
@@ -225,9 +246,9 @@
INSTANTIATE_TEST_SUITE_P(
EffectReverbTestAll, SingleEffectComparisonTest,
- ::testing::Combine(::testing::Range(0, (int)EffectTestHelper::kNumSampleRates),
- ::testing::Range(0, (int)EffectTestHelper::kNumFrameCounts),
- ::testing::Range(0, (int)EffectTestHelper::kNumLoopCounts),
+ ::testing::Combine(::testing::Range(0, (int)kNumSampleRates),
+ ::testing::Range(0, (int)kNumFrameCounts),
+ ::testing::Range(0, (int)kNumLoopCounts),
::testing::Range(0, (int)kNumEffectUuids),
::testing::Range(0, (int)kNumPresets)));
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
index 6124356..d026e2b 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -15,7 +15,9 @@
*/
#include <cstddef>
+
#define LOG_TAG "BundleContext"
+#include <android-base/logging.h>
#include <Utils.h>
#include "BundleContext.h"
@@ -690,7 +692,7 @@
std::vector<Virtualizer::ChannelAngle> BundleContext::getSpeakerAngles(
const Virtualizer::SpeakerAnglesPayload payload) {
std::vector<Virtualizer::ChannelAngle> angles;
- auto chCount = ::android::hardware::audio::common::getChannelCount(payload.layout);
+ auto chCount = ::aidl::android::hardware::audio::common::getChannelCount(payload.layout);
RETURN_VALUE_IF(!isConfigSupportedVirtualizer(chCount, payload.device), angles,
"payloadNotSupported");
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
index 4652d8d..520371b 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleTypes.h
@@ -70,12 +70,11 @@
const std::vector<Range::EqualizerRange> kEqRanges = {
MAKE_RANGE(Equalizer, preset, 0, MAX_NUM_PRESETS - 1),
- MAKE_RANGE(
- Equalizer, bandLevels,
- std::vector<Equalizer::BandLevel>{Equalizer::BandLevel(
- {.index = 0, .levelMb = std::numeric_limits<int>::min()})},
- std::vector<Equalizer::BandLevel>{Equalizer::BandLevel(
- {.index = MAX_NUM_BANDS - 1, .levelMb = std::numeric_limits<int>::max()})}),
+ MAKE_RANGE(Equalizer, bandLevels,
+ std::vector<Equalizer::BandLevel>{
+ Equalizer::BandLevel({.index = 0, .levelMb = -15})},
+ std::vector<Equalizer::BandLevel>{
+ Equalizer::BandLevel({.index = MAX_NUM_BANDS - 1, .levelMb = 15})}),
/* capability definition */
MAKE_RANGE(Equalizer, bandFrequencies, kEqBandFrequency, kEqBandFrequency),
MAKE_RANGE(Equalizer, presets, kEqPresets, kEqPresets),
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
index dc52c16..b1a9ef0 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -296,11 +296,19 @@
eqParam.set<Equalizer::preset>(mContext->getEqualizerPreset());
break;
}
+ case Equalizer::bandFrequencies: {
+ eqParam.set<Equalizer::bandFrequencies>(lvm::kEqBandFrequency);
+ break;
+ }
+ case Equalizer::presets: {
+ eqParam.set<Equalizer::presets>(lvm::kEqPresets);
+ break;
+ }
case Equalizer::centerFreqMh: {
eqParam.set<Equalizer::centerFreqMh>(mContext->getEqualizerCenterFreqs());
break;
}
- default: {
+ case Equalizer::vendor: {
LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
EX_ILLEGAL_ARGUMENT, "unsupportedTag");
@@ -365,8 +373,9 @@
ndk::ScopedAStatus EffectBundleAidl::getParameterVirtualizer(const Virtualizer::Id& id,
Parameter::Specific* specific) {
- RETURN_IF(id.getTag() != Virtualizer::Id::commonTag, EX_ILLEGAL_ARGUMENT,
- "VirtualizerTagNotSupported");
+ RETURN_IF((id.getTag() != Virtualizer::Id::commonTag) &&
+ (id.getTag() != Virtualizer::Id::speakerAnglesPayload),
+ EX_ILLEGAL_ARGUMENT, "VirtualizerTagNotSupported");
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
Virtualizer vrParam;
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
index 87aa12b..79e67f2 100644
--- a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
@@ -15,7 +15,9 @@
*/
#include <cstddef>
+
#define LOG_TAG "ReverbContext"
+#include <android-base/logging.h>
#include <Utils.h>
#include "ReverbContext.h"
@@ -301,7 +303,7 @@
/* General parameters */
params.OperatingMode = LVM_MODE_ON;
params.SampleRate = LVM_FS_44100;
- params.SourceFormat = (::android::hardware::audio::common::getChannelCount(
+ params.SourceFormat = (::aidl::android::hardware::audio::common::getChannelCount(
mCommon.input.base.channelMask) == 1
? LVM_MONO
: LVM_STEREO);
@@ -363,10 +365,10 @@
LOG(DEBUG) << __func__ << " start processing";
std::lock_guard lg(mMutex);
- int channels =
- ::android::hardware::audio::common::getChannelCount(mCommon.input.base.channelMask);
- int outChannels =
- ::android::hardware::audio::common::getChannelCount(mCommon.output.base.channelMask);
+ int channels = ::aidl::android::hardware::audio::common::getChannelCount(
+ mCommon.input.base.channelMask);
+ int outChannels = ::aidl::android::hardware::audio::common::getChannelCount(
+ mCommon.output.base.channelMask);
int frameCount = mCommon.input.frameCount;
// Reverb only effects the stereo channels in multichannel source.
diff --git a/media/libeffects/preprocessing/Android.bp b/media/libeffects/preprocessing/Android.bp
index c6e036a..d018c47 100644
--- a/media/libeffects/preprocessing/Android.bp
+++ b/media/libeffects/preprocessing/Android.bp
@@ -58,3 +58,39 @@
"libwebrtc_absl_headers",
],
}
+
+cc_library_shared {
+ name: "libpreprocessingaidl",
+ srcs: [
+ "aidl/PreProcessingContext.cpp",
+ "aidl/EffectPreProcessing.cpp",
+ ":effectCommonFile",
+ ],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ local_include_dirs: ["aidl"],
+ shared_libs: [
+ "liblog",
+ "libutils",
+ "libaudioutils",
+ ],
+ static_libs: [
+ "webrtc_audio_processing",
+ ],
+ header_libs: [
+ "libwebrtc_absl_headers",
+ "libaudioeffects",
+ "libhardware_headers",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ "-Wno-unused-parameter",
+ ],
+ relative_install_path: "soundfx",
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/preprocessing/aidl/EffectPreProcessing.cpp b/media/libeffects/preprocessing/aidl/EffectPreProcessing.cpp
new file mode 100644
index 0000000..b9df915
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/EffectPreProcessing.cpp
@@ -0,0 +1,453 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectPreProcessing"
+#include <algorithm>
+#include <unordered_set>
+
+#include <Utils.h>
+#include <android-base/logging.h>
+#include <fmq/AidlMessageQueue.h>
+
+#include "EffectPreProcessing.h"
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::EffectPreProcessing;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kAcousticEchoCancelerSwImplUUID;
+using aidl::android::hardware::audio::effect::kAutomaticGainControlV1SwImplUUID;
+using aidl::android::hardware::audio::effect::kAutomaticGainControlV2SwImplUUID;
+using aidl::android::hardware::audio::effect::kNoiseSuppressionSwImplUUID;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+bool isPreProcessingUuidSupported(const AudioUuid& uuid) {
+ return (uuid == kAcousticEchoCancelerSwImplUUID || uuid == kAutomaticGainControlV1SwImplUUID ||
+ uuid == kAutomaticGainControlV2SwImplUUID || uuid == kNoiseSuppressionSwImplUUID);
+}
+
+extern "C" binder_exception_t createEffect(const AudioUuid* uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!uuid || !isPreProcessingUuidSupported(*uuid)) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<EffectPreProcessing>(*uuid);
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || !isPreProcessingUuidSupported(*in_impl_uuid)) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (*in_impl_uuid == kAcousticEchoCancelerSwImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::kAcousticEchoCancelerDesc;
+ } else if (*in_impl_uuid == kAutomaticGainControlV1SwImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::kAutomaticGainControlV1Desc;
+ } else if (*in_impl_uuid == kAutomaticGainControlV2SwImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::kAutomaticGainControlV2Desc;
+ } else if (*in_impl_uuid == kNoiseSuppressionSwImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::kNoiseSuppressionDesc;
+ }
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+EffectPreProcessing::EffectPreProcessing(const AudioUuid& uuid) {
+ LOG(DEBUG) << __func__ << " " << uuid.toString();
+ if (uuid == kAcousticEchoCancelerSwImplUUID) {
+ mType = PreProcessingEffectType::ACOUSTIC_ECHO_CANCELLATION;
+ mDescriptor = &kAcousticEchoCancelerDesc;
+ mEffectName = &kAcousticEchoCancelerEffectName;
+ } else if (uuid == kAutomaticGainControlV1SwImplUUID) {
+ mType = PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V1;
+ mDescriptor = &kAutomaticGainControlV1Desc;
+ mEffectName = &kAutomaticGainControlV1EffectName;
+ } else if (uuid == kAutomaticGainControlV2SwImplUUID) {
+ mType = PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V2;
+ mDescriptor = &kAutomaticGainControlV2Desc;
+ mEffectName = &kAutomaticGainControlV2EffectName;
+ } else if (uuid == kNoiseSuppressionSwImplUUID) {
+ mType = PreProcessingEffectType::NOISE_SUPPRESSION;
+ mDescriptor = &kNoiseSuppressionDesc;
+ mEffectName = &kNoiseSuppressionEffectName;
+ } else {
+ LOG(ERROR) << __func__ << " " << uuid.toString() << " not supported!";
+ }
+}
+
+EffectPreProcessing::~EffectPreProcessing() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ *_aidl_return = *mDescriptor;
+ LOG(DEBUG) << __func__ << " " << _aidl_return->toString();
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectPreProcessing::setParameterSpecific(const Parameter::Specific& specific) {
+ LOG(DEBUG) << __func__ << " specific " << specific.toString();
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto tag = specific.getTag();
+ switch (tag) {
+ case Parameter::Specific::acousticEchoCanceler:
+ return setParameterAcousticEchoCanceler(specific);
+ case Parameter::Specific::automaticGainControlV1:
+ return setParameterAutomaticGainControlV1(specific);
+ case Parameter::Specific::automaticGainControlV2:
+ return setParameterAutomaticGainControlV2(specific);
+ case Parameter::Specific::noiseSuppression:
+ return setParameterNoiseSuppression(specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "specificParamNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::setParameterAcousticEchoCanceler(
+ const Parameter::Specific& specific) {
+ auto& param = specific.get<Parameter::Specific::acousticEchoCanceler>();
+ RETURN_IF(!inRange(param, kAcousticEchoCancelerRanges), EX_ILLEGAL_ARGUMENT, "outOfRange");
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case AcousticEchoCanceler::echoDelayUs: {
+ RETURN_IF(mContext->setAcousticEchoCancelerEchoDelay(
+ param.get<AcousticEchoCanceler::echoDelayUs>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "echoDelayNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ case AcousticEchoCanceler::mobileMode: {
+ RETURN_IF(mContext->setAcousticEchoCancelerMobileMode(
+ param.get<AcousticEchoCanceler::mobileMode>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "SettingMobileModeNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AcousticEchoCancelerTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::setParameterAutomaticGainControlV1(
+ const Parameter::Specific& specific) {
+ auto& param = specific.get<Parameter::Specific::automaticGainControlV1>();
+ RETURN_IF(!inRange(param, kAutomaticGainControlV1Ranges), EX_ILLEGAL_ARGUMENT, "outOfRange");
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case AutomaticGainControlV1::targetPeakLevelDbFs: {
+ RETURN_IF(mContext->setAutomaticGainControlV1TargetPeakLevel(
+ param.get<AutomaticGainControlV1::targetPeakLevelDbFs>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "targetPeakLevelNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ case AutomaticGainControlV1::maxCompressionGainDb: {
+ RETURN_IF(mContext->setAutomaticGainControlV1MaxCompressionGain(
+ param.get<AutomaticGainControlV1::maxCompressionGainDb>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "maxCompressionGainNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ case AutomaticGainControlV1::enableLimiter: {
+ RETURN_IF(
+ mContext->setAutomaticGainControlV1EnableLimiter(
+ param.get<AutomaticGainControlV1::enableLimiter>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "enableLimiterNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AutomaticGainControlV1TagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::setParameterAutomaticGainControlV2(
+ const Parameter::Specific& specific) {
+ auto& param = specific.get<Parameter::Specific::automaticGainControlV2>();
+ RETURN_IF(!inRange(param, kAutomaticGainControlV2Ranges), EX_ILLEGAL_ARGUMENT, "outOfRange");
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case AutomaticGainControlV2::fixedDigitalGainMb: {
+ RETURN_IF(mContext->setAutomaticGainControlV2DigitalGain(
+ param.get<AutomaticGainControlV2::fixedDigitalGainMb>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "digitalGainNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ case AutomaticGainControlV2::levelEstimator: {
+ RETURN_IF(mContext->setAutomaticGainControlV2LevelEstimator(
+ param.get<AutomaticGainControlV2::levelEstimator>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "levelEstimatorNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ case AutomaticGainControlV2::saturationMarginMb: {
+ RETURN_IF(mContext->setAutomaticGainControlV2SaturationMargin(
+ param.get<AutomaticGainControlV2::saturationMarginMb>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "saturationMarginNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AutomaticGainControlV2TagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::setParameterNoiseSuppression(
+ const Parameter::Specific& specific) {
+ auto& param = specific.get<Parameter::Specific::noiseSuppression>();
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case NoiseSuppression::level: {
+ RETURN_IF(mContext->setNoiseSuppressionLevel(param.get<NoiseSuppression::level>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "levelNotSupported");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "NoiseSuppressionTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+
+ switch (tag) {
+ case Parameter::Id::acousticEchoCancelerTag:
+ return getParameterAcousticEchoCanceler(
+ id.get<Parameter::Id::acousticEchoCancelerTag>(), specific);
+ case Parameter::Id::automaticGainControlV1Tag:
+ return getParameterAutomaticGainControlV1(
+ id.get<Parameter::Id::automaticGainControlV1Tag>(), specific);
+ case Parameter::Id::automaticGainControlV2Tag:
+ return getParameterAutomaticGainControlV2(
+ id.get<Parameter::Id::automaticGainControlV2Tag>(), specific);
+ case Parameter::Id::noiseSuppressionTag:
+ return getParameterNoiseSuppression(id.get<Parameter::Id::noiseSuppressionTag>(),
+ specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "wrongIdTag");
+ }
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getParameterAcousticEchoCanceler(
+ const AcousticEchoCanceler::Id& id, Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != AcousticEchoCanceler::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "AcousticEchoCancelerTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ AcousticEchoCanceler param;
+ auto tag = id.get<AcousticEchoCanceler::Id::commonTag>();
+ switch (tag) {
+ case AcousticEchoCanceler::echoDelayUs: {
+ param.set<AcousticEchoCanceler::echoDelayUs>(
+ mContext->getAcousticEchoCancelerEchoDelay());
+ break;
+ }
+ case AcousticEchoCanceler::mobileMode: {
+ param.set<AcousticEchoCanceler::mobileMode>(
+ mContext->getAcousticEchoCancelerMobileMode());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AcousticEchoCancelerTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::acousticEchoCanceler>(param);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getParameterAutomaticGainControlV1(
+ const AutomaticGainControlV1::Id& id, Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != AutomaticGainControlV1::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "AutomaticGainControlV1TagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ AutomaticGainControlV1 param;
+
+ auto tag = id.get<AutomaticGainControlV1::Id::commonTag>();
+ switch (tag) {
+ case AutomaticGainControlV1::targetPeakLevelDbFs: {
+ param.set<AutomaticGainControlV1::targetPeakLevelDbFs>(
+ mContext->getAutomaticGainControlV1TargetPeakLevel());
+ break;
+ }
+ case AutomaticGainControlV1::maxCompressionGainDb: {
+ param.set<AutomaticGainControlV1::maxCompressionGainDb>(
+ mContext->getAutomaticGainControlV1MaxCompressionGain());
+ break;
+ }
+ case AutomaticGainControlV1::enableLimiter: {
+ param.set<AutomaticGainControlV1::enableLimiter>(
+ mContext->getAutomaticGainControlV1EnableLimiter());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AutomaticGainControlV1TagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::automaticGainControlV1>(param);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getParameterAutomaticGainControlV2(
+ const AutomaticGainControlV2::Id& id, Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != AutomaticGainControlV2::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "AutomaticGainControlV2TagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ AutomaticGainControlV2 param;
+
+ auto tag = id.get<AutomaticGainControlV2::Id::commonTag>();
+ switch (tag) {
+ case AutomaticGainControlV2::fixedDigitalGainMb: {
+ param.set<AutomaticGainControlV2::fixedDigitalGainMb>(
+ mContext->getAutomaticGainControlV2DigitalGain());
+ break;
+ }
+ case AutomaticGainControlV2::levelEstimator: {
+ param.set<AutomaticGainControlV2::levelEstimator>(
+ mContext->getAutomaticGainControlV2LevelEstimator());
+ break;
+ }
+ case AutomaticGainControlV2::saturationMarginMb: {
+ param.set<AutomaticGainControlV2::saturationMarginMb>(
+ mContext->getAutomaticGainControlV2SaturationMargin());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "AutomaticGainControlV2TagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::automaticGainControlV2>(param);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectPreProcessing::getParameterNoiseSuppression(
+ const NoiseSuppression::Id& id, Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != NoiseSuppression::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "NoiseSuppressionTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ NoiseSuppression param;
+
+ auto tag = id.get<NoiseSuppression::Id::commonTag>();
+ switch (tag) {
+ case NoiseSuppression::level: {
+ param.set<NoiseSuppression::level>(mContext->getNoiseSuppressionLevel());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "NoiseSuppressionTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::noiseSuppression>(param);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> EffectPreProcessing::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ } else {
+ // PreProcessingSession is a singleton
+ mContext = PreProcessingSession::getPreProcessingSession().createSession(
+ mType, 1 /* statusFmqDepth */, common);
+ }
+
+ return mContext;
+}
+
+std::shared_ptr<EffectContext> EffectPreProcessing::getContext() {
+ return mContext;
+}
+
+RetCode EffectPreProcessing::releaseContext() {
+ if (mContext) {
+ PreProcessingSession::getPreProcessingSession().releaseSession(mType,
+ mContext->getSessionId());
+ mContext.reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+ndk::ScopedAStatus EffectPreProcessing::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status EffectPreProcessing::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, sampleToProcess);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/preprocessing/aidl/EffectPreProcessing.h b/media/libeffects/preprocessing/aidl/EffectPreProcessing.h
new file mode 100644
index 0000000..fad848a
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/EffectPreProcessing.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "PreProcessingContext.h"
+#include "PreProcessingSession.h"
+#include "effect-impl/EffectImpl.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class EffectPreProcessing final : public EffectImpl {
+ public:
+ explicit EffectPreProcessing(const AudioUuid& uuid);
+ ~EffectPreProcessing() override;
+
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ std::shared_ptr<EffectContext> getContext() override;
+ RetCode releaseContext() override;
+
+ IEffect::Status effectProcessImpl(float* in, float* out, int samples) override;
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+
+ std::string getEffectName() override { return *mEffectName; }
+
+ private:
+ std::shared_ptr<PreProcessingContext> mContext;
+ const Descriptor* mDescriptor;
+ const std::string* mEffectName;
+ PreProcessingEffectType mType;
+
+ ndk::ScopedAStatus setParameterAcousticEchoCanceler(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterAcousticEchoCanceler(const AcousticEchoCanceler::Id& id,
+ Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterAutomaticGainControlV1(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterAutomaticGainControlV1(const AutomaticGainControlV1::Id& id,
+ Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterAutomaticGainControlV2(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterAutomaticGainControlV2(const AutomaticGainControlV2::Id& id,
+ Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterNoiseSuppression(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterNoiseSuppression(const NoiseSuppression::Id& id,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/preprocessing/aidl/PreProcessingContext.cpp b/media/libeffects/preprocessing/aidl/PreProcessingContext.cpp
new file mode 100644
index 0000000..c1e4eda
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/PreProcessingContext.cpp
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstddef>
+#define LOG_TAG "PreProcessingContext"
+#include <Utils.h>
+
+#include "PreProcessingContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+using aidl::android::media::audio::common::AudioDeviceDescription;
+using aidl::android::media::audio::common::AudioDeviceType;
+
+RetCode PreProcessingContext::init(const Parameter::Common& common) {
+ std::lock_guard lg(mMutex);
+ webrtc::AudioProcessingBuilder apBuilder;
+ mAudioProcessingModule = apBuilder.Create();
+ if (mAudioProcessingModule == nullptr) {
+ LOG(ERROR) << "init could not get apm engine";
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+
+ updateConfigs(common);
+
+ mEnabledMsk = 0;
+ mProcessedMsk = 0;
+ mRevEnabledMsk = 0;
+ mRevProcessedMsk = 0;
+
+ auto config = mAudioProcessingModule->GetConfig();
+ switch (mType) {
+ case PreProcessingEffectType::ACOUSTIC_ECHO_CANCELLATION:
+ config.echo_canceller.mobile_mode = true;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V1:
+ config.gain_controller1.target_level_dbfs = kAgcDefaultTargetLevel;
+ config.gain_controller1.compression_gain_db = kAgcDefaultCompGain;
+ config.gain_controller1.enable_limiter = kAgcDefaultLimiter;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V2:
+ config.gain_controller2.fixed_digital.gain_db = 0.f;
+ break;
+ case PreProcessingEffectType::NOISE_SUPPRESSION:
+ config.noise_suppression.level = kNsDefaultLevel;
+ break;
+ }
+ mAudioProcessingModule->ApplyConfig(config);
+ mState = PRE_PROC_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+RetCode PreProcessingContext::deInit() {
+ std::lock_guard lg(mMutex);
+ mAudioProcessingModule = nullptr;
+ mState = PRE_PROC_STATE_UNINITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+RetCode PreProcessingContext::enable() {
+ if (mState != PRE_PROC_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ int typeMsk = (1 << int(mType));
+ std::lock_guard lg(mMutex);
+ // Check if effect is already enabled.
+ if ((mEnabledMsk & typeMsk) == typeMsk) {
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ mEnabledMsk |= typeMsk;
+ auto config = mAudioProcessingModule->GetConfig();
+ switch (mType) {
+ case PreProcessingEffectType::ACOUSTIC_ECHO_CANCELLATION:
+ config.echo_canceller.enabled = true;
+ // AEC has reverse stream
+ mRevEnabledMsk |= typeMsk;
+ mRevProcessedMsk = 0;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V1:
+ config.gain_controller1.enabled = true;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V2:
+ config.gain_controller2.enabled = true;
+ break;
+ case PreProcessingEffectType::NOISE_SUPPRESSION:
+ config.noise_suppression.enabled = true;
+ break;
+ }
+ mProcessedMsk = 0;
+ mAudioProcessingModule->ApplyConfig(config);
+ mState = PRE_PROC_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode PreProcessingContext::disable() {
+ if (mState != PRE_PROC_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ int typeMsk = (1 << int(mType));
+ std::lock_guard lg(mMutex);
+ // Check if effect is already disabled.
+ if ((mEnabledMsk & typeMsk) != typeMsk) {
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ mEnabledMsk &= ~typeMsk;
+ auto config = mAudioProcessingModule->GetConfig();
+ switch (mType) {
+ case PreProcessingEffectType::ACOUSTIC_ECHO_CANCELLATION:
+ config.echo_canceller.enabled = false;
+ // AEC has reverse stream
+ mRevEnabledMsk &= ~typeMsk;
+ mRevProcessedMsk = 0;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V1:
+ config.gain_controller1.enabled = false;
+ break;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V2:
+ config.gain_controller2.enabled = false;
+ break;
+ case PreProcessingEffectType::NOISE_SUPPRESSION:
+ config.noise_suppression.enabled = false;
+ break;
+ }
+ mProcessedMsk = 0;
+ mAudioProcessingModule->ApplyConfig(config);
+ mState = PRE_PROC_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+RetCode PreProcessingContext::setCommon(const Parameter::Common& common) {
+ mCommon = common;
+ updateConfigs(common);
+ return RetCode::SUCCESS;
+}
+
+void PreProcessingContext::updateConfigs(const Parameter::Common& common) {
+ mInputConfig.set_sample_rate_hz(common.input.base.sampleRate);
+ mInputConfig.set_num_channels(::aidl::android::hardware::audio::common::getChannelCount(
+ common.input.base.channelMask));
+ mOutputConfig.set_sample_rate_hz(common.input.base.sampleRate);
+ mOutputConfig.set_num_channels(::aidl::android::hardware::audio::common::getChannelCount(
+ common.output.base.channelMask));
+}
+
+RetCode PreProcessingContext::setAcousticEchoCancelerEchoDelay(int echoDelayUs) {
+ mEchoDelayUs = echoDelayUs;
+ std::lock_guard lg(mMutex);
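+ // The webrtc audio processing module expects the echo delay in milliseconds.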
+ mAudioProcessingModule->set_stream_delay_ms(mEchoDelayUs / 1000);
+ return RetCode::SUCCESS;
+}
+
+int PreProcessingContext::getAcousticEchoCancelerEchoDelay() const {
+ return mEchoDelayUs;
+}
+
+RetCode PreProcessingContext::setAcousticEchoCancelerMobileMode(bool mobileMode) {
+ mMobileMode = mobileMode;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
+ config.echo_canceller.mobile_mode = mobileMode;
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+bool PreProcessingContext::getAcousticEchoCancelerMobileMode() const {
+ return mMobileMode;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV1TargetPeakLevel(int targetPeakLevel) {
+ mTargetPeakLevel = targetPeakLevel;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
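+ // The legacy target level is a negative value in millibels; webrtc expects a positive dBFS magnitude.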
+ config.gain_controller1.target_level_dbfs = -(mTargetPeakLevel / 100);
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+int PreProcessingContext::getAutomaticGainControlV1TargetPeakLevel() const {
+ return mTargetPeakLevel;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV1MaxCompressionGain(int maxCompressionGain) {
+ mMaxCompressionGain = maxCompressionGain;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
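+ // The legacy max compression gain is in millibels; webrtc expects whole dB.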
+ config.gain_controller1.compression_gain_db = mMaxCompressionGain / 100;
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+int PreProcessingContext::getAutomaticGainControlV1MaxCompressionGain() const {
+ return mMaxCompressionGain;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV1EnableLimiter(bool enableLimiter) {
+ mEnableLimiter = enableLimiter;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
+ config.gain_controller1.enable_limiter = mEnableLimiter;
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+bool PreProcessingContext::getAutomaticGainControlV1EnableLimiter() const {
+ return mEnableLimiter;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV2DigitalGain(int gain) {
+ mDigitalGain = gain;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
+ config.gain_controller2.fixed_digital.gain_db = mDigitalGain;
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+int PreProcessingContext::getAutomaticGainControlV2DigitalGain() const {
+ return mDigitalGain;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV2LevelEstimator(
+ AutomaticGainControlV2::LevelEstimator levelEstimator) {
+ mLevelEstimator = levelEstimator;
+ return RetCode::SUCCESS;
+}
+
+AutomaticGainControlV2::LevelEstimator
+PreProcessingContext::getAutomaticGainControlV2LevelEstimator() const {
+ return mLevelEstimator;
+}
+
+RetCode PreProcessingContext::setAutomaticGainControlV2SaturationMargin(int saturationMargin) {
+ mSaturationMargin = saturationMargin;
+ return RetCode::SUCCESS;
+}
+
+int PreProcessingContext::getAutomaticGainControlV2SaturationMargin() const {
+ return mSaturationMargin;
+}
+
+RetCode PreProcessingContext::setNoiseSuppressionLevel(NoiseSuppression::Level level) {
+ mLevel = level;
+ std::lock_guard lg(mMutex);
+ auto config = mAudioProcessingModule->GetConfig();
+ config.noise_suppression.level =
+ (webrtc::AudioProcessing::Config::NoiseSuppression::Level)level;
+ mAudioProcessingModule->ApplyConfig(config);
+ return RetCode::SUCCESS;
+}
+
+NoiseSuppression::Level PreProcessingContext::getNoiseSuppressionLevel() const {
+ return mLevel;
+}
+
+IEffect::Status PreProcessingContext::lvmProcess(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ int64_t inputFrameCount = getCommon().input.frameCount;
+ int64_t outputFrameCount = getCommon().output.frameCount;
+ RETURN_VALUE_IF(inputFrameCount != outputFrameCount, status, "FrameCountMismatch");
+ RETURN_VALUE_IF(0 == getInputFrameSize(), status, "zeroFrameSize");
+
+ LOG(DEBUG) << __func__ << " start processing";
+ std::lock_guard lg(mMutex);
+
+ mProcessedMsk |= (1 << int(mType));
+
+ // The webrtc implementation clears was_stream_delay_set after every ProcessStream() call
+ mAudioProcessingModule->set_stream_delay_ms(mEchoDelayUs / 1000);
+
+ if ((mProcessedMsk & mEnabledMsk) == mEnabledMsk) {
+ mProcessedMsk = 0;
+ int processStatus = mAudioProcessingModule->ProcessStream(
+ (const int16_t* const)in, mInputConfig, mOutputConfig, (int16_t* const)out);
+ if (processStatus != 0) {
+ LOG(ERROR) << "Process stream failed with error " << processStatus;
+ return status;
+ }
+ }
+
+ mRevProcessedMsk |= (1 << int(mType));
+
+ if ((mRevProcessedMsk & mRevEnabledMsk) == mRevEnabledMsk) {
+ mRevProcessedMsk = 0;
+ int revProcessStatus = mAudioProcessingModule->ProcessReverseStream(
+ (const int16_t* const)in, mInputConfig, mInputConfig, (int16_t* const)out);
+ if (revProcessStatus != 0) {
+ LOG(ERROR) << "Process reverse stream failed with error " << revProcessStatus;
+ return status;
+ }
+ }
+
+ return {STATUS_OK, samples, samples};
+}
+
+} // namespace aidl::android::hardware::audio::effect
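A minimal, self-contained sketch (not part of the patch) of the bitmask gating used in lvmProcess() above: each enabled pre-processor sets its bit when it submits a buffer, and the shared webrtc module only runs once every enabled effect on the session has contributed, after which the mask is cleared for the next round. The effect IDs reuse the PreProcessingEffectType ordinals.

    #include <cassert>

    int main() {
        enum Type { AEC = 0, NS = 3 };                 // ACOUSTIC_ECHO_CANCELLATION, NOISE_SUPPRESSION
        const int enabledMsk = (1 << AEC) | (1 << NS); // both effects enabled on this session
        int processedMsk = 0;

        processedMsk |= (1 << AEC);                    // AEC submits its buffer first
        assert((processedMsk & enabledMsk) != enabledMsk);  // chain not complete, no ProcessStream()

        processedMsk |= (1 << NS);                     // NS submits its buffer
        assert((processedMsk & enabledMsk) == enabledMsk);  // now ProcessStream() would run once
        processedMsk = 0;                              // reset for the next processing round
        return 0;
    }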
diff --git a/media/libeffects/preprocessing/aidl/PreProcessingContext.h b/media/libeffects/preprocessing/aidl/PreProcessingContext.h
new file mode 100644
index 0000000..9ba1bbe
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/PreProcessingContext.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <audio_processing.h>
+#include <unordered_map>
+
+#include "PreProcessingTypes.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+enum PreProcEffectState {
+ PRE_PROC_STATE_UNINITIALIZED,
+ PRE_PROC_STATE_INITIALIZED,
+ PRE_PROC_STATE_ACTIVE,
+};
+
+class PreProcessingContext final : public EffectContext {
+ public:
+ PreProcessingContext(int statusDepth, const Parameter::Common& common,
+ const PreProcessingEffectType& type)
+ : EffectContext(statusDepth, common), mType(type) {
+ LOG(DEBUG) << __func__ << type;
+ mState = PRE_PROC_STATE_UNINITIALIZED;
+ }
+ ~PreProcessingContext() override { LOG(DEBUG) << __func__; }
+
+ RetCode init(const Parameter::Common& common);
+ RetCode deInit();
+
+ PreProcessingEffectType getPreProcessingType() const { return mType; }
+
+ RetCode enable();
+ RetCode disable();
+
+ RetCode setCommon(const Parameter::Common& common) override;
+ void updateConfigs(const Parameter::Common& common);
+
+ RetCode setAcousticEchoCancelerEchoDelay(int echoDelayUs);
+ int getAcousticEchoCancelerEchoDelay() const;
+ RetCode setAcousticEchoCancelerMobileMode(bool mobileMode);
+ bool getAcousticEchoCancelerMobileMode() const;
+
+ RetCode setAutomaticGainControlV1TargetPeakLevel(int targetPeakLevel);
+ int getAutomaticGainControlV1TargetPeakLevel() const;
+ RetCode setAutomaticGainControlV1MaxCompressionGain(int maxCompressionGain);
+ int getAutomaticGainControlV1MaxCompressionGain() const;
+ RetCode setAutomaticGainControlV1EnableLimiter(bool enableLimiter);
+ bool getAutomaticGainControlV1EnableLimiter() const;
+
+ RetCode setAutomaticGainControlV2DigitalGain(int gain);
+ int getAutomaticGainControlV2DigitalGain() const;
+ RetCode setAutomaticGainControlV2LevelEstimator(
+ AutomaticGainControlV2::LevelEstimator levelEstimator);
+ AutomaticGainControlV2::LevelEstimator getAutomaticGainControlV2LevelEstimator() const;
+ RetCode setAutomaticGainControlV2SaturationMargin(int saturationMargin);
+ int getAutomaticGainControlV2SaturationMargin() const;
+
+ RetCode setNoiseSuppressionLevel(NoiseSuppression::Level level);
+ NoiseSuppression::Level getNoiseSuppressionLevel() const;
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ static constexpr inline int kAgcDefaultTargetLevel = 3;
+ static constexpr inline int kAgcDefaultCompGain = 9;
+ static constexpr inline bool kAgcDefaultLimiter = true;
+ static constexpr inline webrtc::AudioProcessing::Config::NoiseSuppression::Level
+ kNsDefaultLevel = webrtc::AudioProcessing::Config::NoiseSuppression::kModerate;
+
+ std::mutex mMutex;
+ const PreProcessingEffectType mType;
+ PreProcEffectState mState; // current state
+
+ // handle on webRTC audio processing module (APM)
+ rtc::scoped_refptr<webrtc::AudioProcessing> mAudioProcessingModule GUARDED_BY(mMutex);
+
+ int mEnabledMsk GUARDED_BY(mMutex); // bit field containing IDs of enabled pre processors
+ int mProcessedMsk GUARDED_BY(mMutex); // bit field containing IDs of pre processors already
+ // processed in current round
+ int mRevEnabledMsk GUARDED_BY(mMutex); // bit field containing IDs of enabled pre processors
+ // with reverse channel
+ int mRevProcessedMsk GUARDED_BY(mMutex); // bit field containing IDs of pre processors with
+ // reverse channel already processed in current round
+
+ webrtc::StreamConfig mInputConfig; // input stream configuration
+ webrtc::StreamConfig mOutputConfig; // output stream configuration
+
+ // Acoustic Echo Canceler
+ int mEchoDelayUs = 0;
+ bool mMobileMode = false;
+
+ // Automatic Gain Control V1
+ int mTargetPeakLevel = 0;
+ int mMaxCompressionGain = 0;
+ bool mEnableLimiter = false;
+
+ // Automatic Gain Control V2
+ int mDigitalGain = 0;
+ AutomaticGainControlV2::LevelEstimator mLevelEstimator =
+ AutomaticGainControlV2::LevelEstimator::RMS;
+ int mSaturationMargin = 2;
+
+ // NoiseSuppression
+ NoiseSuppression::Level mLevel = NoiseSuppression::Level::LOW;
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/preprocessing/aidl/PreProcessingSession.h b/media/libeffects/preprocessing/aidl/PreProcessingSession.h
new file mode 100644
index 0000000..877292f
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/PreProcessingSession.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <algorithm>
+#include <memory>
+#include <unordered_map>
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+
+#include "PreProcessingContext.h"
+#include "PreProcessingTypes.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+/**
+ * @brief Maintain all effect pre-processing sessions.
+ *
+ * Sessions are identified by the session ID; a maximum of MAX_PRE_PROC_SESSIONS is supported
+ * by the pre-processing implementation.
+ */
+class PreProcessingSession {
+ public:
+ static PreProcessingSession& getPreProcessingSession() {
+ static PreProcessingSession instance;
+ return instance;
+ }
+
+ static bool findPreProcessingTypeInList(
+ std::vector<std::shared_ptr<PreProcessingContext>>& list,
+ const PreProcessingEffectType& type, bool remove = false) {
+ auto itor = std::find_if(list.begin(), list.end(),
+ [type](const std::shared_ptr<PreProcessingContext>& bundle) {
+ return bundle->getPreProcessingType() == type;
+ });
+ if (itor == list.end()) {
+ return false;
+ }
+ if (remove) {
+ (*itor)->deInit();
+ list.erase(itor);
+ }
+ return true;
+ }
+
+ /**
+ * Create a PreProcessingContext of a certain type in a shared_ptr container; each session must
+ * not have more than one context of each type.
+ */
+ std::shared_ptr<PreProcessingContext> createSession(const PreProcessingEffectType& type,
+ int statusDepth,
+ const Parameter::Common& common) {
+ int sessionId = common.session;
+ LOG(DEBUG) << __func__ << type << " with sessionId " << sessionId;
+ std::lock_guard lg(mMutex);
+ if (mSessionMap.count(sessionId) == 0 && mSessionMap.size() >= MAX_PRE_PROC_SESSIONS) {
+ LOG(ERROR) << __func__ << " exceed max bundle session";
+ return nullptr;
+ }
+
+ if (mSessionMap.count(sessionId)) {
+ if (findPreProcessingTypeInList(mSessionMap[sessionId], type)) {
+ LOG(ERROR) << __func__ << type << " already exists in session " << sessionId;
+ return nullptr;
+ }
+ }
+
+ auto& list = mSessionMap[sessionId];
+ auto context = std::make_shared<PreProcessingContext>(statusDepth, common, type);
+ RETURN_VALUE_IF(!context, nullptr, "failedToCreateContext");
+
+ RetCode ret = context->init(common);
+ if (RetCode::SUCCESS != ret) {
+ LOG(ERROR) << __func__ << " context init ret " << ret;
+ return nullptr;
+ }
+ list.push_back(context);
+ return context;
+ }
+
+ void releaseSession(const PreProcessingEffectType& type, int sessionId) {
+ LOG(DEBUG) << __func__ << type << " sessionId " << sessionId;
+ std::lock_guard lg(mMutex);
+ if (mSessionMap.count(sessionId)) {
+ auto& list = mSessionMap[sessionId];
+ if (!findPreProcessingTypeInList(list, type, true /* remove */)) {
+ LOG(ERROR) << __func__ << " can't find " << type << "in session " << sessionId;
+ return;
+ }
+ if (list.empty()) {
+ mSessionMap.erase(sessionId);
+ }
+ }
+ }
+
+ private:
+ // Lock for mSessionMap access.
+ std::mutex mMutex;
+ // Max session number supported.
+ static constexpr int MAX_PRE_PROC_SESSIONS = 8;
+ std::unordered_map<int /* session ID */, std::vector<std::shared_ptr<PreProcessingContext>>>
+ mSessionMap GUARDED_BY(mMutex);
+};
+} // namespace aidl::android::hardware::audio::effect
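A hypothetical usage sketch (not part of the patch) showing how an effect implementation might obtain and release a per-session context through the singleton above; the statusDepth value and the contents of Parameter::Common are placeholders.

    #include "PreProcessingSession.h"

    namespace example {
    using ::aidl::android::hardware::audio::effect::Parameter;
    using ::aidl::android::hardware::audio::effect::PreProcessingEffectType;
    using ::aidl::android::hardware::audio::effect::PreProcessingSession;

    // `common` carries the session id used as the map key.
    void createAndReleaseNoiseSuppression(const Parameter::Common& common) {
        auto& session = PreProcessingSession::getPreProcessingSession();

        // At most one context of each type per session; a duplicate create returns nullptr.
        auto context = session.createSession(PreProcessingEffectType::NOISE_SUPPRESSION,
                                             1 /* statusDepth */, common);
        if (context == nullptr) return;

        // ... process audio through the context ...

        session.releaseSession(PreProcessingEffectType::NOISE_SUPPRESSION, common.session);
    }
    }  // namespace example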
diff --git a/media/libeffects/preprocessing/aidl/PreProcessingTypes.h b/media/libeffects/preprocessing/aidl/PreProcessingTypes.h
new file mode 100644
index 0000000..2c880d4
--- /dev/null
+++ b/media/libeffects/preprocessing/aidl/PreProcessingTypes.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include <audio_effects/effect_aec.h>
+#include <audio_effects/effect_agc.h>
+#include <audio_effects/effect_agc2.h>
+#include <audio_effects/effect_ns.h>
+
+#include "effect-impl/EffectTypes.h"
+#include "effect-impl/EffectUUID.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+// Acoustic Echo Cancellation
+static const std::string kAcousticEchoCancelerEffectName = "Acoustic Echo Canceler";
+static const std::vector<Range::AcousticEchoCancelerRange> kAcousticEchoCancelerRanges = {
+ MAKE_RANGE(AcousticEchoCanceler, AcousticEchoCanceler::echoDelayUs, 0, 500)};
+static const Capability kAcousticEchoCancelerCap = {.range = kAcousticEchoCancelerRanges};
+static const Descriptor kAcousticEchoCancelerDesc = {
+ .common = {.id = {.type = kAcousticEchoCancelerTypeUUID,
+ .uuid = kAcousticEchoCancelerSwImplUUID,
+ .proxy = kEffectNullUuid},
+ .flags = {.type = Flags::Type::PRE_PROC, .deviceIndication = true},
+ .name = kAcousticEchoCancelerEffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = kAcousticEchoCancelerCap};
+
+// Automatic Gain Control 1
+static const std::string kAutomaticGainControlV1EffectName = "Automatic Gain Control V1";
+static const std::vector<Range::AutomaticGainControlV1Range> kAutomaticGainControlV1Ranges = {
+ MAKE_RANGE(AutomaticGainControlV1, AutomaticGainControlV1::targetPeakLevelDbFs, -3100, 0),
+ MAKE_RANGE(AutomaticGainControlV1, AutomaticGainControlV1::maxCompressionGainDb, 0, 9000)};
+static const Capability kAutomaticGainControlV1Cap = {.range = kAutomaticGainControlV1Ranges};
+static const Descriptor kAutomaticGainControlV1Desc = {
+ .common = {.id = {.type = kAutomaticGainControlV1TypeUUID,
+ .uuid = kAutomaticGainControlV1SwImplUUID,
+ .proxy = kEffectNullUuid},
+ .flags = {.type = Flags::Type::PRE_PROC, .deviceIndication = true},
+ .name = kAutomaticGainControlV1EffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = kAutomaticGainControlV1Cap};
+
+// Automatic Gain Control 2
+static const std::string kAutomaticGainControlV2EffectName = "Automatic Gain Control V2";
+const std::vector<Range::AutomaticGainControlV2Range> kAutomaticGainControlV2Ranges = {
+ MAKE_RANGE(AutomaticGainControlV2, AutomaticGainControlV2::fixedDigitalGainMb, 0, 90),
+ // extra_saturation_margin_db is no longer configurable in webrtc
+ MAKE_RANGE(AutomaticGainControlV2, AutomaticGainControlV2::saturationMarginMb, 2, 2),
+ // WebRTC only supports RMS level estimator now
+ MAKE_RANGE(AutomaticGainControlV2, AutomaticGainControlV2::levelEstimator,
+ AutomaticGainControlV2::LevelEstimator::RMS,
+ AutomaticGainControlV2::LevelEstimator::RMS)};
+static const Capability kAutomaticGainControlV2Cap = {.range = kAutomaticGainControlV2Ranges};
+static const Descriptor kAutomaticGainControlV2Desc = {
+ .common = {.id = {.type = kAutomaticGainControlV2TypeUUID,
+ .uuid = kAutomaticGainControlV2SwImplUUID,
+ .proxy = kEffectNullUuid},
+ .flags = {.type = Flags::Type::PRE_PROC, .deviceIndication = true},
+ .name = kAutomaticGainControlV2EffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = kAutomaticGainControlV2Cap};
+
+// Noise suppression
+static const std::string kNoiseSuppressionEffectName = "Noise Suppression";
+static const Descriptor kNoiseSuppressionDesc = {
+ .common = {.id = {.type = kNoiseSuppressionTypeUUID,
+ .uuid = kNoiseSuppressionSwImplUUID,
+ .proxy = kEffectNullUuid},
+ .flags = {.type = Flags::Type::PRE_PROC, .deviceIndication = true},
+ .name = kNoiseSuppressionEffectName,
+ .implementor = "The Android Open Source Project"}};
+
+enum class PreProcessingEffectType {
+ ACOUSTIC_ECHO_CANCELLATION,
+ AUTOMATIC_GAIN_CONTROL_V1,
+ AUTOMATIC_GAIN_CONTROL_V2,
+ NOISE_SUPPRESSION,
+};
+
+inline std::ostream& operator<<(std::ostream& out, const PreProcessingEffectType& type) {
+ switch (type) {
+ case PreProcessingEffectType::ACOUSTIC_ECHO_CANCELLATION:
+ return out << kAcousticEchoCancelerEffectName;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V1:
+ return out << kAutomaticGainControlV1EffectName;
+ case PreProcessingEffectType::AUTOMATIC_GAIN_CONTROL_V2:
+ return out << kAutomaticGainControlV2EffectName;
+ case PreProcessingEffectType::NOISE_SUPPRESSION:
+ return out << kNoiseSuppressionEffectName;
+ }
+ return out << "EnumPreProcessingEffectTypeError";
+}
+
+} // namespace aidl::android::hardware::audio::effect
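A small arithmetic sketch (not part of the patch), assuming the AGC V1 parameter ranges above (targetPeakLevelDbFs in [-3100, 0], maxCompressionGainDb in [0, 9000]) are carried in hundredths of a dB, which is consistent with the divide-by-100 conversions in PreProcessingContext::setAutomaticGainControlV1TargetPeakLevel() and setAutomaticGainControlV1MaxCompressionGain():

    #include <cassert>

    int main() {
        const int targetPeakLevelMb = -300;     // -3.00 dBFS requested through the AIDL parameter
        const int maxCompressionGainMb = 900;   // 9.00 dB requested through the AIDL parameter

        // webrtc gain_controller1 expects whole dB; the target level is negated because it is
        // expressed as positive headroom below full scale.
        const int target_level_dbfs = -(targetPeakLevelMb / 100);
        const int compression_gain_db = maxCompressionGainMb / 100;

        assert(target_level_dbfs == 3);   // matches kAgcDefaultTargetLevel
        assert(compression_gain_db == 9); // matches kAgcDefaultCompGain
        return 0;
    }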
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.cpp b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
index 4405407..5d0d08d 100644
--- a/media/libeffects/visualizer/aidl/VisualizerContext.cpp
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.cpp
@@ -17,18 +17,19 @@
#include "VisualizerContext.h"
#include <algorithm>
+#include <math.h>
+#include <time.h>
+
#include <android/binder_status.h>
#include <audio_utils/primitives.h>
-#include <math.h>
#include <system/audio.h>
-#include <time.h>
#include <Utils.h>
#ifndef BUILD_FLOAT
#error AIDL Visualizer only support float 32bits, make sure add cflags -DBUILD_FLOAT,
#endif
-using android::hardware::audio::common::getChannelCount;
+using aidl::android::hardware::audio::common::getChannelCount;
namespace aidl::android::hardware::audio::effect {
@@ -191,9 +192,15 @@
std::vector<uint8_t> VisualizerContext::capture() {
std::vector<uint8_t> result;
std::lock_guard lg(mMutex);
- RETURN_VALUE_IF(mState != State::ACTIVE, result, "illegalState");
- const uint32_t deltaMs = getDeltaTimeMsFromUpdatedTime_l();
+ // cts android.media.audio.cts.VisualizerTest expects silence data when the effect is not running
+ // RETURN_VALUE_IF(mState != State::ACTIVE, result, "illegalState");
+ if (mState != State::ACTIVE) {
+ result.resize(mCaptureSamples);
+ memset(result.data(), 0x80, mCaptureSamples);
+ return result;
+ }
+ const uint32_t deltaMs = getDeltaTimeMsFromUpdatedTime_l();
// if audio framework has stopped playing audio although the effect is still active we must
// clear the capture buffer to return silence
if ((mLastCaptureIdx == mCaptureIdx) && (mBufferUpdateTime.tv_sec != 0) &&
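A short sketch (not part of the patch) of why the capture buffer is filled with 0x80 when the effect is not active: the visualizer capture data is unsigned 8-bit PCM, which is biased by 128, so 0x80 corresponds to a signed amplitude of zero, i.e. silence.

    #include <cassert>
    #include <cstdint>

    int main() {
        const uint8_t captured = 0x80;  // value used for the silence fill above
        // Removing the 128 bias (equivalently, XOR with 0x80) yields the signed amplitude.
        const int8_t amplitude = static_cast<int8_t>(captured ^ 0x80);
        assert(amplitude == 0);
        return 0;
    }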
diff --git a/media/libeffects/visualizer/aidl/VisualizerContext.h b/media/libeffects/visualizer/aidl/VisualizerContext.h
index 3cb711e..958035f 100644
--- a/media/libeffects/visualizer/aidl/VisualizerContext.h
+++ b/media/libeffects/visualizer/aidl/VisualizerContext.h
@@ -83,7 +83,7 @@
uint32_t mLastCaptureIdx GUARDED_BY(mMutex) = 0;
Visualizer::ScalingMode mScalingMode GUARDED_BY(mMutex) = Visualizer::ScalingMode::NORMALIZED;
struct timespec mBufferUpdateTime GUARDED_BY(mMutex);
- // capture buf with 8 bits PCM
+ // capture buf with 8 bits mono PCM samples
std::array<uint8_t, kMaxCaptureBufSize> mCaptureBuf GUARDED_BY(mMutex);
uint32_t mDownstreamLatency GUARDED_BY(mMutex) = 0;
uint32_t mCaptureSamples GUARDED_BY(mMutex) = kMaxCaptureBufSize;
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
index f64aedf..9955862 100644
--- a/media/libheadtracking/Android.bp
+++ b/media/libheadtracking/Android.bp
@@ -16,11 +16,13 @@
"Pose.cpp",
"PoseBias.cpp",
"PoseDriftCompensator.cpp",
+ "PosePredictor.cpp",
"PoseRateLimiter.cpp",
"QuaternionUtil.cpp",
"ScreenHeadFusion.cpp",
"StillnessDetector.cpp",
"Twist.cpp",
+ "VectorRecorder.cpp",
],
shared_libs: [
"libaudioutils",
@@ -35,6 +37,15 @@
export_header_lib_headers: [
"libeigen",
],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ product_variables: {
+ debuggable: {
+ // enable experiments only in userdebug and eng builds
+ cflags: ["-DENABLE_VERIFICATION"],
+ },
+ },
}
cc_library {
@@ -76,6 +87,7 @@
"Pose-test.cpp",
"PoseBias-test.cpp",
"PoseDriftCompensator-test.cpp",
+ "PosePredictor.cpp",
"PoseRateLimiter-test.cpp",
"QuaternionUtil-test.cpp",
"ScreenHeadFusion-test.cpp",
@@ -84,6 +96,7 @@
],
shared_libs: [
"libaudioutils",
+ "libbase", // StringAppendF
"libheadtracking",
],
}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
index 299192f..5190f52 100644
--- a/media/libheadtracking/HeadTrackingProcessor-test.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -15,10 +15,10 @@
*/
#include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
@@ -82,6 +82,8 @@
std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+ processor->setPosePredictorType(PosePredictorType::TWIST);
+
// Establish a baseline for the drift compensators.
processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
processor->setWorldToScreenPose(0, Pose3f());
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 101b825..8502af0 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -18,10 +18,11 @@
#include <android-base/stringprintf.h>
#include <audio_utils/SimpleLog.h>
#include "media/HeadTrackingProcessor.h"
+#include "media/QuaternionUtil.h"
#include "ModeSelector.h"
#include "PoseBias.h"
-#include "QuaternionUtil.h"
+#include "PosePredictor.h"
#include "ScreenHeadFusion.h"
#include "StillnessDetector.h"
@@ -59,8 +60,8 @@
void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
const Twist3f& headTwist) override {
- Pose3f predictedWorldToHead =
- worldToHead * integrate(headTwist, mOptions.predictionDuration);
+ const Pose3f predictedWorldToHead = mPosePredictor.predict(
+ timestamp, worldToHead, headTwist, mOptions.predictionDuration);
mHeadPoseBias.setInput(predictedWorldToHead);
mHeadStillnessDetector.setInput(timestamp, predictedWorldToHead);
mWorldToHeadTimestamp = timestamp;
@@ -161,6 +162,10 @@
}
}
+ void setPosePredictorType(PosePredictorType type) override {
+ mPosePredictor.setPosePredictorType(type);
+ }
+
std::string toString_l(unsigned level) const override {
std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "HeadTrackingProcessor:\n";
@@ -186,6 +191,7 @@
prefixSpace.c_str(), mOptions.screenStillnessRotationalThreshold);
ss += mModeSelector.toString(level + 1);
ss += mRateLimiter.toString(level + 1);
+ ss += mPosePredictor.toString(level + 1);
ss.append(prefixSpace + "ReCenterHistory:\n");
ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), mMaxLocalLogLine);
return ss;
@@ -207,6 +213,7 @@
ScreenHeadFusion mScreenHeadFusion;
ModeSelector mModeSelector;
PoseRateLimiter mRateLimiter;
+ PosePredictor mPosePredictor;
static constexpr std::size_t mMaxLocalLogLine = 10;
SimpleLog mLocalLog{mMaxLocalLogLine};
};
@@ -230,5 +237,26 @@
return "EnumNotImplemented";
};
+std::string toString(PosePredictorType posePredictorType) {
+ switch (posePredictorType) {
+ case PosePredictorType::AUTO: return "AUTO";
+ case PosePredictorType::LAST: return "LAST";
+ case PosePredictorType::TWIST: return "TWIST";
+ case PosePredictorType::LEAST_SQUARES: return "LEAST_SQUARES";
+ }
+ return "UNKNOWN" + std::to_string((int)posePredictorType);
+}
+
+bool isValidPosePredictorType(PosePredictorType posePredictorType) {
+ switch (posePredictorType) {
+ case PosePredictorType::AUTO:
+ case PosePredictorType::LAST:
+ case PosePredictorType::TWIST:
+ case PosePredictorType::LEAST_SQUARES:
+ return true;
+ }
+ return false;
+}
+
} // namespace media
} // namespace android
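A hypothetical sketch (not part of the patch) of how the helpers added above could be used to validate an externally supplied predictor selection before applying it; the applySetting() helper and the raw integer setting are illustrative only.

    #include <iostream>
    #include <string>
    #include "media/HeadTrackingProcessor.h"

    void applySetting(android::media::HeadTrackingProcessor& processor, int rawValue) {
        const auto type = static_cast<android::media::PosePredictorType>(rawValue);
        if (!android::media::isValidPosePredictorType(type)) {
            // PosePredictor::setPosePredictorType() also ignores invalid values.
            std::cout << "ignoring invalid predictor type " << rawValue << "\n";
            return;
        }
        std::cout << "selecting predictor " << android::media::toString(type) << "\n";
        processor.setPosePredictorType(type);
    }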
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
index a136e6b..6925908 100644
--- a/media/libheadtracking/ModeSelector-test.cpp
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -18,7 +18,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
index 6277090..7ee21b3 100644
--- a/media/libheadtracking/ModeSelector.cpp
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -117,10 +117,12 @@
std::string ModeSelector::toString(unsigned level) const {
std::string prefixSpace(level, ' ');
std::string ss(prefixSpace);
- StringAppendF(&ss, "ModeSelector: ScreenToStage %s\n",
- mScreenToStage.toString().c_str());
- ss.append(prefixSpace + "Mode downgrade history:\n");
- ss += mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine);
+ ss.append("ModeSelector: ScreenToStage ")
+ .append(mScreenToStage.toString())
+ .append("\n")
+ .append(prefixSpace)
+ .append("Mode change history:\n")
+ .append(mLocalLog.dumpToString((prefixSpace + " ").c_str(), sMaxLocalLogLine));
return ss;
}
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
index a9e18ce..29dba29 100644
--- a/media/libheadtracking/Pose-test.cpp
+++ b/media/libheadtracking/Pose-test.cpp
@@ -18,7 +18,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using android::media::Pose3f;
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
index 4a4b56a..e03725b 100644
--- a/media/libheadtracking/Pose.cpp
+++ b/media/libheadtracking/Pose.cpp
@@ -16,8 +16,8 @@
#include <android-base/stringprintf.h>
#include "media/Pose.h"
+#include "media/QuaternionUtil.h"
#include "media/Twist.h"
-#include "QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/PoseBias-test.cpp b/media/libheadtracking/PoseBias-test.cpp
index 9f42a2c..659dda0 100644
--- a/media/libheadtracking/PoseBias-test.cpp
+++ b/media/libheadtracking/PoseBias-test.cpp
@@ -17,7 +17,8 @@
#include <gtest/gtest.h>
#include "PoseBias.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
index df0a05f..521e3eb 100644
--- a/media/libheadtracking/PoseDriftCompensator-test.cpp
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -18,7 +18,8 @@
#include <cmath>
#include "PoseDriftCompensator.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
index 0e90cad..2775790 100644
--- a/media/libheadtracking/PoseDriftCompensator.cpp
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -18,7 +18,7 @@
#include <cmath>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/PosePredictor.cpp b/media/libheadtracking/PosePredictor.cpp
new file mode 100644
index 0000000..f67a966
--- /dev/null
+++ b/media/libheadtracking/PosePredictor.cpp
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PosePredictor.h"
+
+namespace android::media {
+
+namespace {
+#ifdef ENABLE_VERIFICATION
+constexpr bool kEnableVerification = true;
+constexpr std::array<int, 3> kLookAheadMs{ 50, 100, 200 };
+#else
+constexpr bool kEnableVerification = false;
+constexpr std::array<int, 0> kLookAheadMs{};
+#endif
+
+} // namespace
+
+void LeastSquaresPredictor::add(int64_t atNs, const Pose3f& pose, const Twist3f& twist)
+{
+ (void)twist;
+ mLastAtNs = atNs;
+ mLastPose = pose;
+ const auto q = pose.rotation();
+ const double datNs = static_cast<double>(atNs);
+ mRw.add({datNs, q.w()});
+ mRx.add({datNs, q.x()});
+ mRy.add({datNs, q.y()});
+ mRz.add({datNs, q.z()});
+}
+
+Pose3f LeastSquaresPredictor::predict(int64_t atNs) const
+{
+ if (mRw.getN() < kMinimumSamplesForPrediction) return mLastPose;
+
+ /*
+ * Using parametric form, we have q(t) = { w(t), x(t), y(t), z(t) }.
+ * We compute the least squares prediction of w, x, y, z.
+ */
+ const double dLookahead = static_cast<double>(atNs);
+ Eigen::Quaternionf lsq(
+ mRw.getYFromX(dLookahead),
+ mRx.getYFromX(dLookahead),
+ mRy.getYFromX(dLookahead),
+ mRz.getYFromX(dLookahead));
+
+ /*
+ * We cheat here, since the result lsq is the least squares prediction
+ * in H (arbitrary quaternion), not the least squares prediction in
+ * SO(3) (unit quaternion).
+ *
+ * In other words, the result for lsq is most likely not a unit quaternion.
+ * To solve this, we normalize, thereby selecting the closest unit quaternion
+ * in SO(3) to the prediction in H.
+ */
+ lsq.normalize();
+ return Pose3f(lsq);
+}
+
+void LeastSquaresPredictor::reset() {
+ mLastAtNs = {};
+ mLastPose = {};
+ mRw.reset();
+ mRx.reset();
+ mRy.reset();
+ mRz.reset();
+}
+
+std::string LeastSquaresPredictor::toString(size_t index) const {
+ std::string s(index, ' ');
+ s.append("LeastSquaresPredictor using alpha: ")
+ .append(std::to_string(mAlpha))
+ .append(" last pose: ")
+ .append(mLastPose.toString())
+ .append("\n");
+ return s;
+}
+
+// Formatting
+static inline std::vector<size_t> createDelimiterIdx(size_t predictors, size_t lookaheads) {
+ if (predictors == 0) return {};
+ --predictors;
+ std::vector<size_t> delimiterIdx(predictors);
+ for (size_t i = 0; i < predictors; ++i) {
+ delimiterIdx[i] = (i + 1) * lookaheads;
+ }
+ return delimiterIdx;
+}
+
+PosePredictor::PosePredictor()
+ : mPredictors{ // must match switch in getCurrentPredictor()
+ std::make_shared<LastPredictor>(),
+ std::make_shared<TwistPredictor>(),
+ std::make_shared<LeastSquaresPredictor>(),
+ }
+ , mLookaheadMs(kLookAheadMs.begin(), kLookAheadMs.end())
+ , mVerifiers(std::size(mLookaheadMs) * std::size(mPredictors))
+ , mDelimiterIdx(createDelimiterIdx(std::size(mPredictors), std::size(mLookaheadMs)))
+ , mPredictionRecorder(
+ std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ mDelimiterIdx)
+ , mPredictionDurableRecorder(
+ std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ mDelimiterIdx)
+ {
+}
+
+Pose3f PosePredictor::predict(
+ int64_t timestampNs, const Pose3f& pose, const Twist3f& twist, float predictionDurationNs)
+{
+ if (timestampNs - mLastTimestampNs > kMaximumSampleIntervalBeforeResetNs) {
+ for (const auto& predictor : mPredictors) {
+ predictor->reset();
+ }
+ ++mResets;
+ }
+ mLastTimestampNs = timestampNs;
+
+ auto selectedPredictor = getCurrentPredictor();
+ if constexpr (kEnableVerification) {
+ // Update all Predictors
+ for (const auto& predictor : mPredictors) {
+ predictor->add(timestampNs, pose, twist);
+ }
+
+ // Update Verifiers and calculate errors
+ std::vector<float> error(std::size(mVerifiers));
+ for (size_t i = 0; i < mLookaheadMs.size(); ++i) {
+ constexpr float RADIAN_TO_DEGREES = 180 / M_PI;
+ const int64_t atNs =
+ timestampNs + mLookaheadMs[i] * PosePredictorVerifier::kMillisToNanos;
+
+ for (size_t j = 0; j < mPredictors.size(); ++j) {
+ const size_t idx = i * std::size(mPredictors) + j;
+ mVerifiers[idx].verifyActualPose(timestampNs, pose);
+ mVerifiers[idx].addPredictedPose(atNs, mPredictors[j]->predict(atNs));
+ error[idx] = RADIAN_TO_DEGREES * mVerifiers[idx].lastError();
+ }
+ }
+ // Record errors
+ mPredictionRecorder.record(error);
+ mPredictionDurableRecorder.record(error);
+ } else /* constexpr */ {
+ selectedPredictor->add(timestampNs, pose, twist);
+ }
+
+ // Deliver prediction
+ const int64_t predictionTimeNs = timestampNs + (int64_t)predictionDurationNs;
+ return selectedPredictor->predict(predictionTimeNs);
+}
+
+void PosePredictor::setPosePredictorType(PosePredictorType type) {
+ if (!isValidPosePredictorType(type)) return;
+ if (type == mSetType) return;
+ mSetType = type;
+ if (type == android::media::PosePredictorType::AUTO) {
+ type = android::media::PosePredictorType::LEAST_SQUARES;
+ }
+ if (type != mCurrentType) {
+ mCurrentType = type;
+ if constexpr (!kEnableVerification) {
+ // Verification keeps all predictors up-to-date.
+ // If we don't enable verification, we must reset the current predictor.
+ getCurrentPredictor()->reset();
+ }
+ }
+}
+
+std::string PosePredictor::toString(size_t index) const {
+ std::string prefixSpace(index, ' ');
+ std::string ss(prefixSpace);
+ ss.append("PosePredictor:\n")
+ .append(prefixSpace)
+ .append(" Current Prediction Type: ")
+ .append(android::media::toString(mCurrentType))
+ .append("\n")
+ .append(prefixSpace)
+ .append(" Resets: ")
+ .append(std::to_string(mResets))
+ .append("\n")
+ .append(getCurrentPredictor()->toString(index + 1));
+ if constexpr (kEnableVerification) {
+ // dump verification
+ ss.append(prefixSpace)
+ .append(" Prediction abs error (L1) degrees [ type (last twist least-squares) x ( ");
+ for (size_t i = 0; i < mLookaheadMs.size(); ++i) {
+ if (i > 0) ss.append(" : ");
+ ss.append(std::to_string(mLookaheadMs[i]));
+ }
+ std::vector<float> cumulativeAverageErrors(std::size(mVerifiers));
+ for (size_t i = 0; i < cumulativeAverageErrors.size(); ++i) {
+ cumulativeAverageErrors[i] = mVerifiers[i].cumulativeAverageError();
+ }
+ ss.append(" ) ms ]\n")
+ .append(prefixSpace)
+ .append(" Cumulative Average Error:\n")
+ .append(prefixSpace)
+ .append(" ")
+ .append(VectorRecorder::toString(cumulativeAverageErrors, mDelimiterIdx, "%.3g"))
+ .append("\n")
+ .append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mPredictionDurableRecorder.toString(index + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mPredictionRecorder.toString(index + 3));
+ }
+ return ss;
+}
+
+std::shared_ptr<PredictorBase> PosePredictor::getCurrentPredictor() const {
+ // we don't use a map here, we look up directly
+ switch (mCurrentType) {
+ default:
+ case android::media::PosePredictorType::LAST:
+ return mPredictors[0];
+ case android::media::PosePredictorType::TWIST:
+ return mPredictors[1];
+ case android::media::PosePredictorType::AUTO: // shouldn't occur here.
+ case android::media::PosePredictorType::LEAST_SQUARES:
+ return mPredictors[2];
+ }
+}
+
+} // namespace android::media
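A minimal Eigen sketch (not part of the patch) of the normalization step described in LeastSquaresPredictor::predict(): fitting w, x, y, z independently yields a quaternion in H that is generally not of unit length, and normalizing selects the closest unit quaternion, i.e. the closest rotation in SO(3). The component values below are arbitrary.

    #include <Eigen/Geometry>
    #include <cassert>
    #include <cmath>

    int main() {
        // Hypothetical per-component least squares predictions (w, x, y, z).
        Eigen::Quaternionf lsq(0.9f, 0.1f, 0.05f, 0.02f);
        assert(std::abs(lsq.norm() - 1.f) > 1e-3f);  // generally not a unit quaternion

        lsq.normalize();                             // project back onto SO(3)
        assert(std::abs(lsq.norm() - 1.f) < 1e-6f);
        return 0;
    }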
diff --git a/media/libheadtracking/PosePredictor.h b/media/libheadtracking/PosePredictor.h
new file mode 100644
index 0000000..06983cc
--- /dev/null
+++ b/media/libheadtracking/PosePredictor.h
@@ -0,0 +1,203 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "PosePredictorVerifier.h"
+#include <memory>
+#include <audio_utils/Statistics.h>
+#include <media/PosePredictorType.h>
+#include <media/Twist.h>
+#include <media/VectorRecorder.h>
+
+namespace android::media {
+
+// Interface for generic pose predictors
+class PredictorBase {
+public:
+ virtual ~PredictorBase() = default;
+ virtual void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) = 0;
+ virtual Pose3f predict(int64_t atNs) const = 0;
+ virtual void reset() = 0;
+ virtual std::string toString(size_t index) const = 0;
+};
+
+/**
+ * LastPredictor uses the last sample Pose for prediction
+ *
+ * This class is not thread-safe.
+ */
+class LastPredictor : public PredictorBase {
+public:
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override {
+ (void)atNs;
+ (void)twist;
+ mLastPose = pose;
+ }
+
+ Pose3f predict(int64_t atNs) const override {
+ (void)atNs;
+ return mLastPose;
+ }
+
+ void reset() override {
+ mLastPose = {};
+ }
+
+ std::string toString(size_t index) const override {
+ std::string s(index, ' ');
+ s.append("LastPredictor using last pose: ")
+ .append(mLastPose.toString())
+ .append("\n");
+ return s;
+ }
+
+private:
+ Pose3f mLastPose;
+};
+
+/**
+ * TwistPredictor uses the last sample Twist and Pose for prediction
+ *
+ * This class is not thread-safe.
+ */
+class TwistPredictor : public PredictorBase {
+public:
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override {
+ mLastAtNs = atNs;
+ mLastPose = pose;
+ mLastTwist = twist;
+ }
+
+ Pose3f predict(int64_t atNs) const override {
+ return mLastPose * integrate(mLastTwist, atNs - mLastAtNs);
+ }
+
+ void reset() override {
+ mLastAtNs = {};
+ mLastPose = {};
+ mLastTwist = {};
+ }
+
+ std::string toString(size_t index) const override {
+ std::string s(index, ' ');
+ s.append("TwistPredictor using last pose: ")
+ .append(mLastPose.toString())
+ .append(" last twist: ")
+ .append(mLastTwist.toString())
+ .append("\n");
+ return s;
+ }
+
+private:
+ int64_t mLastAtNs{};
+ Pose3f mLastPose;
+ Twist3f mLastTwist;
+};
+
+
+/**
+ * LeastSquaresPredictor uses the Pose history for prediction.
+ *
+ * An exponentially weighted least squares fit is used.
+ *
+ * This class is not thread-safe.
+ */
+class LeastSquaresPredictor : public PredictorBase {
+public:
+ // alpha is the exponential decay.
+ LeastSquaresPredictor(double alpha = kDefaultAlphaEstimator)
+ : mAlpha(alpha)
+ , mRw(alpha)
+ , mRx(alpha)
+ , mRy(alpha)
+ , mRz(alpha)
+ {}
+
+ void add(int64_t atNs, const Pose3f& pose, const Twist3f& twist) override;
+ Pose3f predict(int64_t atNs) const override;
+ void reset() override;
+ std::string toString(size_t index) const override;
+
+private:
+ const double mAlpha;
+ int64_t mLastAtNs{};
+ Pose3f mLastPose;
+ static constexpr double kDefaultAlphaEstimator = 0.5;
+ static constexpr size_t kMinimumSamplesForPrediction = 4;
+ audio_utils::LinearLeastSquaresFit<double> mRw;
+ audio_utils::LinearLeastSquaresFit<double> mRx;
+ audio_utils::LinearLeastSquaresFit<double> mRy;
+ audio_utils::LinearLeastSquaresFit<double> mRz;
+};
+
+/*
+ * PosePredictor predicts the pose given sensor input at a time in the future.
+ *
+ * This class is not thread-safe.
+ */
+class PosePredictor {
+public:
+ PosePredictor();
+
+ Pose3f predict(int64_t timestampNs, const Pose3f& pose, const Twist3f& twist,
+ float predictionDurationNs);
+
+ void setPosePredictorType(PosePredictorType type);
+
+ // convert predictions to a printable string
+ std::string toString(size_t index) const;
+
+private:
+ static constexpr int64_t kMaximumSampleIntervalBeforeResetNs =
+ 300'000'000;
+
+ // Predictors
+ const std::vector<std::shared_ptr<PredictorBase>> mPredictors;
+
+ // Verifiers, create one for an array of future lookaheads for comparison.
+ const std::vector<int> mLookaheadMs;
+
+ std::vector<PosePredictorVerifier> mVerifiers;
+
+ const std::vector<size_t> mDelimiterIdx;
+
+ // Recorders
+ media::VectorRecorder mPredictionRecorder{
+ std::size(mVerifiers) /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ mDelimiterIdx};
+ media::VectorRecorder mPredictionDurableRecorder{
+ std::size(mVerifiers) /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ mDelimiterIdx};
+
+ // Status
+
+ // SetType is the externally set predictor type. It may include AUTO.
+ PosePredictorType mSetType = PosePredictorType::LEAST_SQUARES;
+
+ // CurrentType is the actual predictor type used by this class.
+ // It does not include AUTO because that metatype means the class
+ // chooses the best predictor type based on sensor statistics.
+ PosePredictorType mCurrentType = PosePredictorType::LEAST_SQUARES;
+
+ int64_t mResets{};
+ int64_t mLastTimestampNs{};
+
+ // Returns current predictor
+ std::shared_ptr<PredictorBase> getCurrentPredictor() const;
+};
+
+} // namespace android::media
diff --git a/media/libheadtracking/PosePredictorVerifier.h b/media/libheadtracking/PosePredictorVerifier.h
new file mode 100644
index 0000000..6b4a357
--- /dev/null
+++ b/media/libheadtracking/PosePredictorVerifier.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+
+#include <audio_utils/Statistics.h>
+#include <media/Pose.h>
+
+namespace android::media {
+
+/**
+ * PosePredictorVerifier is used to validate predictions
+ *
+ * This class is not thread-safe
+ */
+class PosePredictorVerifier {
+public:
+ std::string toString() const {
+ return mErrorStats.toString();
+ }
+
+ static constexpr int64_t kMillisToNanos = 1000000;
+
+ void verifyActualPose(int64_t timestampNs, const Pose3f& pose) {
+ for (auto it = mPredictions.begin(); it != mPredictions.end();) {
+ if (it->first < timestampNs) {
+ it = mPredictions.erase(it);
+ } else {
+ int64_t dt = it->first - timestampNs;
+ if (std::abs(dt) < 10 * kMillisToNanos) {
+ const float angle = pose.rotation().angularDistance(it->second.rotation());
+ const float error = std::abs(angle); // L1 (absolute difference) here.
+ mLastError = error;
+ mErrorStats.add(error);
+ }
+ break;
+ }
+ }
+ }
+
+ void addPredictedPose(int64_t atNs, const Pose3f& pose) {
+ mPredictions.emplace_back(atNs, pose);
+ }
+
+ float lastError() const {
+ return mLastError;
+ }
+
+ float cumulativeAverageError() const {
+ return mErrorStats.getMean();
+ }
+
+private:
+ static constexpr double kCumulativeErrorAlpha = 0.999;
+ std::deque<std::pair<int64_t, Pose3f>> mPredictions;
+ float mLastError{};
+ android::audio_utils::Statistics<double> mErrorStats{kCumulativeErrorAlpha};
+};
+
+} // namespace android::media
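A hypothetical sketch (not part of the patch) of the verification flow used by PosePredictor::predict(): a prediction is recorded for a future timestamp, then compared against the actual pose once a sample close to that timestamp arrives. Timestamps and poses here are placeholders.

    #include "PosePredictorVerifier.h"

    namespace example {
    using ::android::media::Pose3f;
    using ::android::media::PosePredictorVerifier;

    void verifyOnePrediction(const Pose3f& predicted, const Pose3f& actual) {
        PosePredictorVerifier verifier;

        const int64_t nowNs = 1'000'000'000;  // arbitrary base timestamp
        const int64_t atNs = nowNs + 50 * PosePredictorVerifier::kMillisToNanos;

        verifier.addPredictedPose(atNs, predicted);  // record the 50 ms lookahead prediction
        verifier.verifyActualPose(atNs, actual);     // matched within the 10 ms window

        const float lastErrorRad = verifier.lastError();              // L1 angular error (radians)
        const float meanErrorRad = verifier.cumulativeAverageError();
        (void)lastErrorRad;
        (void)meanErrorRad;
    }
    }  // namespace example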
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
index f306183..ded874a 100644
--- a/media/libheadtracking/PoseRateLimiter-test.cpp
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -17,7 +17,8 @@
#include <gtest/gtest.h>
#include "PoseRateLimiter.h"
-#include "QuaternionUtil.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
index e79e54a..cfeca00 100644
--- a/media/libheadtracking/QuaternionUtil-test.cpp
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -16,7 +16,7 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using Eigen::Quaternionf;
@@ -51,6 +51,92 @@
EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
}
+// Float precision necessitates this precision (1e-4f fails)
+constexpr float NEAR = 1e-3f;
+
+TEST(QuaternionUtil, quaternionToAngles_basic) {
+ float pitch, roll, yaw;
+
+ // angles as reported.
+ // choose 11 angles between -M_PI / 2 to M_PI / 2
+ for (int step = -5; step <= 5; ++step) {
+ const float angle = M_PI * step * 0.1f;
+
+ quaternionToAngles(rotationVectorToQuaternion({angle, 0.f, 0.f}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(angle, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ EXPECT_NEAR(0.f, yaw, NEAR);
+
+ quaternionToAngles(rotationVectorToQuaternion({0.f, angle, 0.f}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+ EXPECT_NEAR(0.f, yaw, NEAR);
+
+ quaternionToAngles(rotationVectorToQuaternion({0.f, 0.f, angle}), &pitch, &roll, &yaw);
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ EXPECT_NEAR(angle, yaw, NEAR);
+ }
+
+ // Generates a debug string
+ const std::string s = quaternionToAngles<true /* DEBUG */>(
+ rotationVectorToQuaternion({M_PI, 0.f, 0.f}), &pitch, &roll, &yaw);
+ ASSERT_FALSE(s.empty());
+}
+
+TEST(QuaternionUtil, quaternionToAngles_zaxis) {
+ float pitch, roll, yaw;
+
+ for (int rot_step = -10; rot_step <= 10; ++rot_step) {
+ const float rot_angle = M_PI * rot_step * 0.1f;
+ // pitch independent of world Z rotation
+
+ // We don't test the boundaries of pitch +-M_PI/2 as roll can become
+ // degenerate and atan(0, 0) may report 0, PI, or -PI.
+ for (int step = -4; step <= 4; ++step) {
+ const float angle = M_PI * step * 0.1f;
+ auto q = rotationVectorToQuaternion({angle, 0.f, 0.f});
+ auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+ // Sequential active rotations (on world frame) compose as R_2 * R_1.
+ quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(angle, pitch, NEAR);
+ EXPECT_NEAR(0.f, roll, NEAR);
+ }
+
+ // roll independent of world Z rotation
+ for (int step = -5; step <= 5; ++step) {
+ const float angle = M_PI * step * 0.1f;
+ auto q = rotationVectorToQuaternion({0.f, angle, 0.f});
+ auto world_z = rotationVectorToQuaternion({0.f, 0.f, rot_angle});
+
+ // Sequential active rotations (on world frame) compose as R_2 * R_1.
+ quaternionToAngles(world_z * q, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+
+ // Convert extrinsic (world-based) active rotations to a sequence of
+ // intrinsic rotations (each rotation based off of previous rotation
+ // frame).
+ //
+ // R_1 * R_intrinsic = R_extrinsic * R_1
+ // implies
+ // R_intrinsic = (R_1)^-1 R_extrinsic R_1
+ //
+ auto world_z_intrinsic = rotationVectorToQuaternion(
+ q.inverse() * Vector3f(0.f, 0.f, rot_angle));
+
+ // Sequential intrinsic rotations compose as R_1 * R_2.
+ quaternionToAngles(q * world_z_intrinsic, &pitch, &roll, &yaw);
+
+ EXPECT_NEAR(0.f, pitch, NEAR);
+ EXPECT_NEAR(angle, roll, NEAR);
+ }
+ }
+}
+
} // namespace
} // namespace media
} // namespace android
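A small Eigen sketch (not part of the patch) of the extrinsic-to-intrinsic identity used in the zaxis test above, R_intrinsic = (R_1)^-1 * R_extrinsic * R_1, which makes the intrinsic composition R_1 * R_intrinsic equal the extrinsic composition R_extrinsic * R_1. The angles are arbitrary.

    #include <Eigen/Geometry>
    #include <cassert>

    int main() {
        using Eigen::AngleAxisf;
        using Eigen::Quaternionf;
        using Eigen::Vector3f;

        const Quaternionf r1(AngleAxisf(0.4f, Vector3f::UnitX()));         // first rotation
        const Quaternionf extrinsic(AngleAxisf(0.7f, Vector3f::UnitZ()));  // world-frame rotation

        // Express the world-frame rotation in the frame produced by r1.
        const Quaternionf intrinsic = r1.inverse() * extrinsic * r1;

        const Quaternionf lhs = r1 * intrinsic;   // intrinsic composition
        const Quaternionf rhs = extrinsic * r1;   // extrinsic composition
        assert(lhs.angularDistance(rhs) < 1e-5f);
        return 0;
    }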
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
index 5d090de..e245c80 100644
--- a/media/libheadtracking/QuaternionUtil.cpp
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include <cassert>
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
deleted file mode 100644
index f7a2ca9..0000000
--- a/media/libheadtracking/QuaternionUtil.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2021 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#pragma once
-
-#include <Eigen/Geometry>
-
-namespace android {
-namespace media {
-
-/**
- * Converts a rotation vector to an equivalent quaternion.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
-
-/**
- * Converts a quaternion to an equivalent rotation vector.
- * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
- * magnitude the rotation angle (in radians) around that axis.
- */
-Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
-
-/**
- * Returns a quaternion representing a rotation around the X-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateX(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateY(float angle);
-
-/**
- * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
- * radians).
- */
-Eigen::Quaternionf rotateZ(float angle);
-
-} // namespace media
-} // namespace android
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
index 31d469c..8a29027 100644
--- a/media/libheadtracking/SensorPoseProvider.cpp
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -32,7 +32,7 @@
#include <sensor/SensorManager.h>
#include <utils/Looper.h>
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
diff --git a/media/libheadtracking/StillnessDetector-test.cpp b/media/libheadtracking/StillnessDetector-test.cpp
index b6cd479..56e7b4e 100644
--- a/media/libheadtracking/StillnessDetector-test.cpp
+++ b/media/libheadtracking/StillnessDetector-test.cpp
@@ -16,8 +16,9 @@
#include <gtest/gtest.h>
-#include "QuaternionUtil.h"
#include "StillnessDetector.h"
+
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
namespace android {
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
index 7984e1e..9fbf81f 100644
--- a/media/libheadtracking/Twist-test.cpp
+++ b/media/libheadtracking/Twist-test.cpp
@@ -16,9 +16,7 @@
#include "media/Twist.h"
-#include <gtest/gtest.h>
-
-#include "QuaternionUtil.h"
+#include "media/QuaternionUtil.h"
#include "TestUtil.h"
using Eigen::Quaternionf;
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
index 664c4d5..fdec694 100644
--- a/media/libheadtracking/Twist.cpp
+++ b/media/libheadtracking/Twist.cpp
@@ -15,8 +15,8 @@
*/
#include "media/Twist.h"
-
-#include "QuaternionUtil.h"
+#include <android-base/stringprintf.h>
+#include "media/QuaternionUtil.h"
namespace android {
namespace media {
@@ -39,5 +39,11 @@
return os;
}
+std::string Twist3f::toString() const {
+ return base::StringPrintf("[%0.2f, %0.2f, %0.2f, %0.2f, %0.2f, %0.2f]",
+ mTranslationalVelocity[0], mTranslationalVelocity[1], mTranslationalVelocity[2],
+ mRotationalVelocity[0], mRotationalVelocity[1], mRotationalVelocity[2]);
+}
+
} // namespace media
} // namespace android
diff --git a/media/libheadtracking/VectorRecorder.cpp b/media/libheadtracking/VectorRecorder.cpp
new file mode 100644
index 0000000..5c87d05
--- /dev/null
+++ b/media/libheadtracking/VectorRecorder.cpp
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/VectorRecorder.h"
+
+namespace android::media {
+
+// Convert data to string with level indentation.
+// No need for a lock as the SimpleLog is thread-safe.
+std::string VectorRecorder::toString(size_t indent) const {
+ return mRecordLog.dumpToString(std::string(indent, ' ').c_str(), mMaxLocalLogLine);
+}
+
+// Record into local log when it is time.
+void VectorRecorder::record(const std::vector<float>& record) {
+ if (record.size() != mVectorSize) return;
+
+ // Protect against concurrent calls to record().
+ std::lock_guard lg(mLock);
+
+ // if it is time, record average data and reset.
+ if (shouldRecordLog_l()) {
+ sumToAverage_l();
+ mRecordLog.log(
+ "mean: %s, min: %s, max %s, calculated %zu samples in %0.4f second(s)",
+ toString(mSum, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ toString(mMin, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ toString(mMax, mDelimiterIdx, mFormatString.c_str()).c_str(),
+ mNumberOfSamples,
+ mNumberOfSecondsSinceFirstSample.count());
+ resetRecord_l();
+ }
+
+ // update stream average.
+ if (mNumberOfSamples++ == 0) {
+ mFirstSampleTimestamp = std::chrono::steady_clock::now();
+ for (size_t i = 0; i < mVectorSize; ++i) {
+ const float value = record[i];
+ mSum[i] += value;
+ mMax[i] = value;
+ mMin[i] = value;
+ }
+ } else {
+ for (size_t i = 0; i < mVectorSize; ++i) {
+ const float value = record[i];
+ mSum[i] += value;
+ mMax[i] = std::max(mMax[i], value);
+ mMin[i] = std::min(mMin[i], value);
+ }
+ }
+}
+
+bool VectorRecorder::shouldRecordLog_l() {
+ mNumberOfSecondsSinceFirstSample = std::chrono::duration_cast<std::chrono::seconds>(
+ std::chrono::steady_clock::now() - mFirstSampleTimestamp);
+ return mNumberOfSecondsSinceFirstSample >= mRecordThreshold;
+}
+
+void VectorRecorder::resetRecord_l() {
+ mSum.assign(mVectorSize, 0);
+ mMax.assign(mVectorSize, 0);
+ mMin.assign(mVectorSize, 0);
+ mNumberOfSamples = 0;
+ mNumberOfSecondsSinceFirstSample = std::chrono::seconds(0);
+}
+
+void VectorRecorder::sumToAverage_l() {
+ if (mNumberOfSamples == 0) return;
+ const float reciprocal = 1.f / mNumberOfSamples;
+ for (auto& p : mSum) {
+ p *= reciprocal;
+ }
+}
+
+} // namespace android::media
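A hypothetical usage sketch (not part of the patch) that mirrors how PosePredictor drives the recorder: vectors of a fixed size are recorded continuously and summarized (mean/min/max) once per interval. The constructor arguments follow the usage in PosePredictor.h; the values recorded here are placeholders.

    #include <chrono>
    #include <iostream>
    #include "media/VectorRecorder.h"

    int main() {
        // 3-element vectors, summarized once per second, keeping up to 10 log lines,
        // with no delimiter indices between elements.
        android::media::VectorRecorder recorder(
                3 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
                {} /* delimiterIdx */);

        for (int i = 0; i < 100; ++i) {
            recorder.record({0.1f * i, 0.2f * i, 0.3f * i});
        }
        std::cout << recorder.toString(2 /* indent */);
        return 0;
    }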
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 8ef8ab0..a3c1e97 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -19,6 +19,7 @@
#include "HeadTrackingMode.h"
#include "Pose.h"
+#include "PosePredictorType.h"
#include "Twist.h"
namespace android {
@@ -98,6 +99,11 @@
virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
/**
+ * Set the predictor type.
+ */
+ virtual void setPosePredictorType(PosePredictorType type) = 0;
+
+ /**
* Dump HeadTrackingProcessor parameters under caller lock.
*/
virtual std::string toString_l(unsigned level) const = 0;
diff --git a/media/libheadtracking/include/media/PosePredictorType.h b/media/libheadtracking/include/media/PosePredictorType.h
new file mode 100644
index 0000000..aa76d5d
--- /dev/null
+++ b/media/libheadtracking/include/media/PosePredictorType.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <string>
+
+namespace android::media {
+
+enum class PosePredictorType {
+ /** Use best predictor determined from sensor input */
+ AUTO,
+
+ /** Use last pose for future prediction */
+ LAST,
+
+ /** Use twist angular velocity for future prediction */
+ TWIST,
+
+ /** Use weighted least squares history of prior poses (ignoring twist) */
+ LEAST_SQUARES,
+};
+
+std::string toString(PosePredictorType posePredictorType);
+bool isValidPosePredictorType(PosePredictorType posePredictorType);
+
+} // namespace android::media
diff --git a/media/libheadtracking/include/media/QuaternionUtil.h b/media/libheadtracking/include/media/QuaternionUtil.h
new file mode 100644
index 0000000..a711d17
--- /dev/null
+++ b/media/libheadtracking/include/media/QuaternionUtil.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <Eigen/Geometry>
+#include <media/Pose.h>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
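+
+/*
+ * Illustrative identity (a sketch, not part of the API contract): a rotation
+ * vector of magnitude M_PI/2 along +Z represents a 90-degree rotation about Z,
+ * so rotationVectorToQuaternion(Eigen::Vector3f(0, 0, M_PI / 2)) matches
+ * rotateZ(M_PI / 2) up to floating-point error, and
+ * quaternionToRotationVector() inverts the mapping.
+ */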
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+/**
+ * Compute separate roll, pitch, and yaw angles from a quaternion
+ *
+ * The roll, pitch, and yaw follow standard 3DOF virtual reality definitions
+ * with angles increasing counter-clockwise by the right hand rule.
+ *
+ * https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ *
+ * The roll, pitch, and yaw angles are calculated separately from the device frame
+ * rotation from the world frame. This is not to be confused with the
+ * intrinsic Euler xyz roll, pitch, yaw 'nautical' angles.
+ *
+ * The input quaternion is the active rotation that transforms the
+ * World/Stage frame to the Head/Screen frame.
+ *
+ * The input quaternion may come from two principal sensors: DEVICE and HEADSET
+ * and are interpreted as below.
+ *
+ * DEVICE SENSOR
+ *
+ * Android sensor stack assumes device coordinates along the x/y axis.
+ *
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_rotation_vector:
+ *
+ * Looking down from the clouds. Android Device coordinate system (not used)
+ * DEVICE --> X (Y goes through top speaker towards the observer)
+ * | Z
+ * V
+ * USER
+ *
+ * Internally within this library, we transform the device sensor coordinate
+ * system by rotating the coordinate system around the X axis by -M_PI/2.
+ * This aligns the device coordinate system to match that of the
+ * Head Tracking sensor (see below), should the user be facing the device in
+ * natural (phone == portrait, tablet == ?) orientation.
+ *
+ * Looking down from the clouds. Spatializer device frame.
+ * Y
+ * ^
+ * |
+ * DEVICE --> X (Z goes through top of the DEVICE towards the observer)
+ *
+ * USER
+ *
+ * The reference world frame is the device in vertical
+ * natural (phone == portrait) orientation with the top pointing straight
+ * up from the ground and the front-to-back direction facing north.
+ * The world frame is presumed locally fixed by magnetic and gravitational reference.
+ *
+ * HEADSET SENSOR
+ * https://developer.android.com/reference/android/hardware/SensorEvent#sensor.type_head_tracker:
+ *
+ * Looking down from the clouds. Headset frame.
+ * Y
+ * ^
+ * |
+ * USER ---> X
+ * (Z goes through the top of the USER head towards the observer)
+ *
+ * The Z axis goes from the neck to the top of the head, the X axis goes
+ * from the left ear to the right ear, the Y axis goes from the back of the
+ * head through the nose.
+ *
+ * Typically for a headset sensor, the X and Y axes have some arbitrary fixed
+ * reference.
+ *
+ * ROLL
+ * Roll is the counter-clockwise L/R motion around the Y axis (hence ZX plane).
+ * The right hand convention means the plane is ZX not XZ.
+ * This can be considered the azimuth angle in spherical coordinates
+ * with Pitch being the elevation angle.
+ *
+ * Roll has a range of -M_PI to M_PI radians.
+ *
+ * Rolling a device changes between portrait and landscape
+ * modes, and for L/R speakers will limit the amount of crosstalk cancellation.
+ * Roll increases as the device (if vertical like a coin) rolls from left to right.
+ *
+ * By this definition, Roll is less accurate when the device is flat
+ * on a table rather than standing on edge.
+ * When perfectly flat on the table, roll may report as 0, M_PI, or -M_PI
+ * due to the ambiguity / degeneracy of atan(0, 0) in this case (the device Y axis aligns with
+ * the world Z axis), but exactly flat rarely occurs.
+ *
+ * Roll for a headset is the angle the head is inclined to the right side
+ * (like sleeping).
+ *
+ * PITCH
+ * Pitch is the elevation of the device surface normal (the device Y axis)
+ * along the world Z axis (away from the earth).
+ * This can be considered the elevation angle in spherical coordinates using
+ * Roll as the azimuth angle.
+ *
+ * Pitch for a device determines whether the device is "upright" or lying
+ * flat on the table (i.e. surface normal). Pitch is 0 when upright, decreases
+ * as the device top moves away from the user to -M_PI/2 when lying down face up.
+ * Pitch increases from 0 to M_PI/2 when the device tilts towards the user, and is
+ * M_PI/2 radians when face down.
+ *
+ * Pitch for a headset is the user tilting the head/chin up or down,
+ * like nodding.
+ *
+ * Pitch has a range of [-M_PI/2, M_PI/2] radians.
+ *
+ * YAW
+ * Yaw is the rotational component along the earth's XY tangential plane,
+ * where the Z axis points radially away from the earth.
+ *
+ * Yaw has a range of -M_PI to M_PI radians. If used for azimuth angle in
+ * spherical coordinates, the elevation angle may be derived from the Z axis.
+ *
+ * A positive increase means the phone is rotating from right to left
+ * when considered flat on the table.
+ * (headset: the user is rotating their head to look left).
+ * If left speaker or right earbud is pointing straight up or down,
+ * this value is imprecise and Pitch or Roll is a more useful measure.
+ *
+ * Yaw for a device is like spinning a vertical device along the axis of
+ * gravity, like spinning a coin. Yaw increases as the coin / device
+ * spins from right to left, rotating around the Z axis.
+ *
+ * Yaw for a headset is the user turning the head to look left or right
+ * like shaking the head for no. Yaw is the primary angle for a binaural
+ * head tracking device.
+ *
+ * @param q input active rotation Eigen quaternion.
+ * @param pitch output set to pitch if not nullptr
+ * @param roll output set to roll if not nullptr
+ * @param yaw output set to yaw if not nullptr
+ * @return (DEBUG==true) a debug string with intermediate transformation matrix
+ * interpreted as the unit basis vectors.
+ */
+
+// DEBUG returns a debug string for analysis.
+// We avoid unneeded rotation matrix computation by keeping the DEBUG option constexpr.
+template <bool DEBUG = false>
+auto quaternionToAngles(const Eigen::Quaternionf& q, float *pitch, float *roll, float *yaw) {
+ /*
+ * The quaternion here is the active rotation that transforms from the world frame
+ * to the device frame: the observer remains in the world frame,
+ * and the device (frame) moves.
+ *
+ * We use this to map device coordinates to world coordinates.
+ *
+ * Device: We transform the device right speaker (X == 1), top speaker (Z == 1),
+ * and surface inwards normal (Y == 1) positions to the world frame.
+ *
+ * Headset: We transform the headset right bud (X == 1), top (Z == 1) and
+ * nose normal (Y == 1) positions to the world frame.
+ *
+ * This is the same as the world frame coordinates of the
+ * unit device vector in the X dimension (ux),
+ * unit device vector in the Y dimension (uy),
+ * unit device vector in the Z dimension (uz).
+ *
+ * Rather than doing the rotation on unit vectors individually,
+ * one can simply use the columns of the rotation matrix of
+ * the world-to-body quaternion, so the computation is exceptionally fast.
+ *
+ * Furthermore, Eigen inlines the "toRotationMatrix" method
+ * and we rely on unused-expression removal for efficiency,
+ * so any elements that are not used are not computed.
+ *
+ * Side note: For applying a rotation to several points,
+ * it is more computationally efficient to extract and
+ * use the rotation matrix form than the quaternion.
+ * So use of the rotation matrix is good for many reasons.
+ */
+ const auto rotation = q.toRotationMatrix();
+
+ /*
+ * World location of unit vector right speaker assuming the phone is situated
+ * natural (phone == portrait) mode.
+ * (headset: right bud).
+ *
+ * auto ux = q.rotation() * Eigen::Vector3f{1.f, 0.f, 0.f};
+ * = rotation.col(0);
+ */
+ [[maybe_unused]] const auto ux_0 = rotation.coeff(0, 0);
+ [[maybe_unused]] const auto ux_1 = rotation.coeff(1, 0);
+ [[maybe_unused]] const auto ux_2 = rotation.coeff(2, 0);
+
+ [[maybe_unused]] std::string coordinates;
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "ux: %f %f %f", ux_0, ux_1, ux_2);
+ }
+
+ /*
+ * World location of screen-inwards normal assuming the phone is situated
+ * in natural (phone == portrait) mode.
+ * (headset: user nose).
+ *
+ * auto uy = q.rotation() * Eigen::Vector3f{0.f, 1.f, 0.f};
+ * = rotation.col(1);
+ */
+ [[maybe_unused]] const auto uy_0 = rotation.coeff(0, 1);
+ [[maybe_unused]] const auto uy_1 = rotation.coeff(1, 1);
+ [[maybe_unused]] const auto uy_2 = rotation.coeff(2, 1);
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "uy: %f %f %f", uy_0, uy_1, uy_2);
+ }
+
+ /*
+ * World location of unit vector top speaker.
+ * (headset: top of head).
+ * auto uz = q.rotation() * Eigen::Vector3f{0.f, 0.f, 1.f};
+ * = rotation.col(2);
+ */
+ [[maybe_unused]] const auto uz_0 = rotation.coeff(0, 2);
+ [[maybe_unused]] const auto uz_1 = rotation.coeff(1, 2);
+ [[maybe_unused]] const auto uz_2 = rotation.coeff(2, 2);
+ if constexpr (DEBUG) {
+ base::StringAppendF(&coordinates, "uz: %f %f %f", uz_0, uz_1, uz_2);
+ }
+
+ // pitch computed from nose world Z coordinate;
+ // hence independent of rotation around world Z.
+ if (pitch != nullptr) {
+ *pitch = asin(std::clamp(uy_2, -1.f, 1.f));
+ }
+
+ // roll computed from head/right world Z coordinate;
+ // hence independent of rotation around world Z.
+ if (roll != nullptr) {
+ // atan2 takes care of implicit scale normalization of Z, X.
+ *roll = -atan2(ux_2, uz_2);
+ }
+
+ // yaw computed from right ear angle projected onto world XY plane
+ // where world Z == 0. This is the rotation around world Z.
+ if (yaw != nullptr) {
+ // atan2 takes care of implicit scale normalization of X, Y.
+ *yaw = atan2(ux_1, ux_0);
+ }
+
+ if constexpr (DEBUG) {
+ return coordinates;
+ }
+}
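+
+/*
+ * Minimal usage sketch (illustrative only; `headPose` stands for an
+ * Eigen::Quaternionf obtained from the head-tracking sensor):
+ *
+ *   float pitch, roll, yaw;
+ *   quaternionToAngles(headPose, &pitch, &roll, &yaw);          // radians
+ *   std::string basis =
+ *           quaternionToAngles<true>(headPose, &pitch, &roll, &yaw);
+ */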
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
index 291cea3..51b83d8 100644
--- a/media/libheadtracking/include/media/Twist.h
+++ b/media/libheadtracking/include/media/Twist.h
@@ -66,6 +66,9 @@
return Twist3f(mTranslationalVelocity / s, mRotationalVelocity / s);
}
+ // Convert instance to a string representation.
+ std::string toString() const;
+
private:
Eigen::Vector3f mTranslationalVelocity;
Eigen::Vector3f mRotationalVelocity;
diff --git a/media/libheadtracking/include/media/VectorRecorder.h b/media/libheadtracking/include/media/VectorRecorder.h
new file mode 100644
index 0000000..4103a7d
--- /dev/null
+++ b/media/libheadtracking/include/media/VectorRecorder.h
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/stringprintf.h>
+#include <android-base/thread_annotations.h>
+#include <audio_utils/SimpleLog.h>
+#include <chrono>
+#include <math.h>
+#include <mutex>
+#include <vector>
+
+namespace android::media {
+
+/**
+ * VectorRecorder records a vector of floats computing the average, max, and min
+ * over given time periods.
+ *
+ * The class is thread-safe.
+ */
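+//
+// Minimal usage sketch (illustrative only; the six-element layout and the
+// one-minute bucketing below are assumptions, not requirements of the class):
+//
+//   VectorRecorder recorder(6 /* vectorSize */, std::chrono::minutes(1),
+//                           10 /* maxLogLine */, {3} /* delimiterIdx */);
+//   recorder.record({0.f, 0.f, 0.f, -1.29f, -0.50f, 15.27f});
+//   std::string history = recorder.toString(2 /* indent */);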
+class VectorRecorder {
+ public:
+ /**
+ * @param vectorSize is the size of the vector input.
+ * If the input does not match this size, it is ignored.
+ * @param threshold is the time interval we bucket for averaging.
+ * @param maxLogLine is the number of lines we log. At this
+ * threshold, the oldest line will expire when the new line comes in.
+ * @param delimiterIdx is an optional array of delimiter indices that
+ * replace the ',' with a ':'. For example, if delimiterIdx = { 3 } then a
+ * six-element vector would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27].
+ * @param formatString is the sprintf format string for the double converted data
+ * to use.
+ */
+ VectorRecorder(
+ size_t vectorSize, std::chrono::duration<double> threshold, int maxLogLine,
+ std::vector<size_t> delimiterIdx = {},
+ const std::string_view formatString = {})
+ : mVectorSize(vectorSize)
+ , mDelimiterIdx(std::move(delimiterIdx))
+ , mFormatString(formatString)
+ , mRecordLog(maxLogLine)
+ , mRecordThreshold(threshold)
+ {
+ resetRecord_l(); // OK to call - we're in the constructor.
+ }
+
+ /** Convert recorded vector data to string with level indentation */
+ std::string toString(size_t indent) const;
+
+ /**
+ * @brief Record a vector of floats.
+ *
+ * @param record a vector of floats.
+ */
+ void record(const std::vector<float>& record);
+
+ /**
+ * Format vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
+ *
+ * @param delimiterIdx is an optional array of delimiter indices that
+ * replace the ',' with a ':'. For example if delimiterIdx = { 3 } then
+ * the above example would format as [0.00, 0.00, 0.00 : -1.29, -0.50, 15.27].
+ * @param formatString is the sprintf format string for the double converted data
+ * to use.
+ */
+ template <typename T>
+ static std::string toString(const std::vector<T>& record,
+ const std::vector<size_t>& delimiterIdx = {},
+ const char * const formatString = nullptr) {
+ if (record.size() == 0) {
+ return "[]";
+ }
+
+ std::string ss = "[";
+ auto nextDelimiter = delimiterIdx.begin();
+ for (size_t i = 0; i < record.size(); ++i) {
+ if (i > 0) {
+ if (nextDelimiter != delimiterIdx.end()
+ && *nextDelimiter <= i) {
+ ss.append(" : ");
+ ++nextDelimiter;
+ } else {
+ ss.append(", ");
+ }
+ }
+ if (formatString != nullptr && *formatString) {
+ base::StringAppendF(&ss, formatString, static_cast<double>(record[i]));
+ } else {
+ base::StringAppendF(&ss, "%5.2lf", static_cast<double>(record[i]));
+ }
+ }
+ ss.append("]");
+ return ss;
+ }
+
+ private:
+ static constexpr int mMaxLocalLogLine = 10;
+
+ const size_t mVectorSize;
+ const std::vector<size_t> mDelimiterIdx;
+ const std::string mFormatString;
+
+ // Local log for historical vector data.
+ // Locked internally, so does not need mutex below.
+ SimpleLog mRecordLog{mMaxLocalLogLine};
+
+ std::mutex mLock;
+
+ // Time threshold to record vectors in the local log.
+ // Vector data will be recorded into log at least every mRecordThreshold.
+ std::chrono::duration<double> mRecordThreshold GUARDED_BY(mLock);
+
+ // Number of seconds since first sample in mSum.
+ std::chrono::duration<double> mNumberOfSecondsSinceFirstSample GUARDED_BY(mLock);
+
+ // Timestamp of first sample recorded in mSum.
+ std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp GUARDED_BY(mLock);
+
+ // Number of samples in mSum.
+ size_t mNumberOfSamples GUARDED_BY(mLock) = 0;
+
+ std::vector<double> mSum GUARDED_BY(mLock);
+ std::vector<float> mMax GUARDED_BY(mLock);
+ std::vector<float> mMin GUARDED_BY(mLock);
+
+ // Computes mNumberOfSecondsSinceFirstSample, returns true if time to record.
+ bool shouldRecordLog_l() REQUIRES(mLock);
+
+ // Resets the running mNumberOfSamples, mSum, mMax, mMin.
+ void resetRecord_l() REQUIRES(mLock);
+
+ // Convert mSum to an average.
+ void sumToAverage_l() REQUIRES(mLock);
+}; // VectorRecorder
+
+} // namespace android::media
diff --git a/media/libmediahelper/AudioParameter.cpp b/media/libmediahelper/AudioParameter.cpp
index 382a920..9a8156e 100644
--- a/media/libmediahelper/AudioParameter.cpp
+++ b/media/libmediahelper/AudioParameter.cpp
@@ -61,6 +61,12 @@
AUDIO_PARAMETER_DEVICE_ADDITIONAL_OUTPUT_DELAY;
const char * const AudioParameter::keyMaxAdditionalOutputDeviceDelay =
AUDIO_PARAMETER_DEVICE_MAX_ADDITIONAL_OUTPUT_DELAY;
+const char * const AudioParameter::keyOffloadCodecAverageBitRate = AUDIO_OFFLOAD_CODEC_AVG_BIT_RATE;
+const char * const AudioParameter::keyOffloadCodecSampleRate = AUDIO_OFFLOAD_CODEC_SAMPLE_RATE;
+const char * const AudioParameter::keyOffloadCodecChannels = AUDIO_OFFLOAD_CODEC_NUM_CHANNEL;
+const char * const AudioParameter::keyOffloadCodecDelaySamples = AUDIO_OFFLOAD_CODEC_DELAY_SAMPLES;
+const char * const AudioParameter::keyOffloadCodecPaddingSamples =
+ AUDIO_OFFLOAD_CODEC_PADDING_SAMPLES;
AudioParameter::AudioParameter(const String8& keyValuePairs)
{
@@ -226,4 +232,9 @@
}
}
+bool AudioParameter::containsKey(const String8& key) const
+{
+ return mParameters.indexOfKey(key) >= 0;
+}
+
} // namespace android
diff --git a/media/libmediahelper/include/media/AudioParameter.h b/media/libmediahelper/include/media/AudioParameter.h
index 9a6ca8a..350f472 100644
--- a/media/libmediahelper/include/media/AudioParameter.h
+++ b/media/libmediahelper/include/media/AudioParameter.h
@@ -107,6 +107,12 @@
static const char * const keyAdditionalOutputDeviceDelay;
static const char * const keyMaxAdditionalOutputDeviceDelay;
+ static const char * const keyOffloadCodecAverageBitRate;
+ static const char * const keyOffloadCodecSampleRate;
+ static const char * const keyOffloadCodecChannels;
+ static const char * const keyOffloadCodecDelaySamples;
+ static const char * const keyOffloadCodecPaddingSamples;
+
String8 toString() const { return toStringImpl(true); }
String8 keysToString() const { return toStringImpl(false); }
@@ -125,6 +131,7 @@
size_t size() const { return mParameters.size(); }
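+ // Returns true if a value is present for the given key.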
+ bool containsKey(const String8& key) const;
private:
String8 mKeyValuePairs;
KeyedVector <String8, String8> mParameters;
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 1a1c55f..e794750 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -88,6 +88,7 @@
using aidl::android::media::BnResourceManagerClient;
using aidl::android::media::IResourceManagerClient;
using aidl::android::media::IResourceManagerService;
+using aidl::android::media::ClientInfoParcel;
// key for media statistics
static const char *kCodecKeyName = "codec";
@@ -209,8 +210,8 @@
////////////////////////////////////////////////////////////////////////////////
struct ResourceManagerClient : public BnResourceManagerClient {
- explicit ResourceManagerClient(MediaCodec* codec, int32_t pid) :
- mMediaCodec(codec), mPid(pid) {}
+ explicit ResourceManagerClient(MediaCodec* codec, int32_t pid, int32_t uid) :
+ mMediaCodec(codec), mPid(pid), mUid(uid) {}
Status reclaimResource(bool* _aidl_return) override {
sp<MediaCodec> codec = mMediaCodec.promote();
@@ -222,7 +223,10 @@
if (service == nullptr) {
ALOGW("MediaCodec::ResourceManagerClient unable to find ResourceManagerService");
}
- service->removeClient(mPid, getId(this));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(this)};
+ service->removeClient(clientInfo);
*_aidl_return = true;
return Status::ok();
}
@@ -260,6 +264,7 @@
private:
wp<MediaCodec> mMediaCodec;
int32_t mPid;
+ int32_t mUid;
DISALLOW_EVIL_CONSTRUCTORS(ResourceManagerClient);
};
@@ -285,10 +290,15 @@
void markClientForPendingRemoval();
bool reclaimResource(const std::vector<MediaResourceParcel> &resources);
+ inline void setCodecName(const char* name) {
+ mCodecName = name;
+ }
+
private:
Mutex mLock;
pid_t mPid;
uid_t mUid;
+ std::string mCodecName;
std::shared_ptr<IResourceManagerService> mService;
std::shared_ptr<IResourceManagerClient> mClient;
::ndk::ScopedAIBinder_DeathRecipient mDeathRecipient;
@@ -392,7 +402,11 @@
if (mService == nullptr) {
return;
}
- mService->addResource(mPid, mUid, getId(mClient), mClient, resources);
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(mClient),
+ .name = mCodecName};
+ mService->addResource(clientInfo, mClient, resources);
}
void MediaCodec::ResourceManagerServiceProxy::removeResource(
@@ -404,7 +418,11 @@
if (mService == nullptr) {
return;
}
- mService->removeResource(mPid, getId(mClient), resources);
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(mClient),
+ .name = mCodecName};
+ mService->removeResource(clientInfo, resources);
}
void MediaCodec::ResourceManagerServiceProxy::removeClient() {
@@ -412,7 +430,11 @@
if (mService == nullptr) {
return;
}
- mService->removeClient(mPid, getId(mClient));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(mClient),
+ .name = mCodecName};
+ mService->removeClient(clientInfo);
}
void MediaCodec::ResourceManagerServiceProxy::markClientForPendingRemoval() {
@@ -420,7 +442,11 @@
if (mService == nullptr) {
return;
}
- mService->markClientForPendingRemoval(mPid, getId(mClient));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(mClient),
+ .name = mCodecName};
+ mService->markClientForPendingRemoval(clientInfo);
}
bool MediaCodec::ResourceManagerServiceProxy::reclaimResource(
@@ -430,7 +456,11 @@
return false;
}
bool success;
- Status status = mService->reclaimResource(mPid, resources, &success);
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(mClient),
+ .name = mCodecName};
+ Status status = mService->reclaimResource(clientInfo, resources, &success);
return status.isOk() && success;
}
@@ -836,7 +866,7 @@
mGetCodecBase(getCodecBase),
mGetCodecInfo(getCodecInfo) {
mResourceManagerProxy = new ResourceManagerServiceProxy(pid, uid,
- ::ndk::SharedRefBase::make<ResourceManagerClient>(this, pid));
+ ::ndk::SharedRefBase::make<ResourceManagerClient>(this, pid, uid));
if (!mGetCodecBase) {
mGetCodecBase = [](const AString &name, const char *owner) {
return GetCodecBase(name, owner);
@@ -1607,6 +1637,11 @@
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource::CodecResource(secureCodec, toMediaResourceSubType(mDomain)));
+
+ // If the ComponentName is not set yet, use the name passed by the user.
+ if (mComponentName.empty()) {
+ mResourceManagerProxy->setCodecName(name.c_str());
+ }
for (int i = 0; i <= kMaxRetry; ++i) {
if (i > 0) {
// Don't try to reclaim resource for the first time.
@@ -3388,6 +3423,8 @@
if (mComponentName.c_str()) {
mediametrics_setCString(mMetricsHandle, kCodecCodec,
mComponentName.c_str());
+ // Update the codec name.
+ mResourceManagerProxy->setCodecName(mComponentName.c_str());
}
const char *owner = mCodecInfo ? mCodecInfo->getOwnerName() : "";
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index c5b5199..863177d 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -798,6 +798,8 @@
{ "dvb-audio-description", kKeyDvbAudioDescription},
{ "dvb-teletext-magazine-number", kKeyDvbTeletextMagazineNumber},
{ "dvb-teletext-page-number", kKeyDvbTeletextPageNumber},
+ { "profile", kKeyAudioProfile },
+ { "level", kKeyAudioLevel },
}
};
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index 33f224c..80a1170 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -117,6 +117,12 @@
kKeyVideoProfile = 'vprf', // int32_t
kKeyVideoLevel = 'vlev', // int32_t
+ // audio profile and level
+ // The codec framework doesn't distinguish between video and audio profiles,
+ // so there is no need to define a separate key
+ kKeyAudioProfile = 'vprf', // int32_t
+ kKeyAudioLevel = 'vlev', // int32_t
+
kKey2ByteNalLength = '2NAL', // int32_t (bool)
// Identify the file output format for authoring
diff --git a/media/module/extractors/ogg/OggExtractor.cpp b/media/module/extractors/ogg/OggExtractor.cpp
index 23ad1b1..4c106b2 100644
--- a/media/module/extractors/ogg/OggExtractor.cpp
+++ b/media/module/extractors/ogg/OggExtractor.cpp
@@ -1048,16 +1048,21 @@
size_t numerator = mTableOfContents.size();
if (numerator > kMaxNumTOCEntries) {
- size_t denom = numerator - kMaxNumTOCEntries;
+ Vector<TOCEntry> maxTOC;
+ maxTOC.setCapacity(kMaxNumTOCEntries);
+ size_t denom = numerator - kMaxNumTOCEntries;
size_t accum = 0;
- for (ssize_t i = mTableOfContents.size(); i > 0; --i) {
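+ // Evenly decimate the table of contents with a Bresenham-style accumulator,
+ // keeping entries spread across the whole table so that kMaxNumTOCEntries remain.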
+ for (ssize_t i = 0; i < mTableOfContents.size(); i++) {
accum += denom;
if (accum >= numerator) {
- mTableOfContents.removeAt(i);
accum -= numerator;
+ } else {
+ maxTOC.push(mTableOfContents.itemAt(i));
}
}
+
+ mTableOfContents = maxTOC;
}
}
diff --git a/media/module/foundation/MediaDefs.cpp b/media/module/foundation/MediaDefs.cpp
index 4a75f90..7abab63 100644
--- a/media/module/foundation/MediaDefs.cpp
+++ b/media/module/foundation/MediaDefs.cpp
@@ -72,6 +72,7 @@
const char *MEDIA_MIMETYPE_AUDIO_DTS = "audio/vnd.dts";
const char *MEDIA_MIMETYPE_AUDIO_DTS_HD = "audio/vnd.dts.hd";
const char *MEDIA_MIMETYPE_AUDIO_DTS_HD_MA = "audio/vnd.dts.hd;profile=dtsma";
+const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD = "audio/vnd.dts.uhd";
const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P1 = "audio/vnd.dts.uhd;profile=p1";
const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2 = "audio/vnd.dts.uhd;profile=p2";
const char *MEDIA_MIMETYPE_AUDIO_EVRC = "audio/evrc";
diff --git a/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h b/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
index 740336a..05ee7fc 100644
--- a/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
+++ b/media/module/foundation/include/media/stagefright/foundation/MediaDefs.h
@@ -74,6 +74,7 @@
extern const char *MEDIA_MIMETYPE_AUDIO_DTS;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS_HD_MA;
+extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P1;
extern const char *MEDIA_MIMETYPE_AUDIO_DTS_UHD_P2;
extern const char *MEDIA_MIMETYPE_AUDIO_EVRC;
diff --git a/media/module/mpeg2ts/ATSParser.cpp b/media/module/mpeg2ts/ATSParser.cpp
index 1482072..6aeea3b 100644
--- a/media/module/mpeg2ts/ATSParser.cpp
+++ b/media/module/mpeg2ts/ATSParser.cpp
@@ -556,7 +556,15 @@
if (descriptor_length > ES_info_length) {
return ERROR_MALFORMED;
}
- if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
+
+ // The DTS descriptor is used in the PSI PMT to identify streams which carry
+ // DTS audio (core only). If a DTS descriptor is present, DTS-HD and DTS-UHD
+ // descriptors shall not be present in the same ES_info descriptor loop.
+ if (descriptor_tag == DESCRIPTOR_DTS) {
+ info.mType = STREAMTYPE_DTS;
+ ES_info_length -= descriptor_length;
+ br->skipBits(descriptor_length * 8);
+ } else if (descriptor_tag == DESCRIPTOR_CA && descriptor_length >= 4) {
hasStreamCA = true;
streamCA.mSystemID = br->getBits(16);
streamCA.mPID = br->getBits(16) & 0x1fff;
@@ -575,6 +583,16 @@
if (descTagExt == EXT_DESCRIPTOR_DVB_AC4) {
info.mTypeExt = EXT_DESCRIPTOR_DVB_AC4;
br->skipBits(descriptor_length * 8);
+ } else if (descTagExt == EXT_DESCRIPTOR_DVB_DTS_HD) {
+ // DTS-HD extended descriptor, which can accommodate core-only formats
+ // as well as extension-only and core + extension combinations.
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_DTS_HD;
+ br->skipBits(descriptor_length * 8);
+ } else if (descTagExt == EXT_DESCRIPTOR_DVB_DTS_UHD) {
+ // The DTS-UHD descriptor is used in the PSI PMT to identify streams
+ // which carry DTS-UHD audio
+ info.mTypeExt = EXT_DESCRIPTOR_DVB_DTS_UHD;
+ br->skipBits(descriptor_length * 8);
} else if (descTagExt == EXT_DESCRIPTOR_DVB_AUDIO_PRESELECTION &&
descriptor_length >= 1) {
// DVB BlueBook A038 Table 110
@@ -920,9 +938,17 @@
mode = ElementaryStreamQueue::EAC3;
break;
+ case STREAMTYPE_DTS:
+ mode = ElementaryStreamQueue::DTS;
+ break;
+
case STREAMTYPE_PES_PRIVATE_DATA:
if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4) {
mode = ElementaryStreamQueue::AC4;
+ } else if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_DTS_HD) {
+ mode = ElementaryStreamQueue::DTS_HD;
+ } else if (mStreamTypeExt == EXT_DESCRIPTOR_DVB_DTS_UHD) {
+ mode = ElementaryStreamQueue::DTS_UHD;
}
break;
@@ -1158,9 +1184,12 @@
case STREAMTYPE_EAC3:
case STREAMTYPE_AAC_ENCRYPTED:
case STREAMTYPE_AC3_ENCRYPTED:
+ case STREAMTYPE_DTS:
return true;
case STREAMTYPE_PES_PRIVATE_DATA:
- return mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4;
+ return (mStreamTypeExt == EXT_DESCRIPTOR_DVB_AC4
+ || mStreamTypeExt == EXT_DESCRIPTOR_DVB_DTS_HD
+ || mStreamTypeExt == EXT_DESCRIPTOR_DVB_DTS_UHD);
default:
return false;
diff --git a/media/module/mpeg2ts/ESQueue.cpp b/media/module/mpeg2ts/ESQueue.cpp
index 192ba77..2dc7b0a 100644
--- a/media/module/mpeg2ts/ESQueue.cpp
+++ b/media/module/mpeg2ts/ESQueue.cpp
@@ -362,6 +362,436 @@
return OK;
}
+#define RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bitstream, size) \
+ do { \
+ if ((bitstream).numBitsLeft() < (size)) { \
+ ALOGE("Not enough bits left for further parsing"); \
+ return ERROR_MALFORMED; } \
+ } while (0)
+
+// Parse DTS Digital Surround and DTS Express (LBR) stream header
+static status_t parseDTSHDSyncFrame(
+ const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+ static const unsigned channelCountTable[] = {1, 2, 2, 2, 2, 3, 3, 4,
+ 4, 5, 6, 6, 6, 7, 8, 8};
+ static const unsigned samplingRateTableCoreSS[] = {0, 8000, 16000, 32000, 0, 0, 11025, 22050,
+ 44100, 0, 0, 12000, 24000, 48000, 0, 0};
+ static const unsigned samplingRateTableExtSS[] = {8000, 16000, 32000, 64000, 128000,
+ 22050, 44100, 88200, 176400, 352800,
+ 12000, 24000, 48000, 96000, 192000, 384000};
+
+ const uint32_t DTSHD_SYNC_CORE_16BIT_BE = 0x7ffe8001;
+ const uint32_t DTSHD_SYNC_EXSS_16BIT_BE = 0x64582025;
+
+ uint32_t numChannels = 0, samplingRate = 0;
+ bool isLBR = false;
+
+ ABitReader bits(ptr, size);
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 32);
+ uint32_t dtshdSyncWord = bits.getBits(32);
+
+ // Expecting DTS Digital Surround or DTS Express (LBR) streams only
+ if (dtshdSyncWord == DTSHD_SYNC_CORE_16BIT_BE) { // DTS Digital Surround Header
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (1 + 5 + 1 + 7 + 14 + 6 + 4 + 15 + 2));
+
+ // FTYPE, SHORT, CRC, NBLKS
+ bits.skipBits(1 + 5 + 1 + 7);
+
+ frameSize = bits.getBits(14) + 1;
+ uint32_t amode = bits.getBits(6);
+ uint32_t freqIndex = bits.getBits(4);
+
+ // RATE, FIXEDBIT, DYNF, TIMEF, AUXF, HDCD, EXT_AUDIO_ID, EXT_AUDIO, ASPF
+ bits.skipBits(5 + 1 + 1 + 1 + 1 + 1 + 3 + 1 + 1);
+
+ uint32_t lfeFlag = bits.getBits(2);
+ numChannels = (amode <= 15) ? channelCountTable[amode] : 0;
+ numChannels += ((lfeFlag == 1) || (lfeFlag == 2)) ? 1 : 0;
+ samplingRate = (freqIndex <= 15) ? samplingRateTableCoreSS[freqIndex] : 0;
+
+ isLBR = false;
+ } else if (dtshdSyncWord == DTSHD_SYNC_EXSS_16BIT_BE) { // DTS Express (LBR) Header
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (8 + 2 + 1));
+
+ uint32_t extHeadersize, extSSFsize;
+ uint32_t numAudioPresent = 1, numAssets = 1;
+ uint32_t nuActiveExSSMask[8];
+
+ // userDefinedBits
+ bits.skipBits(8);
+
+ uint32_t extSSIndex = bits.getBits(2);
+ uint32_t headerSizeType = bits.getBits(1);
+
+ if (headerSizeType == 0) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (8 + 16));
+
+ extHeadersize = bits.getBits(8) + 1;
+ extSSFsize = bits.getBits(16) + 1;
+ } else {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (12 + 20));
+
+ extHeadersize = bits.getBits(12) + 1;
+ extSSFsize = bits.getBits(20) + 1;
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (1));
+
+ uint32_t staticFieldsPresent = bits.getBits(1);
+
+ if (staticFieldsPresent) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (2 + 3 + 1));
+
+ // nuRefClockCode, nuExSSFrameDurationCode
+ bits.skipBits(2 + 3);
+
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (32 + 4));
+
+ bits.skipBits(32 + 4);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (3 + 3));
+
+ // numAudioPresent, numAssets
+ bits.skipBits(3 + 3);
+
+ for (uint32_t nAuPr = 0; nAuPr < numAudioPresent; nAuPr++) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (extSSIndex + 1));
+
+ nuActiveExSSMask[nAuPr] = bits.getBits(extSSIndex + 1);
+ }
+
+ for (uint32_t nAuPr = 0; nAuPr < numAudioPresent; nAuPr++) {
+ for (uint32_t nSS = 0; nSS < extSSIndex + 1; nSS++) {
+ if (((nuActiveExSSMask[nAuPr] >> nSS) & 0x1) == 1) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 8);
+
+ // nuActiveAssetMask
+ bits.skipBits(8);
+ }
+ }
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 1);
+
+ // bMixMetadataEnbl
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (2 + 2 + 2));
+
+ // nuMixMetadataAdjLevel
+ bits.skipBits(2);
+
+ uint32_t bits4MixOutMask = (bits.getBits(2) + 1) << 2;
+ uint32_t numMixOutConfigs = bits.getBits(2) + 1;
+
+ for (int ns = 0; ns < numMixOutConfigs; ns++) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, bits4MixOutMask);
+
+ // nuMixOutChMask
+ bits.skipBits(bits4MixOutMask);
+ }
+ }
+ }
+
+ for (int nAst = 0; nAst < numAssets; nAst++) {
+ int bits4ExSSFsize = (headerSizeType == 0) ? 16 : 20;
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, bits4ExSSFsize);
+
+ bits.skipBits(bits4ExSSFsize);
+ }
+
+ /* Asset descriptor */
+ for (int nAst = 0; nAst < numAssets; nAst++) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (9 + 3));
+
+ // nuAssetDescriptFsize, nuAssetIndex
+ bits.skipBits(9 + 3);
+
+ if (staticFieldsPresent) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 1);
+
+ // bAssetTypeDescrPresent
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 4);
+
+ // nuAssetTypeDescriptor
+ bits.skipBits(4);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 1);
+
+ // bLanguageDescrPresent
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 24);
+
+ // LanguageDescriptor
+ bits.skipBits(24);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 1);
+
+ // bInfoTextPresent
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 10);
+
+ uint32_t nuInfoTextByteSize = bits.getBits(10) + 1;
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (nuInfoTextByteSize * 8));
+
+ // InfoTextString
+ bits.skipBits(nuInfoTextByteSize * 8);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (5 + 4 + 8));
+
+ // nuBitResolution
+ bits.skipBits(5);
+
+ samplingRate = samplingRateTableExtSS[bits.getBits(4)];
+ numChannels = bits.getBits(8) + 1;
+ }
+ }
+
+ frameSize = extHeadersize + extSSFsize;
+ isLBR = true;
+ } else {
+ ALOGE("No valid sync word in DTS/DTSHD header");
+ return ERROR_MALFORMED;
+ }
+
+ if (metaData != NULL) {
+ if (isLBR) {
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_DTS_HD);
+ (*metaData)->setInt32(kKeyAudioProfile, 0x2); // CodecProfileLevel.DTS_HDProfileLBR
+ } else {
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_DTS);
+ }
+ (*metaData)->setInt32(kKeyChannelCount, numChannels);
+ (*metaData)->setInt32(kKeySampleRate, samplingRate);
+ }
+ return OK;
+}
+
+static status_t extractVarLenBitFields(
+ ABitReader *bits, size_t *bitsUsed, uint32_t *value,
+ unsigned ucTable[], bool extractAndAddFlag) {
+
+ static const unsigned bitsUsedTbl[8] = {1, 1, 1, 1, 2, 2, 3, 3}; // prefix code lengths
+ static const unsigned indexTbl[8] = {0, 0, 0, 0, 1, 1, 2, 3}; // code to prefix code index map
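+ // The 3-bit lookahead encodes a unary-style prefix: '0xx' selects index 0
+ // (1 prefix bit), '10x' index 1 (2 bits), '110' index 2 and '111' index 3
+ // (3 bits each); ucTable[index] then gives how many value bits follow.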
+
+ /* Clone the bitstream */
+ ABitReader bitStream(bits->data(), bits->numBitsLeft() / 8);
+ ABitReader bitstreamClone(bits->data(), bits->numBitsLeft() / 8);
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bitstreamClone, 3);
+
+ unsigned code = bitstreamClone.getBits(3);
+ unsigned totalBitsUsed = bitsUsedTbl[code];
+ unsigned unIndex = indexTbl[code];
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bitStream, totalBitsUsed);
+
+ bitStream.skipBits(totalBitsUsed);
+
+ uint32_t unValue = 0;
+ if (ucTable[unIndex] > 0) {
+ if (extractAndAddFlag) {
+ for (unsigned un = 0; un < unIndex; un++) {
+ unValue += (1 << ucTable[un]);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bitStream, ucTable[unIndex]);
+
+ unValue += bitStream.getBits(ucTable[unIndex]);
+ totalBitsUsed += ucTable[unIndex];
+ } else {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bitStream, ucTable[unIndex]);
+
+ unValue += bitStream.getBits(ucTable[unIndex]);
+ totalBitsUsed += ucTable[unIndex];
+ }
+ }
+
+ *bitsUsed = (size_t)totalBitsUsed;
+ *value = unValue;
+ return OK;
+}
+
+// Parse DTS UHD Profile-2 stream header
+static status_t parseDTSUHDSyncFrame(
+ const uint8_t *ptr, size_t size, unsigned &frameSize, sp<MetaData> *metaData) {
+
+ static const uint32_t DTSUHD_SYNC_CORE_16BIT_BE = 0x40411BF2;
+ static const uint32_t DTSUHD_NONSYNC_CORE_16BIT_BE = 0x71C442E8;
+
+ unsigned audioSamplRate = 0;
+
+ ABitReader bits(ptr, size);
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 32);
+
+ uint32_t syncWord = bits.getBits(32);
+
+ bool isSyncFrameFlag = false;
+ switch (syncWord) {
+ case DTSUHD_SYNC_CORE_16BIT_BE:
+ isSyncFrameFlag = true;
+ break;
+ case DTSUHD_NONSYNC_CORE_16BIT_BE:
+ isSyncFrameFlag = false;
+ break;
+ default:
+ ALOGE("No valid sync word in DTSUHD header");
+ return ERROR_MALFORMED; // invalid sync word
+ }
+
+ unsigned uctable1[4] = { 5, 8, 10, 12 };
+ uint32_t sizeOfFTOCPayload = 0;
+ size_t nuBitsUsed = 0;
+ status_t status = OK;
+
+ status = extractVarLenBitFields(&bits, &nuBitsUsed, &sizeOfFTOCPayload, uctable1, true);
+
+ if (status != OK) {
+ ALOGE("Failed to extractVarLenBitFields from DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+
+ bits.skipBits(nuBitsUsed);
+
+ if (isSyncFrameFlag) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (1 + 2 + 3 + 2 + 1));
+
+ // FullChannelBasedMixFlag, ETSI TS 103 491 V1.2.1, Section 6.4.6.1
+ if (!(bits.getBits(1))) {
+ // This implementation only supports full channel mask-based
+ // audio presentation (i.e. 2.0, 5.1, 11.1 mix without objects)
+ ALOGE("Objects not supported, only DTSUHD full channel mask-based mix");
+ return ERROR_MALFORMED;
+ }
+
+ // BaseDuration, FrameDuration
+ bits.skipBits(2 + 3);
+
+ unsigned clockRateIndex = bits.getBits(2);
+ unsigned clockRateHertz = 0;
+
+ switch (clockRateIndex) {
+ case 0:
+ clockRateHertz = 32000;
+ break;
+ case 1:
+ clockRateHertz = 44100;
+ break;
+ case 2:
+ clockRateHertz = 48000;
+ break;
+ default:
+ ALOGE("Invalid clockRateIndex in DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+
+ if (bits.getBits(1)) {
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, (32 + 4));
+
+ bits.skipBits(32 + 4);
+ }
+
+ RETURN_ERROR_IF_NOT_ENOUGH_BYTES_LEFT(bits, 2);
+
+ unsigned samplRateMultiplier = (1 << bits.getBits(2));
+ audioSamplRate = clockRateHertz * samplRateMultiplier;
+ }
+
+ uint32_t chunkPayloadBytes = 0;
+ int numOfMDChunks = isSyncFrameFlag ? 1 : 0; // Metadata chunks
+ for (int nmdc = 0; nmdc < numOfMDChunks; nmdc++) {
+ unsigned uctable2[4] = {6, 9, 12, 15};
+ uint32_t nuMDChunkSize = 0;
+ nuBitsUsed = 0;
+
+ status = extractVarLenBitFields(&bits, &nuBitsUsed, &nuMDChunkSize, uctable2, true);
+ if (status != OK) {
+ ALOGE("Failed to extractVarLenBitFields from DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+
+ bits.skipBits(nuBitsUsed);
+
+ if (nuMDChunkSize > 32767) {
+ ALOGE("Unsupported number of metadata chunks in DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+ chunkPayloadBytes += nuMDChunkSize;
+ }
+
+ // Only one audio chunk is supported
+ int numAudioChunks = 1;
+ for (int nac = 0; nac < numAudioChunks; nac++) {
+ uint32_t acID = 256, nuAudioChunkSize = 0;
+
+ // isSyncFrameFlag means that ACID is present
+ if (isSyncFrameFlag) {
+ unsigned uctable3[4] = {2, 4, 6, 8};
+ nuBitsUsed = 0;
+
+ status = extractVarLenBitFields(&bits, &nuBitsUsed, &acID, uctable3, true);
+
+ if (status != OK) {
+ ALOGE("Failed to extractVarLenBitFields from DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+
+ bits.skipBits(nuBitsUsed);
+ }
+
+ nuBitsUsed = 0;
+ if (acID == 0) {
+ nuAudioChunkSize = 0;
+ } else {
+ unsigned uctable4[4] = {9, 11, 13, 16};
+
+ status = extractVarLenBitFields(&bits, &nuBitsUsed, &nuAudioChunkSize, uctable4, true);
+
+ if (status != OK) {
+ ALOGE("Failed to extractVarLenBitFields from DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+ }
+
+ if (nuAudioChunkSize > 65535) {
+ ALOGE("Unsupported audio chunk size in DTSUHD header");
+ return ERROR_MALFORMED;
+ }
+
+ chunkPayloadBytes += nuAudioChunkSize;
+ }
+
+ frameSize = (sizeOfFTOCPayload + 1) + chunkPayloadBytes;
+
+ if (metaData != NULL) {
+ (*metaData)->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_DTS_UHD);
+ (*metaData)->setInt32(kKeyAudioProfile, 0x2); // CodecProfileLevel.DTS_UHDProfileP2
+ (*metaData)->setInt32(kKeyChannelCount, 2); // Setting default channel count as stereo
+ (*metaData)->setInt32(kKeySampleRate, audioSamplRate);
+ }
+
+ return OK;
+}
+
+static status_t isSeeminglyValidDTSHDHeader(const uint8_t *ptr, size_t size, unsigned &frameSize)
+{
+ return parseDTSHDSyncFrame(ptr, size, frameSize, NULL);
+}
+
+static status_t isSeeminglyValidDTSUHDHeader(const uint8_t *ptr, size_t size, unsigned &frameSize)
+{
+ return parseDTSUHDSyncFrame(ptr, size, frameSize, NULL);
+}
+
static status_t IsSeeminglyValidAC4Header(const uint8_t *ptr, size_t size, unsigned &frameSize) {
return parseAC4SyncFrame(ptr, size, frameSize, NULL);
}
@@ -655,6 +1085,70 @@
break;
}
+ case DTS: // Checking for DTS or DTS-HD syncword
+ case DTS_HD:
+ {
+ uint8_t *ptr = (uint8_t *)data;
+ unsigned frameSize = 0;
+ ssize_t startOffset = -1;
+
+ for (size_t i = 0; i < size; ++i) {
+ if (isSeeminglyValidDTSHDHeader(&ptr[i], size - i, frameSize) == OK) {
+ startOffset = i;
+ break;
+ }
+ }
+
+ if (startOffset < 0) {
+ return ERROR_MALFORMED;
+ }
+ if (startOffset > 0) {
+ ALOGI("found something resembling a DTS-HD syncword at "
+ "offset %zd",
+ startOffset);
+ }
+
+ if (frameSize != size - startOffset) {
+ ALOGV("DTS-HD frame size is %u bytes, while the buffer size is %zd bytes.",
+ frameSize, size - startOffset);
+ }
+
+ data = &ptr[startOffset];
+ size -= startOffset;
+ break;
+ }
+
+ case DTS_UHD:
+ {
+ uint8_t *ptr = (uint8_t *)data;
+ ssize_t startOffset = -1;
+ unsigned frameSize = 0;
+
+ for (size_t i = 0; i < size; ++i) {
+ if (isSeeminglyValidDTSUHDHeader(&ptr[i], size - i, frameSize) == OK) {
+ startOffset = i;
+ break;
+ }
+ }
+
+ if (startOffset < 0) {
+ return ERROR_MALFORMED;
+ }
+ if (startOffset > 0) {
+ ALOGI("found something resembling a DTS-UHD syncword at "
+ "offset %zd",
+ startOffset);
+ }
+
+ if (frameSize != size - startOffset) {
+ ALOGV("DTS-UHD frame size is %u bytes, while the buffer size is %zd bytes.",
+ frameSize, size - startOffset);
+ }
+ data = &ptr[startOffset];
+ size -= startOffset;
+ break;
+ }
+
case PCM_AUDIO:
case METADATA:
{
@@ -928,6 +1422,11 @@
return dequeueAccessUnitPCMAudio();
case METADATA:
return dequeueAccessUnitMetadata();
+ case DTS: // Using same dequeue function for both DTS and DTS-HD types.
+ case DTS_HD:
+ return dequeueAccessUnitDTSOrDTSHD();
+ case DTS_UHD:
+ return dequeueAccessUnitDTSUHD();
default:
if (mMode != MPEG_AUDIO) {
ALOGE("Unknown mode");
@@ -937,6 +1436,113 @@
}
}
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitDTSOrDTSHD() {
+ unsigned syncStartPos = 0; // in bytes
+ unsigned payloadSize = 0;
+ sp<MetaData> format = new MetaData;
+
+ ALOGV("dequeueAccessUnitDTSOrDTSHD[%d]: mBuffer %p(%zu)", mAUIndex,
+ mBuffer->data(), mBuffer->size());
+
+ while (true) {
+ if (syncStartPos + 4 >= mBuffer->size()) {
+ return NULL;
+ }
+ uint8_t *ptr = mBuffer->data() + syncStartPos;
+ size_t size = mBuffer->size() - syncStartPos;
+ status_t status = parseDTSHDSyncFrame(ptr, size, payloadSize, &format);
+ if (status == 0) {
+ break;
+ }
+ ++syncStartPos;
+ }
+
+ if (mBuffer->size() < syncStartPos + payloadSize) {
+ ALOGV("Not enough buffer size for DTS/DTS-HD");
+ return NULL;
+ }
+
+ if (mFormat == NULL) {
+ mFormat = format;
+ }
+
+ int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+ if (timeUs < 0LL) {
+ ALOGE("negative timeUs");
+ return NULL;
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+ accessUnit->meta()->setInt32("isSync", 1);
+
+ memmove(
+ mBuffer->data(),
+ mBuffer->data() + syncStartPos + payloadSize,
+ mBuffer->size() - syncStartPos - payloadSize);
+
+ mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+
+ return accessUnit;
+}
+
+sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitDTSUHD()
+{
+ unsigned syncStartPos = 0; // in bytes
+ unsigned payloadSize = 0;
+ sp<MetaData> format = new MetaData;
+
+ ALOGV("dequeueAccessUnitDTSUHD[%d]: mBuffer %p(%zu)", mAUIndex,
+ mBuffer->data(), mBuffer->size());
+
+ while (true) {
+ if (syncStartPos + 4 >= mBuffer->size()) {
+ return NULL;
+ }
+ uint8_t *ptr = mBuffer->data() + syncStartPos;
+ size_t size = mBuffer->size() - syncStartPos;
+ status_t status = parseDTSUHDSyncFrame(ptr, size, payloadSize, &format);
+ if (status == 0) {
+ break;
+ }
+ ++syncStartPos;
+ }
+
+ if (mBuffer->size() < syncStartPos + payloadSize) {
+ ALOGV("Not enough buffer size for DTS-UHD");
+ return NULL;
+ }
+
+ if (mFormat == NULL) {
+ mFormat = format;
+ }
+
+ int64_t timeUs = fetchTimestamp(syncStartPos + payloadSize);
+ if (timeUs < 0LL) {
+ ALOGE("negative timeUs");
+ return NULL;
+ }
+ mAUIndex++;
+
+ sp<ABuffer> accessUnit = new ABuffer(syncStartPos + payloadSize);
+ memcpy(accessUnit->data(), mBuffer->data(), syncStartPos + payloadSize);
+
+ accessUnit->meta()->setInt64("timeUs", timeUs);
+ accessUnit->meta()->setInt32("isSync", 1);
+
+ memmove(
+ mBuffer->data(),
+ mBuffer->data() + syncStartPos + payloadSize,
+ mBuffer->size() - syncStartPos - payloadSize);
+
+ mBuffer->setRange(0, mBuffer->size() - syncStartPos - payloadSize);
+
+ return accessUnit;
+}
+
sp<ABuffer> ElementaryStreamQueue::dequeueAccessUnitEAC3() {
unsigned syncStartPos = 0; // in bytes
unsigned payloadSize = 0;
diff --git a/media/module/mpeg2ts/include/mpeg2ts/ATSParser.h b/media/module/mpeg2ts/include/mpeg2ts/ATSParser.h
index 49578d3..b658c5a 100644
--- a/media/module/mpeg2ts/include/mpeg2ts/ATSParser.h
+++ b/media/module/mpeg2ts/include/mpeg2ts/ATSParser.h
@@ -157,6 +157,9 @@
STREAMTYPE_LPCM_AC3 = 0x83,
STREAMTYPE_EAC3 = 0x87,
+ // DTS audio stream type which contains only Core substream
+ STREAMTYPE_DTS = 0x8A,
+
//Sample Encrypted types
STREAMTYPE_H264_ENCRYPTED = 0xDB,
STREAMTYPE_AAC_ENCRYPTED = 0xCF,
@@ -168,6 +171,7 @@
DESCRIPTOR_CA = 0x09,
// DVB BlueBook A038 Table 12
+ DESCRIPTOR_DTS = 0x7B,
DESCRIPTOR_DVB_EXTENSION = 0x7F,
};
@@ -175,6 +179,8 @@
enum {
EXT_DESCRIPTOR_DVB_AC4 = 0x15,
EXT_DESCRIPTOR_DVB_AUDIO_PRESELECTION = 0x19,
+ EXT_DESCRIPTOR_DVB_DTS_HD = 0x0E,
+ EXT_DESCRIPTOR_DVB_DTS_UHD = 0x21,
EXT_DESCRIPTOR_DVB_RESERVED_MAX = 0x7F,
};
diff --git a/media/module/mpeg2ts/include/mpeg2ts/ESQueue.h b/media/module/mpeg2ts/include/mpeg2ts/ESQueue.h
index a06bd6a..550a0e4 100644
--- a/media/module/mpeg2ts/include/mpeg2ts/ESQueue.h
+++ b/media/module/mpeg2ts/include/mpeg2ts/ESQueue.h
@@ -45,6 +45,9 @@
MPEG4_VIDEO,
PCM_AUDIO,
METADATA,
+ DTS,
+ DTS_HD,
+ DTS_UHD,
};
enum Flags {
@@ -125,6 +128,8 @@
sp<ABuffer> dequeueAccessUnitMPEG4Video();
sp<ABuffer> dequeueAccessUnitPCMAudio();
sp<ABuffer> dequeueAccessUnitMetadata();
+ sp<ABuffer> dequeueAccessUnitDTSOrDTSHD();
+ sp<ABuffer> dequeueAccessUnitDTSUHD();
// consume a logical (compressed) access unit of size "size",
// returns its timestamp in us (or -1 if no time information).
diff --git a/media/mtp/tests/MtpFuzzer/mtp_handle_fuzzer.cpp b/media/mtp/tests/MtpFuzzer/mtp_handle_fuzzer.cpp
index 676345a..7dcdc3f 100644
--- a/media/mtp/tests/MtpFuzzer/mtp_handle_fuzzer.cpp
+++ b/media/mtp/tests/MtpFuzzer/mtp_handle_fuzzer.cpp
@@ -128,10 +128,10 @@
std::unique_ptr<IMtpHandle> handle;
if (mFdp.ConsumeBool()) {
std::unique_ptr<IMtpHandle> mtpCompactHandle(new MtpFfsCompatHandle(controlFd));
- handle = move(mtpCompactHandle);
+ handle = std::move(mtpCompactHandle);
} else {
std::unique_ptr<IMtpHandle> mtpHandle(new MtpFfsHandle(controlFd));
- handle = move(mtpHandle);
+ handle = std::move(mtpHandle);
}
int32_t mtpHandle = mFdp.ConsumeIntegralInRange<size_t>(kMinAPICase, kMaxMtpHandleAPI);
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index 5005365..0df7636 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -758,6 +758,9 @@
EXPORT
media_status_t AMediaDrm_setPropertyByteArray(AMediaDrm *mObj,
const char *propertyName, const uint8_t *value, size_t valueSize) {
+ if (!mObj || mObj->mDrm == NULL) {
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
Vector<uint8_t> byteArray;
byteArray.appendArray(value, valueSize);
diff --git a/media/utils/ProcessInfo.cpp b/media/utils/ProcessInfo.cpp
index da199c4..8437222 100644
--- a/media/utils/ProcessInfo.cpp
+++ b/media/utils/ProcessInfo.cpp
@@ -40,11 +40,11 @@
int32_t state;
int32_t score = INVALID_ADJ;
status_t err = service->getProcessStatesAndOomScoresFromPids(length, &pid, &state, &score);
+ ALOGV("%s: pid:%d state:%d score:%d err:%d", __FUNCTION__, pid, state, score, err);
if (err != OK) {
ALOGE("getProcessStatesAndOomScoresFromPids failed");
return false;
}
- ALOGV("pid %d state %d score %d", pid, state, score);
if (score <= NATIVE_ADJ) {
std::scoped_lock lock{mOverrideLock};
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 0de7e7d..00b612f 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2140,9 +2140,19 @@
if (!isStreamInitialized()) {
ALOGE("The stream is not open yet"); // This should not happen.
} else {
- // setEventCallback will need a strong pointer as a parameter. Calling it
- // here instead of constructor of PlaybackThread so that the onFirstRef
- // callback would not be made on an incompletely constructed object.
+ // Callbacks take strong or weak pointers as a parameter.
+ // Since PlaybackThread passes itself as a callback handler, it can only
+ // be done outside of the constructor. Creating weak and especially strong
+ // pointers to a refcounted object in its own constructor is strongly
+ // discouraged, see comments in system/core/libutils/include/utils/RefBase.h.
+ // Even if a function takes a weak pointer, it is possible that it will
+ // need to convert it to a strong pointer down the line.
+ if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING &&
+ mOutput->stream->setCallback(this) == OK) {
+ mUseAsyncWrite = true;
+ mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
+ }
+
if (mOutput->stream->setEventCallback(this) != OK) {
ALOGD("Failed to add event callback");
}
@@ -3007,13 +3017,6 @@
mFrameCount);
}
- if (mOutput->flags & AUDIO_OUTPUT_FLAG_NON_BLOCKING) {
- if (mOutput->stream->setCallback(this) == OK) {
- mUseAsyncWrite = true;
- mCallbackThread = new AudioFlinger::AsyncCallbackThread(this);
- }
- }
-
mHwSupportsPause = false;
if (mOutput->flags & AUDIO_OUTPUT_FLAG_DIRECT) {
bool supportsPause = false, supportsResume = false;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index 2fe7b9e..9ee9037 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -33,6 +33,7 @@
#include <media/stagefright/foundation/AHandler.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
#include <media/ShmemCompat.h>
#include <mediautils/SchedulingPolicyService.h>
#include <mediautils/ServiceUtilities.h>
@@ -75,6 +76,24 @@
return maxMask;
}
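+// Builds the vector logged by mPoseRecorder / mPoseDurableRecorder: the
+// head-to-stage pose is inverted to stage-to-head and flattened to
+// [tx, ty, tz, pitch, roll, yaw], with the angles converted to degrees.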
+static std::vector<float> recordFromTranslationRotationVector(
+ const std::vector<float>& trVector) {
+ auto headToStageOpt = Pose3f::fromVector(trVector);
+ if (!headToStageOpt) return {};
+
+ const auto stageToHead = headToStageOpt.value().inverse();
+ const auto stageToHeadTranslation = stageToHead.translation();
+ constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+ std::vector<float> record{
+ stageToHeadTranslation[0], stageToHeadTranslation[1], stageToHeadTranslation[2],
+ 0.f, 0.f, 0.f};
+ media::quaternionToAngles(stageToHead.rotation(), &record[3], &record[4], &record[5]);
+ record[3] *= RAD_TO_DEGREE;
+ record[4] *= RAD_TO_DEGREE;
+ record[5] *= RAD_TO_DEGREE;
+ return record;
+}
+
// ---------------------------------------------------------------------------
class Spatializer::EngineCallbackHandler : public AHandler {
@@ -185,41 +204,6 @@
};
// ---------------------------------------------------------------------------
-
-// Convert recorded sensor data to string with level indentation.
-std::string Spatializer::HeadToStagePoseRecorder::toString(unsigned level) const {
- std::string prefixSpace(level, ' ');
- return mPoseRecordLog.dumpToString((prefixSpace + " ").c_str(), Spatializer::mMaxLocalLogLine);
-}
-
-// Compute sensor data, record into local log when it is time.
-void Spatializer::HeadToStagePoseRecorder::record(const std::vector<float>& headToStage) {
- if (headToStage.size() != mPoseVectorSize) return;
-
- if (mNumOfSampleSinceLastRecord++ == 0) {
- mFirstSampleTimestamp = std::chrono::steady_clock::now();
- }
- // if it's time, do record and reset.
- if (shouldRecordLog()) {
- poseSumToAverage();
- mPoseRecordLog.log(
- "mean: %s, min: %s, max %s, calculated %d samples in %0.4f second(s)",
- Spatializer::toString<double>(mPoseRadianSum, true /* radianToDegree */).c_str(),
- Spatializer::toString<float>(mMinPoseAngle, true /* radianToDegree */).c_str(),
- Spatializer::toString<float>(mMaxPoseAngle, true /* radianToDegree */).c_str(),
- mNumOfSampleSinceLastRecord, mNumOfSecondsSinceLastRecord.count());
- resetRecord();
- }
- // update stream average.
- for (int i = 0; i < mPoseVectorSize; i++) {
- mPoseRadianSum[i] += headToStage[i];
- mMaxPoseAngle[i] = std::max(mMaxPoseAngle[i], headToStage[i]);
- mMinPoseAngle[i] = std::min(mMinPoseAngle[i], headToStage[i]);
- }
- return;
-}
-
-// ---------------------------------------------------------------------------
sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
sp<Spatializer> spatializer;
@@ -590,7 +574,8 @@
}
std::lock_guard lock(mLock);
if (mPoseController != nullptr) {
- mLocalLog.log("%s with screenToStage %s", __func__, toString<float>(screenToStage).c_str());
+ mLocalLog.log("%s with screenToStage %s", __func__,
+ media::VectorRecorder::toString<float>(screenToStage).c_str());
mPoseController->setScreenToStagePose(maybePose.value());
}
return Status::ok();
@@ -771,8 +756,9 @@
callback = mHeadTrackingCallback;
if (mEngine != nullptr) {
setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
- mPoseRecorder.record(headToStage);
- mPoseDurableRecorder.record(headToStage);
+ const auto record = recordFromTranslationRotationVector(headToStage);
+ mPoseRecorder.record(record);
+ mPoseDurableRecorder.record(record);
}
}
@@ -822,8 +808,7 @@
}
}
callback = mHeadTrackingCallback;
- mLocalLog.log("%s: %s, spatializerMode %s", __func__, media::toString(mode).c_str(),
- media::toString(spatializerMode).c_str());
+ mLocalLog.log("%s: updating mode to %s", __func__, media::toString(mode).c_str());
}
if (callback != nullptr) {
callback->onHeadTrackingModeChanged(spatializerMode);
@@ -1048,8 +1033,7 @@
}
std::string Spatializer::toString(unsigned level) const {
- std::string prefixSpace;
- prefixSpace.append(level, ' ');
+ std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "Spatializer:\n";
bool needUnlock = false;
@@ -1105,14 +1089,15 @@
// PoseController dump.
if (mPoseController != nullptr) {
- ss += mPoseController->toString(level + 1);
- ss.append(prefixSpace +
- "Sensor data format - [rx, ry, rz, vx, vy, vz] (units-degree, "
- "r-transform, v-angular velocity, x-pitch, y-roll, z-yaw):\n");
- ss.append(prefixSpace + " PerMinuteHistory:\n");
- ss += mPoseDurableRecorder.toString(level + 1);
- ss.append(prefixSpace + " PerSecondHistory:\n");
- ss += mPoseRecorder.toString(level + 1);
+ ss.append(mPoseController->toString(level + 1))
+ .append(prefixSpace)
+ .append("Pose (active stage-to-head) [tx, ty, tz : pitch, roll, yaw]:\n")
+ .append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mPoseDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mPoseRecorder.toString(level + 3));
} else {
ss.append(prefixSpace).append("SpatializerPoseController does not exist\n");
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 0f6bafe..4d6c875 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -27,6 +27,7 @@
#include <audio_utils/SimpleLog.h>
#include <math.h>
#include <media/AudioEffect.h>
+#include <media/VectorRecorder.h>
#include <media/audiohal/EffectHalInterface.h>
#include <media/stagefright/foundation/ALooper.h>
#include <system/audio_effects/effect_spatializer.h>
@@ -172,30 +173,6 @@
media::audio::common::toString(*result) : "unknown_latency_mode";
}
- /**
- * Format head to stage vector to a string, [0.00, 0.00, 0.00, -1.29, -0.50, 15.27].
- */
- template <typename T>
- static std::string toString(const std::vector<T>& vec, bool radianToDegree = false) {
- if (vec.size() == 0) {
- return "[]";
- }
-
- std::string ss = "[";
- for (auto f = vec.begin(); f != vec.end(); ++f) {
- if (f != vec.begin()) {
- ss .append(", ");
- }
- if (radianToDegree) {
- base::StringAppendF(&ss, "%0.2f", HeadToStagePoseRecorder::getDegreeWithRadian(*f));
- } else {
- base::StringAppendF(&ss, "%f", *f);
- }
- }
- ss.append("]");
- return ss;
- };
-
// If the Spatializer is not created, we send the status for metrics purposes.
// OK: Spatializer not expected to be created.
// NO_INIT: Spatializer creation failed.
@@ -427,92 +404,12 @@
* @brief Calculate and record sensor data.
* Dump to local log with max/average pose angle every mPoseRecordThreshold.
*/
- class HeadToStagePoseRecorder {
- public:
- HeadToStagePoseRecorder(std::chrono::duration<double> threshold, int maxLogLine)
- : mPoseRecordThreshold(threshold), mPoseRecordLog(maxLogLine) {
- resetRecord();
- }
-
- /** Convert recorded sensor data to string with level indentation */
- std::string toString(unsigned level) const;
-
- /**
- * @brief Calculate sensor data, record into local log when it is time.
- *
- * @param headToStage The vector from Pose3f::toVector().
- */
- void record(const std::vector<float>& headToStage);
-
- static constexpr float getDegreeWithRadian(const float radian) {
- float radianToDegreeRatio = (180 / PI);
- return (radian * radianToDegreeRatio);
- }
-
- private:
- static constexpr float PI = M_PI;
- /**
- * Pose recorder time threshold to record sensor data in local log.
- * Sensor data will be recorded into log at least every mPoseRecordThreshold.
- */
- std::chrono::duration<double> mPoseRecordThreshold;
- // Number of seconds pass since last record.
- std::chrono::duration<double> mNumOfSecondsSinceLastRecord;
- /**
- * According to frameworks/av/media/libheadtracking/include/media/Pose.h
- * "The vector will have exactly 6 elements, where the first three are a translation vector
- * and the last three are a rotation vector."
- */
- static constexpr size_t mPoseVectorSize = 6;
- /**
- * Timestamp of last sensor data record in local log.
- */
- std::chrono::time_point<std::chrono::steady_clock> mFirstSampleTimestamp;
- /**
- * Number of sensor samples received since last record, sample rate is ~100Hz which produce
- * ~6k samples/minute.
- */
- uint32_t mNumOfSampleSinceLastRecord = 0;
- /* The sum of pose angle represented by radian since last dump, div
- * mNumOfSampleSinceLastRecord to get arithmetic mean. Largest possible value: 2PI * 100Hz *
- * mPoseRecordThreshold.
- */
- std::vector<double> mPoseRadianSum;
- std::vector<float> mMaxPoseAngle;
- std::vector<float> mMinPoseAngle;
- // Local log for history sensor data.
- SimpleLog mPoseRecordLog{mMaxLocalLogLine};
-
- bool shouldRecordLog() {
- mNumOfSecondsSinceLastRecord = std::chrono::duration_cast<std::chrono::seconds>(
- std::chrono::steady_clock::now() - mFirstSampleTimestamp);
- return mNumOfSecondsSinceLastRecord >= mPoseRecordThreshold;
- }
-
- void resetRecord() {
- mPoseRadianSum.assign(mPoseVectorSize, 0);
- mMaxPoseAngle.assign(mPoseVectorSize, -PI);
- mMinPoseAngle.assign(mPoseVectorSize, PI);
- mNumOfSampleSinceLastRecord = 0;
- mNumOfSecondsSinceLastRecord = std::chrono::seconds(0);
- }
-
- // Add each sample to sum and only calculate when record.
- void poseSumToAverage() {
- if (mNumOfSampleSinceLastRecord == 0) return;
- for (auto& p : mPoseRadianSum) {
- const float reciprocal = 1.f / mNumOfSampleSinceLastRecord;
- p *= reciprocal;
- }
- }
- }; // HeadToStagePoseRecorder
-
// Record one log line per second (up to mMaxLocalLogLine) to capture most recent sensor data.
- HeadToStagePoseRecorder mPoseRecorder GUARDED_BY(mLock) =
- HeadToStagePoseRecorder(std::chrono::seconds(1), mMaxLocalLogLine);
+ media::VectorRecorder mPoseRecorder GUARDED_BY(mLock) {
+ 6 /* vectorSize */, std::chrono::seconds(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
// Record one log line per minute (up to mMaxLocalLogLine) to capture durable sensor data.
- HeadToStagePoseRecorder mPoseDurableRecorder GUARDED_BY(mLock) =
- HeadToStagePoseRecorder(std::chrono::minutes(1), mMaxLocalLogLine);
+ media::VectorRecorder mPoseDurableRecorder GUARDED_BY(mLock) {
+ 6 /* vectorSize */, std::chrono::minutes(1), mMaxLocalLogLine, { 3 } /* delimiterIdx */};
}; // Spatializer
}; // namespace android
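The two VectorRecorder members above replace the bespoke HeadToStagePoseRecorder that this change removes. As a rough illustration of the per-interval mean/min/max aggregation that the old recorder performed (a sketch only, assuming a plain steady_clock interval check; not the media::VectorRecorder implementation):

#include <algorithm>
#include <chrono>
#include <limits>
#include <vector>

class IntervalAggregator {
public:
    IntervalAggregator(size_t size, std::chrono::duration<double> interval)
        : mSize(size), mInterval(interval) { reset(); }

    void record(const std::vector<float>& v) {
        if (v.size() != mSize) return;
        if (mCount++ == 0) mStart = std::chrono::steady_clock::now();
        for (size_t i = 0; i < mSize; ++i) {
            mSum[i] += v[i];
            mMax[i] = std::max(mMax[i], v[i]);
            mMin[i] = std::min(mMin[i], v[i]);
        }
        if (std::chrono::steady_clock::now() - mStart >= mInterval) {
            // Emit mean (mSum[i] / mCount), mMin and mMax to a log here, then start over.
            reset();
        }
    }

private:
    void reset() {
        mSum.assign(mSize, 0.0);
        mMax.assign(mSize, std::numeric_limits<float>::lowest());
        mMin.assign(mSize, std::numeric_limits<float>::max());
        mCount = 0;
    }

    const size_t mSize;
    const std::chrono::duration<double> mInterval;
    std::chrono::steady_clock::time_point mStart{};
    std::vector<double> mSum;
    std::vector<float> mMax, mMin;
    uint32_t mCount = 0;
};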
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 72dba3d..63f53b7 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -13,6 +13,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
#include "SpatializerPoseController.h"
#include <android-base/stringprintf.h>
#include <chrono>
@@ -21,8 +22,10 @@
#define LOG_TAG "SpatializerPoseController"
//#define LOG_NDEBUG 0
+#include <cutils/properties.h>
#include <sensor/Sensor.h>
#include <media/MediaMetricsItem.h>
+#include <media/QuaternionUtil.h>
#include <utils/Log.h>
#include <utils/SystemClock.h>
@@ -45,11 +48,17 @@
// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
constexpr float kMaxRotationalVelocity = 0.8f;
-// This is how far into the future we predict the head pose, using linear extrapolation based on
-// twist (velocity). It should be set to a value that matches the characteristic durations of moving
-// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
-// high will result in high prediction errors whenever the head accelerates (changes velocity).
-constexpr auto kPredictionDuration = 50ms;
+// This is how far into the future we predict the head pose.
+// The prediction duration should be based on the actual latency from
+// head-tracker to audio output, though setting the prediction duration too
+// high may result in higher prediction errors when the head accelerates or
+// decelerates (changes velocity).
+//
+// The head tracking predictor will do a best effort to achieve the requested
+// prediction duration. If the duration is too far in the future based on
+// current sensor variance, the predictor may internally restrict duration to what
+// is achievable with reasonable confidence as the "best prediction".
+constexpr auto kPredictionDuration = 120ms;
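For intuition: the previous version of this comment described prediction as linear extrapolation of the pose from the twist (angular velocity). A rough single-angle sketch of that idea, with hypothetical names and the duration expressed in seconds (the real predictor operates on full poses and may clamp the duration as noted above):

// Extrapolate one rotation angle forward by the prediction duration.
// yawRad: current yaw (radians); yawRateRadPerSec: angular velocity from the twist.
static float predictYaw(float yawRad, float yawRateRadPerSec, float predictionSeconds) {
    return yawRad + yawRateRadPerSec * predictionSeconds;  // e.g. 0.120f for 120ms
}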
// After not getting a pose sample for this long, we would treat the measurement as stale.
// The max connection interval is 50ms, and HT sensor event interval can differ depending on the
@@ -97,7 +106,15 @@
.maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
.maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
.freshnessTimeout = Ticks(kFreshnessTimeout).count(),
- .predictionDuration = Ticks(kPredictionDuration).count(),
+ .predictionDuration = []() -> float {
+ const int duration_ms =
+ property_get_int32("audio.spatializer.prediction_duration_ms", 0);
+ if (duration_ms > 0) {
+ return duration_ms * 1'000'000LL;
+ } else {
+ return Ticks(kPredictionDuration).count();
+ }
+ }(),
.autoRecenterWindowDuration = Ticks(kAutoRecenterWindowDuration).count(),
.autoRecenterTranslationalThreshold = kAutoRecenterTranslationThreshold,
.autoRecenterRotationalThreshold = kAutoRecenterRotationThreshold,
@@ -145,7 +162,14 @@
mShouldCalculate = false;
}
}
- }) {}
+ }) {
+ const media::PosePredictorType posePredictorType =
+ (media::PosePredictorType)
+ property_get_int32("audio.spatializer.pose_predictor_type", -1);
+ if (isValidPosePredictorType(posePredictorType)) {
+ mProcessor->setPosePredictorType(posePredictorType);
+ }
+ }
SpatializerPoseController::~SpatializerPoseController() {
{
@@ -282,7 +306,36 @@
void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
const std::optional<Twist3f>& twist, bool isNewReference) {
std::lock_guard lock(mMutex);
+ constexpr float NANOS_TO_MILLIS = 1e-6;
+ constexpr float RAD_TO_DEGREE = 180.f / M_PI;
+
+ const float delayMs = (elapsedRealtimeNano() - timestamp) * NANOS_TO_MILLIS; // CLOCK_BOOTTIME
+
if (sensor == mHeadSensor) {
+ std::vector<float> pryprydt(8); // pitch, roll, yaw, d_pitch, d_roll, d_yaw,
+ // discontinuity, timestamp_delay
+ media::quaternionToAngles(pose.rotation(), &pryprydt[0], &pryprydt[1], &pryprydt[2]);
+ if (twist) {
+ const auto rotationalVelocity = twist->rotationalVelocity();
+ // The rotational velocity is an intrinsic transform (i.e. based on the head
+ // coordinate system, not the world coordinate system). It is a 3 element vector:
+ // axis (d theta / dt).
+ //
+ // We leave rotational velocity relative to the head coordinate system,
+ // as the initial head tracking sensor's world frame is arbitrary.
+ media::quaternionToAngles(media::rotationVectorToQuaternion(rotationalVelocity),
+ &pryprydt[3], &pryprydt[4], &pryprydt[5]);
+ }
+ pryprydt[6] = isNewReference;
+ pryprydt[7] = delayMs;
+ for (size_t i = 0; i < 6; ++i) {
+ // pitch, roll, yaw in degrees, referenced in degrees on the world frame.
+ // d_pitch, d_roll, d_yaw rotational velocity in degrees/s, based on the world frame.
+ pryprydt[i] *= RAD_TO_DEGREE;
+ }
+ mHeadSensorRecorder.record(pryprydt);
+ mHeadSensorDurableRecorder.record(pryprydt);
+
mProcessor->setWorldToHeadPose(timestamp, pose,
twist.value_or(Twist3f()) / kTicksPerSecond);
if (isNewReference) {
@@ -290,6 +343,14 @@
}
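The delay column recorded above compares the sensor event timestamp with the current CLOCK_BOOTTIME value and scales nanoseconds to milliseconds. A minimal sketch of that computation, with a hypothetical nowNanos parameter standing in for elapsedRealtimeNano():

#include <cstdint>

// Milliseconds between "now" and an event timestamp, both in nanoseconds on
// the same clock (CLOCK_BOOTTIME in the code above).
static float eventDelayMs(int64_t nowNanos, int64_t eventTimestampNanos) {
    constexpr float kNanosToMillis = 1e-6f;
    return (nowNanos - eventTimestampNanos) * kNanosToMillis;
}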
}
if (sensor == mScreenSensor) {
+ std::vector<float> pryt{ 0.f, 0.f, 0.f, delayMs}; // pitch, roll, yaw, timestamp_delay
+ media::quaternionToAngles(pose.rotation(), &pryt[0], &pryt[1], &pryt[2]);
+ for (size_t i = 0; i < 3; ++i) {
+ pryt[i] *= RAD_TO_DEGREE;
+ }
+ mScreenSensorRecorder.record(pryt);
+ mScreenSensorDurableRecorder.record(pryt);
+
mProcessor->setWorldToScreenPose(timestamp, pose);
if (isNewReference) {
mProcessor->recenter(false, true);
@@ -298,8 +359,7 @@
}
std::string SpatializerPoseController::toString(unsigned level) const {
- std::string prefixSpace;
- prefixSpace.append(level, ' ');
+ std::string prefixSpace(level, ' ');
std::string ss = prefixSpace + "SpatializerPoseController:\n";
bool needUnlock = false;
@@ -315,14 +375,31 @@
if (mHeadSensor == INVALID_SENSOR) {
ss += "HeadSensor: INVALID\n";
} else {
- base::StringAppendF(&ss, "HeadSensor: 0x%08x\n", mHeadSensor);
+ base::StringAppendF(&ss, "HeadSensor: 0x%08x "
+ "(active world-to-head : head-relative velocity) "
+ "[ pitch, roll, yaw : d_pitch, d_roll, d_yaw : disc : delay ] "
+ "(degrees, degrees/s, bool, ms)\n", mHeadSensor);
+ ss.append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mHeadSensorDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mHeadSensorRecorder.toString(level + 3));
}
ss += prefixSpace;
if (mScreenSensor == INVALID_SENSOR) {
ss += "ScreenSensor: INVALID\n";
} else {
- base::StringAppendF(&ss, "ScreenSensor: 0x%08x\n", mScreenSensor);
+ base::StringAppendF(&ss, "ScreenSensor: 0x%08x (active world-to-screen) "
+ "[ pitch, roll, yaw : delay ] "
+ "(degrees, ms)\n", mScreenSensor);
+ ss.append(prefixSpace)
+ .append(" PerMinuteHistory:\n")
+ .append(mScreenSensorDurableRecorder.toString(level + 3))
+ .append(prefixSpace)
+ .append(" PerSecondHistory:\n")
+ .append(mScreenSensorRecorder.toString(level + 3));
}
ss += prefixSpace;
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
index 233f94c..9d78188 100644
--- a/services/audiopolicy/service/SpatializerPoseController.h
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -24,6 +24,7 @@
#include <media/HeadTrackingProcessor.h>
#include <media/SensorPoseProvider.h>
+#include <media/VectorRecorder.h>
namespace android {
@@ -131,6 +132,20 @@
bool mShouldExit = false;
bool mCalculated = false;
+ media::VectorRecorder mHeadSensorRecorder{
+ 8 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ { 3, 6, 7 } /* delimiterIdx */};
+ media::VectorRecorder mHeadSensorDurableRecorder{
+ 8 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ { 3, 6, 7 } /* delimiterIdx */};
+
+ media::VectorRecorder mScreenSensorRecorder{
+ 4 /* vectorSize */, std::chrono::seconds(1), 10 /* maxLogLine */,
+ { 3 } /* delimiterIdx */};
+ media::VectorRecorder mScreenSensorDurableRecorder{
+ 4 /* vectorSize */, std::chrono::minutes(1), 10 /* maxLogLine */,
+ { 3 } /* delimiterIdx */};
+
// It's important that mThread is the last variable in this class
// since we start mThread in the initializer list
std::thread mThread;
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 863fdbe..9f08eca 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -248,10 +248,10 @@
if (!item->getInt32("frontend", &frontend)) return false;
// Optional to be included
- int64_t apex_version = -1;
- item->getInt64("apex_version", &apex_version);
+ std::string version = "";
+ item->getString("version", &version);
const int result = stats_write(stats::media_metrics::MEDIA_DRM_CREATED,
- scheme, uuid_lsb, uuid_msb, uid, frontend, apex_version);
+ scheme, uuid_lsb, uuid_msb, uid, frontend, version.c_str());
std::stringstream log;
log << "result:" << result << " {"
@@ -262,7 +262,7 @@
<< " uuid_msb:" << uuid_msb
<< " uid:" << uid
<< " frontend:" << frontend
- << " apex_version:" << apex_version
+ << " version:" << version
<< " }";
statsdLog->log(stats::media_metrics::MEDIA_DRM_CREATED, log.str());
return true;
@@ -287,10 +287,10 @@
if (!item->getInt32("opened_security_level", &opened_security_level)) return false;
// Optional to be included
- int64_t apex_version = -1;
- item->getInt64("apex_version", &apex_version);
+ std::string version = "";
+ item->getString("version", &version);
const int result = stats_write(stats::media_metrics::MEDIA_DRM_SESSION_OPENED,
- scheme, uuid_lsb, uuid_msb, uid, frontend, apex_version,
+ scheme, uuid_lsb, uuid_msb, uid, frontend, version.c_str(),
object_nonce.c_str(), requested_security_level,
opened_security_level);
@@ -303,7 +303,7 @@
<< " uuid_msb:" << uuid_msb
<< " uid:" << uid
<< " frontend:" << frontend
- << " apex_version:" << apex_version
+ << " version:" << version
<< " object_nonce:" << object_nonce
<< " requested_security_level:" << requested_security_level
<< " opened_security_level:" << opened_security_level
@@ -334,8 +334,8 @@
if (!item->getInt32("error_code", &error_code)) return false;
// Optional to be included
- int64_t apex_version = -1;
- item->getInt64("apex_version", &apex_version);
+ std::string version = "";
+ item->getString("version", &version);
std::string session_nonce = "";
item->getString("session_nonce", &session_nonce);
@@ -347,7 +347,7 @@
item->getInt32("error_context", &error_context);
const int result = stats_write(stats::media_metrics::MEDIA_DRM_ERRORED, scheme, uuid_lsb,
- uuid_msb, uid, frontend, apex_version, object_nonce.c_str(),
+ uuid_msb, uid, frontend, version.c_str(), object_nonce.c_str(),
session_nonce.c_str(), security_level, api, error_code, cdm_err,
oem_err, error_context);
@@ -360,7 +360,7 @@
<< " uuid_msb:" << uuid_msb
<< " uid:" << uid
<< " frontend:" << frontend
- << " apex_version:" << apex_version
+ << " version:" << version
<< " object_nonce:" << object_nonce
<< " session_nonce:" << session_nonce
<< " security_level:" << security_level
diff --git a/services/mediaresourcemanager/Android.bp b/services/mediaresourcemanager/Android.bp
index 5d80744..2b8245e 100644
--- a/services/mediaresourcemanager/Android.bp
+++ b/services/mediaresourcemanager/Android.bp
@@ -16,6 +16,7 @@
"aidl/android/media/MediaResourceSubType.aidl",
"aidl/android/media/MediaResourceParcel.aidl",
"aidl/android/media/MediaResourcePolicyParcel.aidl",
+ "aidl/android/media/ClientInfoParcel.aidl",
],
path: "aidl",
}
@@ -87,10 +88,15 @@
"libbinder_ndk",
"libutils",
"liblog",
+ "libstats_media_metrics",
+ "libstatspull",
+ "libstatssocket",
+ "libprotobuf-cpp-lite",
],
static_libs: [
"resourceobserver_aidl_interface-V1-ndk",
+ "libplatformprotos",
],
include_dirs: ["frameworks/av/include"],
@@ -101,4 +107,10 @@
],
export_include_dirs: ["."],
+
+ export_shared_lib_headers: [
+ "libstats_media_metrics",
+ "libstatspull",
+ "libstatssocket",
+ ],
}
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 4d18876..f94446f 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -34,6 +34,7 @@
#include <sys/stat.h>
#include <sys/time.h>
#include <unistd.h>
+#include <stats_media_metrics.h>
#include "IMediaResourceMonitor.h"
#include "ResourceManagerService.h"
@@ -42,6 +43,14 @@
namespace android {
+using stats::media_metrics::stats_write;
+using stats::media_metrics::MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED;
+using stats::media_metrics::MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_SUCCESS;
+using stats::media_metrics::\
+ MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_FAILED_NO_CLIENTS;
+using stats::media_metrics::\
+ MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_FAILED_RECLAIM_RESOURCES;
+
//static
std::mutex ResourceManagerService::sCookieLock;
//static
@@ -97,7 +106,8 @@
service->overridePid(mPid, -1);
// thiz is freed in the call below, so it must be last call referring thiz
- service->removeResource(mPid, mClientId, false /*checkValid*/);
+ ClientInfoParcel clientInfo{.pid = mPid, .id = mClientId};
+ service->removeResource(clientInfo, false /*checkValid*/);
}
class OverrideProcessInfoDeathNotifier : public DeathNotifier {
@@ -183,6 +193,7 @@
}
static ResourceInfo& getResourceInfoForEdit(uid_t uid, int64_t clientId,
+ const std::string& name,
const std::shared_ptr<IResourceManagerClient>& client, ResourceInfos& infos) {
ssize_t index = infos.indexOfKey(clientId);
@@ -190,6 +201,7 @@
ResourceInfo info;
info.uid = uid;
info.clientId = clientId;
+ info.name = name;
info.client = client;
info.cookie = 0;
info.pendingRemoval = false;
@@ -262,7 +274,15 @@
result.append(" Processes:\n");
for (size_t i = 0; i < mapCopy.size(); ++i) {
- snprintf(buffer, SIZE, " Pid: %d\n", mapCopy.keyAt(i));
+ int pid = mapCopy.keyAt(i);
+ snprintf(buffer, SIZE, " Pid: %d\n", pid);
+ result.append(buffer);
+ int priority = 0;
+ if (getPriority_l(pid, &priority)) {
+ snprintf(buffer, SIZE, " Priority: %d\n", priority);
+ } else {
+ snprintf(buffer, SIZE, " Priority: <unknown>\n");
+ }
result.append(buffer);
const ResourceInfos &infos = mapCopy.valueAt(i);
@@ -273,7 +293,7 @@
std::string clientName = "<unknown client>";
if (infos[j].client != nullptr) {
- Status status = infos[j].client->getName(&clientName);
+ clientName = infos[j].name;
}
snprintf(buffer, SIZE, " Name: %s\n", clientName.c_str());
result.append(buffer);
@@ -433,11 +453,15 @@
}
}
-Status ResourceManagerService::addResource(int32_t pid, int32_t uid, int64_t clientId,
+Status ResourceManagerService::addResource(const ClientInfoParcel& clientInfo,
const std::shared_ptr<IResourceManagerClient>& client,
const std::vector<MediaResourceParcel>& resources) {
- String8 log = String8::format("addResource(pid %d, clientId %lld, resources %s)",
- pid, (long long) clientId, getString(resources).string());
+ int32_t pid = clientInfo.pid;
+ int32_t uid = clientInfo.uid;
+ int64_t clientId = clientInfo.id;
+ const std::string& name = clientInfo.name;
+ String8 log = String8::format("addResource(pid %d, uid %d clientId %lld, resources %s)",
+ pid, uid, (long long) clientId, getString(resources).string());
mServiceLog->add(log);
Mutex::Autolock lock(mLock);
@@ -450,7 +474,7 @@
uid = callingUid;
}
ResourceInfos& infos = getResourceInfosForEdit(pid, mMap);
- ResourceInfo& info = getResourceInfoForEdit(uid, clientId, client, infos);
+ ResourceInfo& info = getResourceInfoForEdit(uid, clientId, name, client, infos);
ResourceList resourceAdded;
for (size_t i = 0; i < resources.size(); ++i) {
@@ -489,13 +513,50 @@
mObserverService->onResourceAdded(uid, pid, resourceAdded);
}
notifyResourceGranted(pid, resources);
+
+ // Increase the instance count of the resource associated with this client.
+ increaseResourceInstanceCount(clientId, name);
+
return Status::ok();
}
-Status ResourceManagerService::removeResource(int32_t pid, int64_t clientId,
+void ResourceManagerService::increaseResourceInstanceCount(int64_t clientId,
+ const std::string& name) {
+ // Check whether this client has been looked into already.
+ if (mClientIdSet.find(clientId) == mClientIdSet.end()) {
+ mClientIdSet.insert(clientId);
+ // Update the resource instance count.
+ auto found = mConcurrentResourceCountMap.find(name);
+ if (found == mConcurrentResourceCountMap.end()) {
+ mConcurrentResourceCountMap[name] = 1;
+ } else {
+ found->second++;
+ }
+ }
+}
+
+void ResourceManagerService::decreaseResourceInstanceCount(int64_t clientId,
+ const std::string& name) {
+ // Since this client has been removed, remove it from mClientIdSet
+ mClientIdSet.erase(clientId);
+ // Update the resource instance count also.
+ auto found = mConcurrentResourceCountMap.find(name);
+ if (found != mConcurrentResourceCountMap.end()) {
+ if (found->second == 1) {
+ mConcurrentResourceCountMap.erase(found);
+ } else {
+ found->second--;
+ }
+ }
+}
+
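A short walkthrough of how these counters evolve for two clients that register the same resource name (the client ids and name are purely illustrative):

// increaseResourceInstanceCount(1, "codec.secure");  -> {"codec.secure": 1}
// increaseResourceInstanceCount(1, "codec.secure");  -> duplicate clientId, no change
// increaseResourceInstanceCount(2, "codec.secure");  -> {"codec.secure": 2}
// decreaseResourceInstanceCount(1, "codec.secure");  -> {"codec.secure": 1}
// decreaseResourceInstanceCount(2, "codec.secure");  -> entry erased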
+Status ResourceManagerService::removeResource(const ClientInfoParcel& clientInfo,
const std::vector<MediaResourceParcel>& resources) {
- String8 log = String8::format("removeResource(pid %d, clientId %lld, resources %s)",
- pid, (long long) clientId, getString(resources).string());
+ int32_t pid = clientInfo.pid;
+ int32_t uid = clientInfo.uid;
+ int64_t clientId = clientInfo.id;
+ String8 log = String8::format("removeResource(pid %d, uid %d clientId %lld, resources %s)",
+ pid, uid, (long long) clientId, getString(resources).string());
mServiceLog->add(log);
Mutex::Autolock lock(mLock);
@@ -555,15 +616,17 @@
return Status::ok();
}
-Status ResourceManagerService::removeClient(int32_t pid, int64_t clientId) {
- removeResource(pid, clientId, true /*checkValid*/);
+Status ResourceManagerService::removeClient(const ClientInfoParcel& clientInfo) {
+ removeResource(clientInfo, true /*checkValid*/);
return Status::ok();
}
-Status ResourceManagerService::removeResource(int pid, int64_t clientId, bool checkValid) {
- String8 log = String8::format(
- "removeResource(pid %d, clientId %lld)",
- pid, (long long) clientId);
+Status ResourceManagerService::removeResource(const ClientInfoParcel& clientInfo, bool checkValid) {
+ int32_t pid = clientInfo.pid;
+ int32_t uid = clientInfo.uid;
+ int64_t clientId = clientInfo.id;
+ String8 log = String8::format("removeResource(pid %d, uid %d clientId %lld)",
+ pid, uid, (long long) clientId);
mServiceLog->add(log);
Mutex::Autolock lock(mLock);
@@ -591,6 +654,10 @@
onLastRemoved(it->second, info);
}
+ // Since this client has been removed, decrease the corresponding
+ // resources instance count.
+ decreaseResourceInstanceCount(clientId, info.name);
+
removeCookieAndUnlink_l(info.client, info.cookie);
if (mObserverService != nullptr && !info.resources.empty()) {
@@ -601,25 +668,30 @@
return Status::ok();
}
-void ResourceManagerService::getClientForResource_l(int callingPid, const MediaResourceParcel *res,
+void ResourceManagerService::getClientForResource_l(int callingPid,
+ const MediaResourceParcel *res,
+ PidUidVector* idVector,
Vector<std::shared_ptr<IResourceManagerClient>> *clients) {
if (res == NULL) {
return;
}
std::shared_ptr<IResourceManagerClient> client;
- if (getLowestPriorityBiggestClient_l(callingPid, res->type, res->subType, &client)) {
+ if (getLowestPriorityBiggestClient_l(callingPid, res->type, res->subType, idVector, &client)) {
clients->push_back(client);
}
}
-Status ResourceManagerService::reclaimResource(int32_t callingPid,
+Status ResourceManagerService::reclaimResource(const ClientInfoParcel& clientInfo,
const std::vector<MediaResourceParcel>& resources, bool* _aidl_return) {
- String8 log = String8::format("reclaimResource(callingPid %d, resources %s)",
- callingPid, getString(resources).string());
+ int32_t callingPid = clientInfo.pid;
+ std::string clientName = clientInfo.name;
+ String8 log = String8::format("reclaimResource(callingPid %d, uid %d resources %s)",
+ callingPid, clientInfo.uid, getString(resources).string());
mServiceLog->add(log);
*_aidl_return = false;
Vector<std::shared_ptr<IResourceManagerClient>> clients;
+ PidUidVector idVector;
{
Mutex::Autolock lock(mLock);
if (!mProcessInfo->isPidTrusted(callingPid)) {
@@ -655,13 +727,13 @@
if (secureCodec != NULL) {
if (!mSupportsMultipleSecureCodecs) {
if (!getAllClients_l(callingPid, MediaResource::Type::kSecureCodec,
- secureCodec->subType, &clients)) {
+ secureCodec->subType, &idVector, &clients)) {
return Status::ok();
}
}
if (!mSupportsSecureWithNonSecureCodec) {
if (!getAllClients_l(callingPid, MediaResource::Type::kNonSecureCodec,
- secureCodec->subType, &clients)) {
+ secureCodec->subType, &idVector, &clients)) {
return Status::ok();
}
}
@@ -669,13 +741,13 @@
if (nonSecureCodec != NULL) {
if (!mSupportsSecureWithNonSecureCodec) {
if (!getAllClients_l(callingPid, MediaResource::Type::kSecureCodec,
- nonSecureCodec->subType, &clients)) {
+ nonSecureCodec->subType, &idVector, &clients)) {
return Status::ok();
}
}
}
if (drmSession != NULL) {
- getClientForResource_l(callingPid, drmSession, &clients);
+ getClientForResource_l(callingPid, drmSession, &idVector, &clients);
if (clients.size() == 0) {
return Status::ok();
}
@@ -683,32 +755,108 @@
if (clients.size() == 0) {
// if no secure/non-secure codec conflict, run second pass to handle other resources.
- getClientForResource_l(callingPid, graphicMemory, &clients);
+ getClientForResource_l(callingPid, graphicMemory, &idVector, &clients);
}
if (clients.size() == 0) {
// if we are here, run the third pass to free one codec with the same type.
- getClientForResource_l(callingPid, secureCodec, &clients);
- getClientForResource_l(callingPid, nonSecureCodec, &clients);
+ getClientForResource_l(callingPid, secureCodec, &idVector, &clients);
+ getClientForResource_l(callingPid, nonSecureCodec, &idVector, &clients);
}
if (clients.size() == 0) {
// if we are here, run the fourth pass to free one codec with the different type.
if (secureCodec != NULL) {
MediaResource temp(MediaResource::Type::kNonSecureCodec, secureCodec->subType, 1);
- getClientForResource_l(callingPid, &temp, &clients);
+ getClientForResource_l(callingPid, &temp, &idVector, &clients);
}
if (nonSecureCodec != NULL) {
MediaResource temp(MediaResource::Type::kSecureCodec, nonSecureCodec->subType, 1);
- getClientForResource_l(callingPid, &temp, &clients);
+ getClientForResource_l(callingPid, &temp, &idVector, &clients);
}
}
}
*_aidl_return = reclaimUnconditionallyFrom(clients);
+
+ // Log Reclaim Pushed Atom to statsd
+ pushReclaimAtom(clientInfo, clients, idVector, *_aidl_return);
+
return Status::ok();
}
+void ResourceManagerService::pushReclaimAtom(const ClientInfoParcel& clientInfo,
+ const Vector<std::shared_ptr<IResourceManagerClient>>& clients,
+ const PidUidVector& idVector, bool reclaimed) {
+ // Construct the metrics for codec reclaim as a pushed atom.
+ // 1. Information about the requester.
+ // - UID and the priority (oom score)
+ int32_t callingPid = clientInfo.pid;
+ int32_t requesterUid = clientInfo.uid;
+ std::string clientName = clientInfo.name;
+ int requesterPriority = -1;
+ getPriority_l(callingPid, &requesterPriority);
+
+ // 2. Information about the codec.
+ // - Name of the codec requested
+ // - Number of concurrent codecs running.
+ int32_t noOfConcurrentCodecs = 0;
+ auto found = mConcurrentResourceCountMap.find(clientName);
+ if (found != mConcurrentResourceCountMap.end()) {
+ noOfConcurrentCodecs = found->second;
+ }
+
+ // 3. Information about the Reclaim:
+ // - Status of reclaim request
+ // - How many codecs are reclaimed
+ // - For each codecs reclaimed, information of the process that it belonged to:
+ // - UID and the Priority (oom score)
+ int32_t reclaimStatus = MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_SUCCESS;
+ if (!reclaimed) {
+ if (clients.size() == 0) {
+ // No clients to reclaim from
+ reclaimStatus =
+ MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_FAILED_NO_CLIENTS;
+ } else {
+ // Couldn't reclaim resources from the clients
+ reclaimStatus =
+ MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED__RECLAIM_STATUS__RECLAIM_FAILED_RECLAIM_RESOURCES;
+ }
+ }
+ int32_t noOfCodecsReclaimed = clients.size();
+ int32_t targetIndex = 1;
+ for (const auto& id : idVector) {
+ int32_t targetUid = id.second;
+ int targetPriority = -1;
+ getPriority_l(id.first, &targetPriority);
+ // Post the pushed atom
+ int result = stats_write(
+ MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED,
+ requesterUid,
+ requesterPriority,
+ clientName.c_str(),
+ noOfConcurrentCodecs,
+ reclaimStatus,
+ noOfCodecsReclaimed,
+ targetIndex,
+ targetUid,
+ targetPriority);
+ ALOGI("%s: Pushed MEDIA_CODEC_RECLAIM_REQUEST_COMPLETED atom: "
+ "Requester[pid(%d): uid(%d): priority(%d)] "
+ "Codec: [%s] "
+ "No of concurrent codecs: %d "
+ "Reclaim Status: %d "
+ "No of codecs reclaimed: %d "
+ "Target[%d][pid(%d): uid(%d): priority(%d)] "
+ "Atom Size: %d",
+ __func__, callingPid, requesterUid, requesterPriority,
+ clientName.c_str(), noOfConcurrentCodecs,
+ reclaimStatus, noOfCodecsReclaimed,
+ targetIndex, id.first, targetUid, targetPriority, result);
+ targetIndex++;
+ }
+}
+
bool ResourceManagerService::reclaimUnconditionallyFrom(
const Vector<std::shared_ptr<IResourceManagerClient>> &clients) {
if (clients.size() == 0) {
@@ -868,7 +1016,9 @@
mProcessInfoOverrideMap.erase(pid);
}
-Status ResourceManagerService::markClientForPendingRemoval(int32_t pid, int64_t clientId) {
+Status ResourceManagerService::markClientForPendingRemoval(const ClientInfoParcel& clientInfo) {
+ int32_t pid = clientInfo.pid;
+ int64_t clientId = clientInfo.id;
String8 log = String8::format(
"markClientForPendingRemoval(pid %d, clientId %lld)",
pid, (long long) clientId);
@@ -926,7 +1076,8 @@
MediaResource::SubType::kVideoCodec,
MediaResource::SubType::kImageCodec}) {
std::shared_ptr<IResourceManagerClient> client;
- if (getBiggestClientPendingRemoval_l(pid, type, subType, &client)) {
+ uid_t uid = 0;
+ if (getBiggestClientPendingRemoval_l(pid, type, subType, uid, &client)) {
clients.add(client);
continue;
}
@@ -935,8 +1086,9 @@
// Non-codec resources are shared by audio, video and image codecs (no subtype).
default:
std::shared_ptr<IResourceManagerClient> client;
+ uid_t uid = 0;
if (getBiggestClientPendingRemoval_l(pid, type,
- MediaResource::SubType::kUnspecifiedSubType, &client)) {
+ MediaResource::SubType::kUnspecifiedSubType, uid, &client)) {
clients.add(client);
}
break;
@@ -963,8 +1115,12 @@
}
bool ResourceManagerService::getAllClients_l(int callingPid, MediaResource::Type type,
- MediaResource::SubType subType, Vector<std::shared_ptr<IResourceManagerClient>> *clients) {
+ MediaResource::SubType subType,
+ PidUidVector* idVector,
+ Vector<std::shared_ptr<IResourceManagerClient>> *clients) {
Vector<std::shared_ptr<IResourceManagerClient>> temp;
+ PidUidVector tempIdList;
+
for (size_t i = 0; i < mMap.size(); ++i) {
ResourceInfos &infos = mMap.editValueAt(i);
for (size_t j = 0; j < infos.size(); ++j) {
@@ -977,6 +1133,7 @@
return false;
}
temp.push_back(infos[j].client);
+ tempIdList.emplace_back(mMap.keyAt(i), infos[j].uid);
}
}
}
@@ -985,19 +1142,24 @@
return true;
}
clients->appendVector(temp);
+ idVector->insert(std::end(*idVector), std::begin(tempIdList), std::end(tempIdList));
return true;
}
bool ResourceManagerService::getLowestPriorityBiggestClient_l(int callingPid,
- MediaResource::Type type, MediaResource::SubType subType,
+ MediaResource::Type type,
+ MediaResource::SubType subType,
+ PidUidVector* idVector,
std::shared_ptr<IResourceManagerClient> *client) {
int lowestPriorityPid;
int lowestPriority;
int callingPriority;
+ uid_t uid = 0;
// Before looking into other processes, check if we have clients marked for
// pending removal in the same process.
- if (getBiggestClientPendingRemoval_l(callingPid, type, subType, client)) {
+ if (getBiggestClientPendingRemoval_l(callingPid, type, subType, uid, client)) {
+ idVector->emplace_back(callingPid, uid);
return true;
}
if (!getPriority_l(callingPid, &callingPriority)) {
@@ -1014,9 +1176,11 @@
return false;
}
- if (!getBiggestClient_l(lowestPriorityPid, type, subType, client)) {
+ if (!getBiggestClient_l(lowestPriorityPid, type, subType, uid, client)) {
return false;
}
+
+ idVector->emplace_back(lowestPriorityPid, uid);
return true;
}
@@ -1068,12 +1232,14 @@
}
bool ResourceManagerService::getBiggestClientPendingRemoval_l(int pid, MediaResource::Type type,
- MediaResource::SubType subType, std::shared_ptr<IResourceManagerClient> *client) {
- return getBiggestClient_l(pid, type, subType, client, true /* pendingRemovalOnly */);
+ MediaResource::SubType subType, uid_t& uid,
+ std::shared_ptr<IResourceManagerClient> *client) {
+ return getBiggestClient_l(pid, type, subType, uid, client, true /* pendingRemovalOnly */);
}
bool ResourceManagerService::getBiggestClient_l(int pid, MediaResource::Type type,
- MediaResource::SubType subType, std::shared_ptr<IResourceManagerClient> *client,
+ MediaResource::SubType subType, uid_t& uid,
+ std::shared_ptr<IResourceManagerClient> *client,
bool pendingRemovalOnly) {
ssize_t index = mMap.indexOfKey(pid);
if (index < 0) {
@@ -1096,6 +1262,7 @@
if (resource.value > largestValue) {
largestValue = resource.value;
clientTemp = infos[i].client;
+ uid = infos[i].uid;
}
}
}
diff --git a/services/mediaresourcemanager/ResourceManagerService.h b/services/mediaresourcemanager/ResourceManagerService.h
index c636a0f..0016a19 100644
--- a/services/mediaresourcemanager/ResourceManagerService.h
+++ b/services/mediaresourcemanager/ResourceManagerService.h
@@ -19,7 +19,9 @@
#define ANDROID_MEDIA_RESOURCEMANAGERSERVICE_H
#include <map>
+#include <set>
#include <mutex>
+#include <string>
#include <aidl/android/media/BnResourceManagerService.h>
#include <arpa/inet.h>
@@ -43,20 +45,24 @@
using ::aidl::android::media::BnResourceManagerService;
using ::aidl::android::media::MediaResourceParcel;
using ::aidl::android::media::MediaResourcePolicyParcel;
+using ::aidl::android::media::ClientInfoParcel;
typedef std::map<std::tuple<
MediaResource::Type, MediaResource::SubType, std::vector<uint8_t>>,
MediaResourceParcel> ResourceList;
struct ResourceInfo {
- int64_t clientId;
uid_t uid;
+ int64_t clientId;
+ std::string name;
std::shared_ptr<IResourceManagerClient> client;
uintptr_t cookie{0};
ResourceList resources;
bool pendingRemoval{false};
};
+typedef std::vector<std::pair<int32_t, uid_t>> PidUidVector;
+
// TODO: convert these to std::map
typedef KeyedVector<int64_t, ResourceInfo> ResourceInfos;
typedef KeyedVector<int, ResourceInfos> PidResourceInfosMap;
@@ -85,31 +91,32 @@
// IResourceManagerService interface
Status config(const std::vector<MediaResourcePolicyParcel>& policies) override;
- Status addResource(int32_t pid, int32_t uid, int64_t clientId,
- const std::shared_ptr<IResourceManagerClient>& client,
- const std::vector<MediaResourceParcel>& resources) override;
+ Status addResource(const ClientInfoParcel& clientInfo,
+ const std::shared_ptr<IResourceManagerClient>& client,
+ const std::vector<MediaResourceParcel>& resources) override;
- Status removeResource(int32_t pid, int64_t clientId,
- const std::vector<MediaResourceParcel>& resources) override;
+ Status removeResource(const ClientInfoParcel& clientInfo,
+ const std::vector<MediaResourceParcel>& resources) override;
- Status removeClient(int32_t pid, int64_t clientId) override;
+ Status removeClient(const ClientInfoParcel& clientInfo) override;
// Tries to reclaim resource from processes with lower priority than the calling process
// according to the requested resources.
// Returns true if any resource has been reclaimed, otherwise returns false.
- Status reclaimResource(int32_t callingPid, const std::vector<MediaResourceParcel>& resources,
- bool* _aidl_return) override;
+ Status reclaimResource(const ClientInfoParcel& clientInfo,
+ const std::vector<MediaResourceParcel>& resources,
+ bool* _aidl_return) override;
- Status overridePid(int originalPid, int newPid) override;
+ Status overridePid(int32_t originalPid, int32_t newPid) override;
- Status overrideProcessInfo(const std::shared_ptr<IResourceManagerClient>& client, int pid,
- int procState, int oomScore) override;
+ Status overrideProcessInfo(const std::shared_ptr<IResourceManagerClient>& client,
+ int32_t pid, int32_t procState, int32_t oomScore) override;
- Status markClientForPendingRemoval(int32_t pid, int64_t clientId) override;
+ Status markClientForPendingRemoval(const ClientInfoParcel& clientInfo) override;
Status reclaimResourcesFromClientsPendingRemoval(int32_t pid) override;
- Status removeResource(int pid, int64_t clientId, bool checkValid);
+ Status removeResource(const ClientInfoParcel& clientInfo, bool checkValid);
private:
friend class ResourceManagerServiceTest;
@@ -124,13 +131,15 @@
// Returns false if any client belongs to a process with higher priority than the
// calling process. The clients will remain unchanged if this returns false.
bool getAllClients_l(int callingPid, MediaResource::Type type, MediaResource::SubType subType,
+ PidUidVector* idList,
Vector<std::shared_ptr<IResourceManagerClient>> *clients);
// Gets the client who owns specified resource type from lowest possible priority process.
// Returns false if the calling process priority is not higher than the lowest process
// priority. The client will remain unchanged if this returns false.
bool getLowestPriorityBiggestClient_l(int callingPid, MediaResource::Type type,
- MediaResource::SubType subType, std::shared_ptr<IResourceManagerClient> *client);
+ MediaResource::SubType subType, PidUidVector* idList,
+ std::shared_ptr<IResourceManagerClient> *client);
// Gets lowest priority process that has the specified resource type.
// Returns false if failed. The output parameters will remain unchanged if failed.
@@ -141,17 +150,19 @@
// Returns false with no change to client if there are no clients holding resources of this
// type.
bool getBiggestClient_l(int pid, MediaResource::Type type, MediaResource::SubType subType,
- std::shared_ptr<IResourceManagerClient> *client,
+ uid_t& uid, std::shared_ptr<IResourceManagerClient> *client,
bool pendingRemovalOnly = false);
// Same method as above, but with pendingRemovalOnly as true.
bool getBiggestClientPendingRemoval_l(int pid, MediaResource::Type type,
- MediaResource::SubType subType, std::shared_ptr<IResourceManagerClient> *client);
+ MediaResource::SubType subType, uid_t& uid,
+ std::shared_ptr<IResourceManagerClient> *client);
bool isCallingPriorityHigher_l(int callingPid, int pid);
// A helper function that calls getLowestPriorityBiggestClient_l and adds
// the result client to the given Vector.
void getClientForResource_l(int callingPid, const MediaResourceParcel *res,
+ PidUidVector* idList,
Vector<std::shared_ptr<IResourceManagerClient>> *clients);
void onFirstAdded(const MediaResourceParcel& res, const ResourceInfo& clientInfo);
@@ -171,6 +182,15 @@
void removeCookieAndUnlink_l(const std::shared_ptr<IResourceManagerClient>& client,
uintptr_t cookie);
+ // To increase/decrease the number of instances of a given resource
+ // associated with a client.
+ void increaseResourceInstanceCount(int64_t clientId, const std::string& name);
+ void decreaseResourceInstanceCount(int64_t clientId, const std::string& name);
+
+ void pushReclaimAtom(const ClientInfoParcel& clientInfo,
+ const Vector<std::shared_ptr<IResourceManagerClient>>& clients,
+ const PidUidVector& idList, bool reclaimed);
+
mutable Mutex mLock;
sp<ProcessInfoInterface> mProcessInfo;
sp<SystemCallbackInterface> mSystemCB;
@@ -191,6 +211,11 @@
static std::map<uintptr_t, sp<DeathNotifier> > sCookieToDeathNotifierMap
GUARDED_BY(sCookieLock);
std::shared_ptr<ResourceObserverService> mObserverService;
+
+ // List of active clients
+ std::set<int64_t> mClientIdSet;
+ // Map of resources (name) and number of concurrent instances
+ std::map<std::string, int> mConcurrentResourceCountMap;
};
// ----------------------------------------------------------------------------
diff --git a/services/mediaresourcemanager/aidl/android/media/ClientInfoParcel.aidl b/services/mediaresourcemanager/aidl/android/media/ClientInfoParcel.aidl
new file mode 100644
index 0000000..eb4bc42
--- /dev/null
+++ b/services/mediaresourcemanager/aidl/android/media/ClientInfoParcel.aidl
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2023, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * Description of a Client(codec) information.
+ *
+ * {@hide}
+ */
+parcelable ClientInfoParcel {
+ /**
+ * The PID of the client process.
+ */
+ int pid = -1;
+
+ /**
+ * The UID of the client process.
+ */
+ int uid = -1;
+
+ /**
+ * The ID of the client.
+ */
+ long id = 0;
+
+ /**
+ * Name of the resource associated with the client.
+ */
+ @utf8InCpp String name;
+}
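The updated callers in this change populate the parcel with designated initializers before handing it to the service; a minimal sketch (the surrounding client, clientId, resources and service variables are assumed to exist, and the name is illustrative):

ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(getpid()),
                            .uid = static_cast<int32_t>(getuid()),
                            .id = clientId,
                            .name = "codec.secure"};
service->addResource(clientInfo, client, resources);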
diff --git a/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
index 7a0a50f..30ad41b 100644
--- a/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
+++ b/services/mediaresourcemanager/aidl/android/media/IResourceManagerService.aidl
@@ -19,6 +19,7 @@
import android.media.IResourceManagerClient;
import android.media.MediaResourceParcel;
import android.media.MediaResourcePolicyParcel;
+import android.media.ClientInfoParcel;
/**
* ResourceManagerService interface that keeps track of media resource
@@ -44,46 +45,40 @@
/**
* Add a client to a process with a list of resources.
*
- * @param pid pid of the client.
- * @param uid uid of the client.
- * @param clientId an identifier that uniquely identifies the client within the pid.
+ * @param clientInfo info of the calling client.
* @param client interface for the ResourceManagerService to call the client.
* @param resources an array of resources to be added.
*/
void addResource(
- int pid,
- int uid,
- long clientId,
+ in ClientInfoParcel clientInfo,
IResourceManagerClient client,
in MediaResourceParcel[] resources);
/**
* Remove the listed resources from a client.
*
- * @param pid pid from which the list of resources will be removed.
- * @param clientId clientId within the pid from which the list of resources will be removed.
+ * @param clientInfo info of the calling client.
* @param resources an array of resources to be removed from the client.
*/
- void removeResource(int pid, long clientId, in MediaResourceParcel[] resources);
+ void removeResource(in ClientInfoParcel clientInfo, in MediaResourceParcel[] resources);
/**
* Remove all resources from a client.
*
- * @param pid pid from which the client's resources will be removed.
- * @param clientId clientId within the pid that will be removed.
+ * @param clientInfo info of the calling client.
*/
- void removeClient(int pid, long clientId);
+ void removeClient(in ClientInfoParcel clientInfo);
/**
* Tries to reclaim resource from processes with lower priority than the
* calling process according to the requested resources.
*
- * @param callingPid pid of the calling process.
+ * @param clientInfo info of the calling client.
* @param resources an array of resources to be reclaimed.
*
* @return true if the reclaim was successful and false otherwise.
*/
- boolean reclaimResource(int callingPid, in MediaResourceParcel[] resources);
+ boolean reclaimResource(in ClientInfoParcel clientInfo, in MediaResourceParcel[] resources);
/**
* Override the pid of original calling process with the pid of the process
@@ -120,10 +115,9 @@
/**
* Mark a client for pending removal
*
- * @param pid pid from which the client's resources will be removed.
- * @param clientId clientId within the pid that will be removed.
+ * @param clientInfo info of the calling client.
*/
- void markClientForPendingRemoval(int pid, long clientId);
+ void markClientForPendingRemoval(in ClientInfoParcel clientInfo);
/**
* Reclaim resources from clients pending removal, if any.
diff --git a/services/mediaresourcemanager/fuzzer/Android.bp b/services/mediaresourcemanager/fuzzer/Android.bp
index 81c85e5..1d7f14f 100644
--- a/services/mediaresourcemanager/fuzzer/Android.bp
+++ b/services/mediaresourcemanager/fuzzer/Android.bp
@@ -41,6 +41,9 @@
"libbinder_ndk",
"libmedia",
"libutils",
+ "libstats_media_metrics",
+ "libstatspull",
+ "libstatssocket",
],
fuzz_config: {
cc: [
diff --git a/services/mediaresourcemanager/fuzzer/mediaresourcemanager_fuzzer.cpp b/services/mediaresourcemanager/fuzzer/mediaresourcemanager_fuzzer.cpp
index e4aaea0..5c2fef9 100644
--- a/services/mediaresourcemanager/fuzzer/mediaresourcemanager_fuzzer.cpp
+++ b/services/mediaresourcemanager/fuzzer/mediaresourcemanager_fuzzer.cpp
@@ -135,11 +135,15 @@
};
struct TestClient : public BnResourceManagerClient {
- TestClient(int pid, const shared_ptr<ResourceManagerService>& service)
- : mReclaimed(false), mPid(pid), mService(service) {}
+ TestClient(int pid, int uid, const shared_ptr<ResourceManagerService>& service)
+ : mReclaimed(false), mPid(pid), mUid(uid), mService(service) {}
Status reclaimResource(bool* aidlReturn) override {
- mService->removeClient(mPid, getId(ref<TestClient>()));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(ref<TestClient>()),
+ .name = ""};
+ mService->removeClient(clientInfo);
mReclaimed = true;
*aidlReturn = true;
return Status::ok();
@@ -155,6 +159,7 @@
private:
bool mReclaimed;
int mPid;
+ int mUid;
shared_ptr<ResourceManagerService> mService;
DISALLOW_EVIL_CONSTRUCTORS(TestClient);
};
@@ -176,9 +181,12 @@
static void* addResource(void* arg) {
resourceThreadArgs* tArgs = (resourceThreadArgs*)arg;
if (tArgs) {
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(tArgs->pid),
+ .uid = static_cast<int32_t>(tArgs->uid),
+ .id = tArgs->testClientId,
+ .name = ""};
(tArgs->service)
- ->addResource(tArgs->pid, tArgs->uid, tArgs->testClientId, tArgs->testClient,
- tArgs->mediaResource);
+ ->addResource(clientInfo, tArgs->testClient, tArgs->mediaResource);
}
return nullptr;
}
@@ -187,10 +195,14 @@
resourceThreadArgs* tArgs = (resourceThreadArgs*)arg;
if (tArgs) {
bool result;
- (tArgs->service)->markClientForPendingRemoval(tArgs->pid, tArgs->testClientId);
- (tArgs->service)->removeResource(tArgs->pid, tArgs->testClientId, tArgs->mediaResource);
- (tArgs->service)->reclaimResource(tArgs->pid, tArgs->mediaResource, &result);
- (tArgs->service)->removeClient(tArgs->pid, tArgs->testClientId);
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(tArgs->pid),
+ .uid = static_cast<int32_t>(tArgs->uid),
+ .id = tArgs->testClientId,
+ .name = ""};
+ (tArgs->service)->markClientForPendingRemoval(clientInfo);
+ (tArgs->service)->removeResource(clientInfo, tArgs->mediaResource);
+ (tArgs->service)->reclaimResource(clientInfo, tArgs->mediaResource, &result);
+ (tArgs->service)->removeClient(clientInfo);
(tArgs->service)->overridePid(tArgs->pid, tArgs->pid - 1);
}
return nullptr;
@@ -240,7 +252,8 @@
uint64_t mediaResourceValue = mFuzzedDataProvider->ConsumeIntegral<uint64_t>();
threadArgs[k].service = mService;
shared_ptr<IResourceManagerClient> testClient =
- ::ndk::SharedRefBase::make<TestClient>(threadArgs[k].pid, mService);
+ ::ndk::SharedRefBase::make<TestClient>(threadArgs[k].pid, threadArgs[k].uid,
+ mService);
threadArgs[k].testClient = testClient;
threadArgs[k].testClientId = getId(testClient);
mediaResource[k].push_back(MediaResource(static_cast<MedResType>(mediaResourceType),
@@ -258,7 +271,7 @@
// No resource was added with pid = 0
int32_t pidZero = 0;
shared_ptr<IResourceManagerClient> testClient =
- ::ndk::SharedRefBase::make<TestClient>(pidZero, mService);
+ ::ndk::SharedRefBase::make<TestClient>(pidZero, 0, mService);
int32_t mediaResourceType =
mFuzzedDataProvider->ConsumeIntegralInRange<int32_t>(kMinResourceType, kMaxResourceType);
int32_t mediaResourceSubType =
@@ -269,9 +282,13 @@
static_cast<MedResSubType>(mediaResourceSubType),
mediaResourceValue));
bool result;
- mService->reclaimResource(pidZero, mediaRes, &result);
- mService->removeResource(pidZero, getId(testClient), mediaRes);
- mService->removeClient(pidZero, getId(testClient));
+ ClientInfoParcel pidZeroClient{.pid = static_cast<int32_t>(pidZero),
+ .uid = static_cast<int32_t>(0),
+ .id = getId(testClient),
+ .name = ""};
+ mService->reclaimResource(pidZeroClient, mediaRes, &result);
+ mService->removeResource(pidZeroClient, mediaRes);
+ mService->removeClient(pidZeroClient);
}
void ResourceManagerServiceFuzzer::setServiceLog() {
diff --git a/services/mediaresourcemanager/test/Android.bp b/services/mediaresourcemanager/test/Android.bp
index 618626f..60bb8c3 100644
--- a/services/mediaresourcemanager/test/Android.bp
+++ b/services/mediaresourcemanager/test/Android.bp
@@ -19,6 +19,9 @@
"liblog",
"libmedia",
"libutils",
+ "libstats_media_metrics",
+ "libstatspull",
+ "libstatssocket",
],
include_dirs: [
"frameworks/av/include",
@@ -64,6 +67,9 @@
"liblog",
"libmedia",
"libutils",
+ "libstats_media_metrics",
+ "libstatspull",
+ "libstatssocket",
],
include_dirs: [
"frameworks/av/include",
diff --git a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
index 5bf44ce..8194e23 100644
--- a/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
+++ b/services/mediaresourcemanager/test/ResourceManagerServiceTestUtils.h
@@ -122,11 +122,15 @@
struct TestClient : public BnResourceManagerClient {
- TestClient(int pid, const std::shared_ptr<ResourceManagerService> &service)
- : mPid(pid), mService(service) {}
+ TestClient(int pid, int uid, const std::shared_ptr<ResourceManagerService> &service)
+ : mPid(pid), mUid(uid), mService(service) {}
Status reclaimResource(bool* _aidl_return) override {
- mService->removeClient(mPid, getId(ref<TestClient>()));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(mPid),
+ .uid = static_cast<int32_t>(mUid),
+ .id = getId(ref<TestClient>()),
+ .name = "none"};
+ mService->removeClient(clientInfo);
mWasReclaimResourceCalled = true;
*_aidl_return = true;
return Status::ok();
@@ -148,6 +152,7 @@
private:
bool mWasReclaimResourceCalled = false;
int mPid;
+ int mUid;
std::shared_ptr<ResourceManagerService> mService;
DISALLOW_EVIL_CONSTRUCTORS(TestClient);
};
@@ -196,13 +201,13 @@
: mSystemCB(new TestSystemCallback()),
mService(::ndk::SharedRefBase::make<ResourceManagerService>(
new TestProcessInfo, mSystemCB)),
- mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, mService)),
- mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)),
- mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, mService)) {
+ mTestClient1(::ndk::SharedRefBase::make<TestClient>(kTestPid1, kTestUid1, mService)),
+ mTestClient2(::ndk::SharedRefBase::make<TestClient>(kTestPid2, kTestUid2, mService)),
+ mTestClient3(::ndk::SharedRefBase::make<TestClient>(kTestPid2, kTestUid2, mService)) {
}
- std::shared_ptr<IResourceManagerClient> createTestClient(int pid) {
- return ::ndk::SharedRefBase::make<TestClient>(pid, mService);
+ std::shared_ptr<IResourceManagerClient> createTestClient(int pid, int uid) {
+ return ::ndk::SharedRefBase::make<TestClient>(pid, uid, mService);
}
sp<TestSystemCallback> mSystemCB;
diff --git a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
index 8739c3b..41cccb8 100644
--- a/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceManagerService_test.cpp
@@ -98,24 +98,36 @@
// kTestPid1 mTestClient1
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
resources1.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
std::vector<MediaResourceParcel> resources11;
resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+ mService->addResource(client1Info, mTestClient1, resources11);
// kTestPid2 mTestClient2
std::vector<MediaResourceParcel> resources2;
resources2.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
resources2.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 300));
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+ mService->addResource(client2Info, mTestClient2, resources2);
// kTestPid2 mTestClient3
std::vector<MediaResourceParcel> resources3;
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
+ mService->addResource(client3Info, mTestClient3, resources3);
resources3.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
resources3.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 100));
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources3);
+ mService->addResource(client3Info, mTestClient3, resources3);
const PidResourceInfosMap &map = mService->mMap;
EXPECT_EQ(2u, map.size());
@@ -138,7 +150,11 @@
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, -100));
resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, -100));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
// Expected result:
// 1) the client should have been added;
@@ -155,11 +171,11 @@
resources1.clear();
resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MAX));
resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MAX));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
resources1.clear();
resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, 10));
resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 10));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
// Expected result:
// Both values should saturate to INT64_MAX
@@ -170,7 +186,7 @@
resources1.clear();
resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, -10));
resources1.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, -10));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
// Expected result:
// 1) DrmSession resource should allow negative value addition, and value should drop accordingly
@@ -182,7 +198,7 @@
resources1.clear();
resources1.push_back(MediaResource(MediaResource::Type::kDrmSession, INT64_MIN));
expected.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, INT64_MIN));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
// Expected result:
// 1) DrmSession resource value should drop to 0, but the entry shouldn't be removed.
@@ -228,11 +244,15 @@
// kTestPid1 mTestClient1
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
std::vector<MediaResourceParcel> resources11;
resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+ mService->addResource(client1Info, mTestClient1, resources11);
const PidResourceInfosMap &map = mService->mMap;
EXPECT_EQ(1u, map.size());
@@ -243,7 +263,7 @@
// test adding existing types to combine values
resources1.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 100));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
std::vector<MediaResourceParcel> expected;
expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 2));
@@ -253,7 +273,7 @@
// test adding new types (including types that differ only in subType)
resources11.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
resources11.push_back(MediaResource(MediaResource::Type::kSecureCodec, MediaResource::SubType::kVideoCodec, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+ mService->addResource(client1Info, mTestClient1, resources11);
expected.clear();
expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 2));
@@ -267,11 +287,15 @@
// kTestPid1 mTestClient1
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
std::vector<MediaResourceParcel> resources11;
resources11.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 200));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources11);
+ mService->addResource(client1Info, mTestClient1, resources11);
const PidResourceInfosMap &map = mService->mMap;
EXPECT_EQ(1u, map.size());
@@ -282,7 +306,7 @@
// test partial removal
resources11[0].value = 100;
- mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+ mService->removeResource(client1Info, resources11);
std::vector<MediaResourceParcel> expected;
expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
@@ -291,13 +315,13 @@
// test removal request with negative value, should be ignored
resources11[0].value = -10000;
- mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+ mService->removeResource(client1Info, resources11);
expectEqResourceInfo(infos1.valueFor(getId(mTestClient1)), kTestUid1, mTestClient1, expected);
// test complete removal with overshoot value
resources11[0].value = 1000;
- mService->removeResource(kTestPid1, getId(mTestClient1), resources11);
+ mService->removeResource(client1Info, resources11);
expected.clear();
expected.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
@@ -317,19 +341,35 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low to reclaim resource
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = 0,
+ .name = "none"};
+ CHECK_STATUS_FALSE(mService->reclaimResource(clientInfo, resources, &result));
// override Low Priority Pid with High Priority Pid
mService->overridePid(kLowPriorityPid, kHighPriorityPid);
- CHECK_STATUS_TRUE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(clientInfo, resources, &result));
// restore Low Priority Pid
mService->overridePid(kLowPriorityPid, -1);
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(clientInfo, resources, &result));
}
}
void testMarkClientForPendingRemoval() {
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
{
addResource();
mService->mSupportsSecureWithNonSecureCodec = true;
@@ -338,24 +378,24 @@
resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
// Remove low priority clients
- mService->removeClient(kTestPid1, getId(mTestClient1));
+ mService->removeClient(client1Info);
// no lower priority client
- CHECK_STATUS_FALSE(mService->reclaimResource(kTestPid2, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(client2Info, resources, &result));
EXPECT_EQ(false, toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
- mService->markClientForPendingRemoval(kTestPid2, getId(mTestClient2));
+ mService->markClientForPendingRemoval(client2Info);
// client marked for pending removal from the same process got reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kTestPid2, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(client2Info, resources, &result));
EXPECT_EQ(false, toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_EQ(true, toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// clean up client 3 which is still left
- mService->removeClient(kTestPid2, getId(mTestClient3));
+ mService->removeClient(client3Info);
}
{
@@ -365,30 +405,30 @@
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
- mService->markClientForPendingRemoval(kTestPid2, getId(mTestClient2));
+ mService->markClientForPendingRemoval(client2Info);
// client marked for pending removal from the same process got reclaimed
// first, even though there are lower priority processes
- CHECK_STATUS_TRUE(mService->reclaimResource(kTestPid2, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(client2Info, resources, &result));
EXPECT_EQ(false, toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_EQ(true, toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// lower priority client got reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kTestPid2, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(client2Info, resources, &result));
EXPECT_EQ(true, toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// clean up client 3 which is still left
- mService->removeClient(kTestPid2, getId(mTestClient3));
+ mService->removeClient(client3Info);
}
{
addResource();
mService->mSupportsSecureWithNonSecureCodec = true;
- mService->markClientForPendingRemoval(kTestPid2, getId(mTestClient2));
+ mService->markClientForPendingRemoval(client2Info);
// client marked for pending removal got reclaimed
EXPECT_TRUE(mService->reclaimResourcesFromClientsPendingRemoval(kTestPid2).isOk());
@@ -402,7 +442,7 @@
EXPECT_EQ(false, toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_EQ(false, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
- mService->markClientForPendingRemoval(kTestPid2, getId(mTestClient3));
+ mService->markClientForPendingRemoval(client3Info);
// client marked for pending removal got reclaimed
EXPECT_TRUE(mService->reclaimResourcesFromClientsPendingRemoval(kTestPid2).isOk());
@@ -411,14 +451,18 @@
EXPECT_EQ(true, toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// clean up client 1 which is still left
- mService->removeClient(kTestPid1, getId(mTestClient1));
+ mService->removeClient(client1Info);
}
}
void testRemoveClient() {
addResource();
- mService->removeClient(kTestPid2, getId(mTestClient2));
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+ mService->removeClient(client2Info);
const PidResourceInfosMap &map = mService->mMap;
EXPECT_EQ(2u, map.size());
@@ -437,11 +481,12 @@
MediaResource::SubType subType = MediaResource::SubType::kUnspecifiedSubType;
Vector<std::shared_ptr<IResourceManagerClient> > clients;
- EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, subType, &clients));
+ PidUidVector idList;
+ EXPECT_FALSE(mService->getAllClients_l(kLowPriorityPid, type, subType, &idList, &clients));
// some higher priority process (e.g. kTestPid2) owns the resource, so getAllClients_l
// will fail.
- EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, subType, &clients));
- EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, subType, &clients));
+ EXPECT_FALSE(mService->getAllClients_l(kMidPriorityPid, type, subType, &idList, &clients));
+ EXPECT_TRUE(mService->getAllClients_l(kHighPriorityPid, type, subType, &idList, &clients));
EXPECT_EQ(2u, clients.size());
// (OK to require ordering in clients[], as the pid map is sorted)
@@ -454,6 +499,19 @@
resources.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
resources.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 150));
+ ClientInfoParcel lowPriorityClient{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ ClientInfoParcel midPriorityClient{.pid = static_cast<int32_t>(kMidPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+
// ### secure codec can't coexist and secure codec can coexist with non-secure codec ###
{
addResource();
@@ -461,23 +519,23 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(midPriorityClient, resources, &result));
// reclaim all secure codecs
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim one largest graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
// ### secure codecs can't coexist and secure codec can't coexist with non-secure codec ###
@@ -487,17 +545,17 @@
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(midPriorityClient, resources, &result));
// reclaim all secure and non-secure codecs
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
@@ -508,29 +566,29 @@
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(midPriorityClient, resources, &result));
// reclaim all non-secure codecs
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim one largest graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another largest graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
// ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
@@ -540,28 +598,28 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
// one largest graphic memory from lowest process got reclaimed
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
// ### secure codecs can coexist and secure codec can coexist with non-secure codec ###
@@ -573,20 +631,20 @@
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource(MediaResource::Type::kSecureCodec, 1));
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
// secure codec from lowest process got reclaimed
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another secure codec from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// no more secure codec, non-secure codec will be reclaimed.
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
@@ -598,29 +656,42 @@
resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
resources.push_back(MediaResource(MediaResource::Type::kGraphicMemory, 150));
+ ClientInfoParcel lowPriorityClient{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ ClientInfoParcel midPriorityClient{.pid = static_cast<int32_t>(kMidPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+
// ### secure codec can't coexist with non-secure codec ###
{
addResource();
mService->mSupportsSecureWithNonSecureCodec = false;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
- CHECK_STATUS_FALSE(mService->reclaimResource(kMidPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(midPriorityClient, resources, &result));
// reclaim all secure codecs
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim one graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
@@ -630,28 +701,28 @@
mService->mSupportsSecureWithNonSecureCodec = true;
// priority too low
- CHECK_STATUS_FALSE(mService->reclaimResource(kLowPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(lowPriorityClient, resources, &result));
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
// one largest graphic memory from lowest process got reclaimed
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// call again should reclaim another graphic memory from lowest process
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// nothing left
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, resources, &result));
}
// ### secure codec can coexist with non-secure codec ###
@@ -662,20 +733,24 @@
std::vector<MediaResourceParcel> resources;
resources.push_back(MediaResource(MediaResource::Type::kNonSecureCodec, 1));
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
// one non secure codec from lowest process got reclaimed
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_TRUE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// no more non-secure codec, secure codec from lowest priority process will be reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, resources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, resources, &result));
EXPECT_TRUE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient2)->checkIfReclaimedAndReset());
EXPECT_FALSE(toTestClient(mTestClient3)->checkIfReclaimedAndReset());
// clean up client 3 which is still left
- mService->removeClient(kTestPid2, getId(mTestClient3));
+ ClientInfoParcel clientInfo{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
+ mService->removeClient(clientInfo);
}
}
@@ -683,15 +758,16 @@
MediaResource::Type type = MediaResource::Type::kGraphicMemory;
MediaResource::SubType subType = MediaResource::SubType::kUnspecifiedSubType;
std::shared_ptr<IResourceManagerClient> client;
+ PidUidVector idList;
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, subType,
- &client));
+ &idList, &client));
addResource();
EXPECT_FALSE(mService->getLowestPriorityBiggestClient_l(kLowPriorityPid, type, subType,
- &client));
+ &idList, &client));
EXPECT_TRUE(mService->getLowestPriorityBiggestClient_l(kHighPriorityPid, type, subType,
- &client));
+ &idList, &client));
// kTestPid1 is the lowest priority process with MediaResource::Type::kGraphicMemory.
// mTestClient1 has the largest MediaResource::Type::kGraphicMemory within kTestPid1.
@@ -737,33 +813,41 @@
// new client request should cause VIDEO_ON
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kBattery, MediaResource::SubType::kVideoCodec, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
EXPECT_EQ(2u, mSystemCB->eventCount());
EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid1}), mSystemCB->lastEvent());
// each client should only cause 1 VIDEO_ON
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
EXPECT_EQ(2u, mSystemCB->eventCount());
// new client request should cause VIDEO_ON
std::vector<MediaResourceParcel> resources2;
resources2.push_back(MediaResource(MediaResource::Type::kBattery, MediaResource::SubType::kVideoCodec, 2));
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+ mService->addResource(client2Info, mTestClient2, resources2);
EXPECT_EQ(3u, mSystemCB->eventCount());
EXPECT_EQ(EventEntry({EventType::VIDEO_ON, kTestUid2}), mSystemCB->lastEvent());
// partially remove mTestClient1's request, shouldn't be any VIDEO_OFF
- mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+ mService->removeResource(client1Info, resources1);
EXPECT_EQ(3u, mSystemCB->eventCount());
// remove mTestClient1's request, should be VIDEO_OFF for kTestUid1
// (use resource2 to test removing more instances than previously requested)
- mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+ mService->removeResource(client1Info, resources2);
EXPECT_EQ(4u, mSystemCB->eventCount());
EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid1}), mSystemCB->lastEvent());
// remove mTestClient2, should be VIDEO_OFF for kTestUid2
- mService->removeClient(kTestPid2, getId(mTestClient2));
+ mService->removeClient(client2Info);
EXPECT_EQ(5u, mSystemCB->eventCount());
EXPECT_EQ(EventEntry({EventType::VIDEO_OFF, kTestUid2}), mSystemCB->lastEvent());
}
@@ -776,32 +860,40 @@
// new client request should cause CPUSET_ENABLE
std::vector<MediaResourceParcel> resources1;
resources1.push_back(MediaResource(MediaResource::Type::kCpuBoost, 1));
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+ mService->addResource(client1Info, mTestClient1, resources1);
EXPECT_EQ(2u, mSystemCB->eventCount());
EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
// each client should only cause 1 CPUSET_ENABLE
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources1);
+ mService->addResource(client1Info, mTestClient1, resources1);
EXPECT_EQ(2u, mSystemCB->eventCount());
// new client request should cause CPUSET_ENABLE
std::vector<MediaResourceParcel> resources2;
resources2.push_back(MediaResource(MediaResource::Type::kCpuBoost, 2));
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources2);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+ mService->addResource(client2Info, mTestClient2, resources2);
EXPECT_EQ(3u, mSystemCB->eventCount());
EXPECT_EQ(EventType::CPUSET_ENABLE, mSystemCB->lastEventType());
// remove mTestClient2 should not cause CPUSET_DISABLE, mTestClient1 still active
- mService->removeClient(kTestPid2, getId(mTestClient2));
+ mService->removeClient(client2Info);
EXPECT_EQ(3u, mSystemCB->eventCount());
// remove 1 cpuboost from mTestClient1, should not be CPUSET_DISABLE (still 1 left)
- mService->removeResource(kTestPid1, getId(mTestClient1), resources1);
+ mService->removeResource(client1Info, resources1);
EXPECT_EQ(3u, mSystemCB->eventCount());
// remove 2 cpuboost from mTestClient1, should be CPUSET_DISABLE
// (use resource2 to test removing more than previously requested)
- mService->removeResource(kTestPid1, getId(mTestClient1), resources2);
+ mService->removeResource(client1Info, resources2);
EXPECT_EQ(4u, mSystemCB->eventCount());
EXPECT_EQ(EventType::CPUSET_DISABLE, mSystemCB->lastEventType());
}
@@ -814,22 +906,32 @@
std::vector<MediaResourceParcel> audioImageResources;
audioImageResources.push_back(createNonSecureAudioCodecResource());
audioImageResources.push_back(createNonSecureImageCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid1, getId(audioImageTestClient),
- audioImageTestClient, audioImageResources);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(audioImageTestClient),
+ .name = "none"};
+ mService->addResource(client1Info, audioImageTestClient, audioImageResources);
// Fail to reclaim a video codec resource
std::vector<MediaResourceParcel> reclaimResources;
reclaimResources.push_back(createNonSecureVideoCodecResource());
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Now add a video codec resource
std::vector<MediaResourceParcel> videoResources;
videoResources.push_back(createNonSecureVideoCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid1, getId(videoTestClient), videoTestClient,
- videoResources);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(videoTestClient),
+ .name = "none"};
+ mService->addResource(client2Info, videoTestClient, videoResources);
// Verify that the newly-created video codec resource can be reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Verify that the audio and image resources are untouched
EXPECT_FALSE(toTestClient(audioImageTestClient)->checkIfReclaimedAndReset());
@@ -845,22 +947,32 @@
std::vector<MediaResourceParcel> videoImageResources;
videoImageResources.push_back(createNonSecureVideoCodecResource());
videoImageResources.push_back(createNonSecureImageCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid1, getId(videoImageTestClient),
- videoImageTestClient, videoImageResources);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(videoImageTestClient),
+ .name = "none"};
+ mService->addResource(client1Info, videoImageTestClient, videoImageResources);
// Fail to reclaim an audio codec resource
std::vector<MediaResourceParcel> reclaimResources;
reclaimResources.push_back(createNonSecureAudioCodecResource());
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Now add an audio codec resource
std::vector<MediaResourceParcel> audioResources;
audioResources.push_back(createNonSecureAudioCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid2, getId(audioTestClient), audioTestClient,
- audioResources);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(audioTestClient),
+ .name = "none"};
+ mService->addResource(client2Info, audioTestClient, audioResources);
// Verify that the newly-created audio codec resource can be reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Verify that the video and image resources are untouched
EXPECT_FALSE(toTestClient(videoImageTestClient)->checkIfReclaimedAndReset());
@@ -876,22 +988,32 @@
std::vector<MediaResourceParcel> videoAudioResources;
videoAudioResources.push_back(createNonSecureVideoCodecResource());
videoAudioResources.push_back(createNonSecureAudioCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid1, getId(videoAudioTestClient),
- videoAudioTestClient, videoAudioResources);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(videoAudioTestClient),
+ .name = "none"};
+ mService->addResource(client1Info, videoAudioTestClient, videoAudioResources);
// Fail to reclaim an image codec resource
std::vector<MediaResourceParcel> reclaimResources;
reclaimResources.push_back(createNonSecureImageCodecResource());
- CHECK_STATUS_FALSE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ CHECK_STATUS_FALSE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Now add an image codec resource
std::vector<MediaResourceParcel> imageResources;
imageResources.push_back(createNonSecureImageCodecResource());
- mService->addResource(kLowPriorityPid, kTestUid2, getId(imageTestClient), imageTestClient,
- imageResources);
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(imageTestClient),
+ .name = "none"};
+ mService->addResource(client2Info, imageTestClient, imageResources);
// Verify that the newly-created image codec resource can be reclaimed
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Verify that the video and audio resources are untouched
EXPECT_FALSE(toTestClient(mTestClient1)->checkIfReclaimedAndReset());
@@ -901,20 +1023,27 @@
void testReclaimResources_whenPartialResourceMatch_reclaims() {
const int onlyUid = kTestUid1;
- const auto onlyClient = createTestClient(kLowPriorityPid);
+ const auto onlyClient = createTestClient(kLowPriorityPid, onlyUid);
std::vector<MediaResourceParcel> ownedResources;
ownedResources.push_back(createNonSecureVideoCodecResource());
ownedResources.push_back(createGraphicMemoryResource(100));
- mService->addResource(kLowPriorityPid, onlyUid, getId(onlyClient), onlyClient,
- ownedResources);
+ ClientInfoParcel onlyClientInfo{.pid = static_cast<int32_t>(kLowPriorityPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(onlyClient),
+ .name = "none"};
+ mService->addResource(onlyClientInfo, onlyClient, ownedResources);
// Reclaim an image codec instead of the video codec that is owned, but also reclaim
// graphics memory, which will trigger the reclaim.
std::vector<MediaResourceParcel> reclaimResources;
reclaimResources.push_back(createNonSecureImageCodecResource());
reclaimResources.push_back(createGraphicMemoryResource(100));
- CHECK_STATUS_TRUE(mService->reclaimResource(kHighPriorityPid, reclaimResources, &result));
+ ClientInfoParcel highPriorityClient{.pid = static_cast<int32_t>(kHighPriorityPid),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = 0,
+ .name = "none"};
+ CHECK_STATUS_TRUE(mService->reclaimResource(highPriorityClient, reclaimResources, &result));
// Verify that the video codec resources (including the needed graphic memory) are reclaimed
EXPECT_TRUE(toTestClient(onlyClient)->checkIfReclaimedAndReset());
@@ -926,200 +1055,278 @@
const int onlyUid = kTestUid1;
// secure video codec
- const auto smallSecureVideoMarkedClient = createTestClient(onlyPid);
- const auto largeSecureVideoMarkedClient = createTestClient(onlyPid);
- const auto largestSecureVideoActiveClient = createTestClient(onlyPid);
+ const auto smallSecureVideoMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeSecureVideoMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestSecureVideoActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientA{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallSecureVideoMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientB{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeSecureVideoMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientC{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestSecureVideoActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createSecureVideoCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallSecureVideoMarkedClient),
- smallSecureVideoMarkedClient, resources);
+ mService->addResource(clientA, smallSecureVideoMarkedClient, resources);
resources.clear();
resources.push_back(createSecureVideoCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeSecureVideoMarkedClient),
- largeSecureVideoMarkedClient, resources);
+ mService->addResource(clientB, largeSecureVideoMarkedClient, resources);
resources.clear();
resources.push_back(createSecureVideoCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestSecureVideoActiveClient),
- largestSecureVideoActiveClient, resources);
+ mService->addResource(clientC, largestSecureVideoActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallSecureVideoMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeSecureVideoMarkedClient));
+ mService->markClientForPendingRemoval(clientA);
+ mService->markClientForPendingRemoval(clientB);
// don't mark the largest client
// non-secure video codec
- const auto smallNonSecureVideoMarkedClient = createTestClient(onlyPid);
- const auto largeNonSecureVideoMarkedClient = createTestClient(onlyPid);
- const auto largestNonSecureVideoActiveClient = createTestClient(onlyPid);
+ const auto smallNonSecureVideoMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeNonSecureVideoMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestNonSecureVideoActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientD{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallNonSecureVideoMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientE{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeNonSecureVideoMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientF{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestNonSecureVideoActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createNonSecureVideoCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallNonSecureVideoMarkedClient),
- smallNonSecureVideoMarkedClient, resources);
+ mService->addResource(clientD, smallNonSecureVideoMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureVideoCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeNonSecureVideoMarkedClient),
- largeNonSecureVideoMarkedClient, resources);
+ mService->addResource(clientE, largeNonSecureVideoMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureVideoCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestNonSecureVideoActiveClient),
- largestNonSecureVideoActiveClient, resources);
+ mService->addResource(clientF, largestNonSecureVideoActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallNonSecureVideoMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeNonSecureVideoMarkedClient));
+ mService->markClientForPendingRemoval(clientD);
+ mService->markClientForPendingRemoval(clientE);
// don't mark the largest client
// secure audio codec
- const auto smallSecureAudioMarkedClient = createTestClient(onlyPid);
- const auto largeSecureAudioMarkedClient = createTestClient(onlyPid);
- const auto largestSecureAudioActiveClient = createTestClient(onlyPid);
+ const auto smallSecureAudioMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeSecureAudioMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestSecureAudioActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientG{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallSecureAudioMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientH{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeSecureAudioMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientI{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestSecureVideoActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createSecureAudioCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallSecureAudioMarkedClient),
- smallSecureAudioMarkedClient, resources);
+ mService->addResource(clientG, smallSecureAudioMarkedClient, resources);
resources.clear();
resources.push_back(createSecureAudioCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeSecureAudioMarkedClient),
- largeSecureAudioMarkedClient, resources);
+ mService->addResource(clientH, largeSecureAudioMarkedClient, resources);
resources.clear();
resources.push_back(createSecureAudioCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestSecureVideoActiveClient),
- largestSecureVideoActiveClient, resources);
+ mService->addResource(clientI, largestSecureVideoActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallSecureAudioMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeSecureAudioMarkedClient));
+ mService->markClientForPendingRemoval(clientG);
+ mService->markClientForPendingRemoval(clientH);
// don't mark the largest client
// non-secure audio codec
- const auto smallNonSecureAudioMarkedClient = createTestClient(onlyPid);
- const auto largeNonSecureAudioMarkedClient = createTestClient(onlyPid);
- const auto largestNonSecureAudioActiveClient = createTestClient(onlyPid);
+ const auto smallNonSecureAudioMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeNonSecureAudioMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestNonSecureAudioActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientJ{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallNonSecureAudioMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientK{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeNonSecureAudioMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientL{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestNonSecureAudioActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createNonSecureAudioCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallNonSecureAudioMarkedClient),
- smallNonSecureAudioMarkedClient, resources);
+ mService->addResource(clientJ, smallNonSecureAudioMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureAudioCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeNonSecureAudioMarkedClient),
- largeNonSecureAudioMarkedClient, resources);
+ mService->addResource(clientK, largeNonSecureAudioMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureAudioCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestNonSecureAudioActiveClient),
- largestNonSecureAudioActiveClient, resources);
+ mService->addResource(clientL, largestNonSecureAudioActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallNonSecureAudioMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeNonSecureAudioMarkedClient));
+ mService->markClientForPendingRemoval(clientJ);
+ mService->markClientForPendingRemoval(clientK);
// don't mark the largest client
// secure image codec
- const auto smallSecureImageMarkedClient = createTestClient(onlyPid);
- const auto largeSecureImageMarkedClient = createTestClient(onlyPid);
- const auto largestSecureImageActiveClient = createTestClient(onlyPid);
+ const auto smallSecureImageMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeSecureImageMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestSecureImageActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientM{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallSecureImageMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientN{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeSecureImageMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientO{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestSecureImageActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createSecureImageCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallSecureImageMarkedClient),
- smallSecureImageMarkedClient, resources);
+ mService->addResource(clientM, smallSecureImageMarkedClient, resources);
resources.clear();
resources.push_back(createSecureImageCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeSecureImageMarkedClient),
- largeSecureImageMarkedClient, resources);
+ mService->addResource(clientN, largeSecureImageMarkedClient, resources);
resources.clear();
resources.push_back(createSecureImageCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestSecureImageActiveClient),
- largestSecureImageActiveClient, resources);
+ mService->addResource(clientO, largestSecureImageActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallSecureImageMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeSecureImageMarkedClient));
+ mService->markClientForPendingRemoval(clientM);
+ mService->markClientForPendingRemoval(clientN);
// don't mark the largest client
// non-secure image codec
- const auto smallNonSecureImageMarkedClient = createTestClient(onlyPid);
- const auto largeNonSecureImageMarkedClient = createTestClient(onlyPid);
- const auto largestNonSecureImageActiveClient = createTestClient(onlyPid);
+ const auto smallNonSecureImageMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeNonSecureImageMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestNonSecureImageActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientP{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallNonSecureImageMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientQ{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeNonSecureImageMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientR{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestNonSecureImageActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createNonSecureImageCodecResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallNonSecureImageMarkedClient),
- smallNonSecureImageMarkedClient, resources);
+ mService->addResource(clientP, smallNonSecureImageMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureImageCodecResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeNonSecureImageMarkedClient),
- largeNonSecureImageMarkedClient, resources);
+ mService->addResource(clientQ, largeNonSecureImageMarkedClient, resources);
resources.clear();
resources.push_back(createNonSecureImageCodecResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestNonSecureImageActiveClient),
- largestNonSecureImageActiveClient, resources);
+ mService->addResource(clientR, largestNonSecureImageActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallNonSecureImageMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeNonSecureImageMarkedClient));
+ mService->markClientForPendingRemoval(clientP);
+ mService->markClientForPendingRemoval(clientQ);
// don't mark the largest client
// graphic memory
- const auto smallGraphicMemoryMarkedClient = createTestClient(onlyPid);
- const auto largeGraphicMemoryMarkedClient = createTestClient(onlyPid);
- const auto largestGraphicMemoryActiveClient = createTestClient(onlyPid);
+ const auto smallGraphicMemoryMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeGraphicMemoryMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestGraphicMemoryActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientS{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallGraphicMemoryMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientT{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeGraphicMemoryMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientU{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestGraphicMemoryActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createGraphicMemoryResource(100));
- mService->addResource(onlyPid, onlyUid, getId(smallGraphicMemoryMarkedClient),
- smallGraphicMemoryMarkedClient, resources);
+ mService->addResource(clientS, smallGraphicMemoryMarkedClient, resources);
resources.clear();
resources.push_back(createGraphicMemoryResource(200));
- mService->addResource(onlyPid, onlyUid, getId(largeGraphicMemoryMarkedClient),
- largeGraphicMemoryMarkedClient, resources);
+ mService->addResource(clientT, largeGraphicMemoryMarkedClient, resources);
resources.clear();
resources.push_back(createGraphicMemoryResource(300));
- mService->addResource(onlyPid, onlyUid, getId(largestGraphicMemoryActiveClient),
- largestGraphicMemoryActiveClient, resources);
+ mService->addResource(clientU, largestGraphicMemoryActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallGraphicMemoryMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeGraphicMemoryMarkedClient));
+ mService->markClientForPendingRemoval(clientS);
+ mService->markClientForPendingRemoval(clientT);
// don't mark the largest client
// DRM session
- const auto smallDrmSessionMarkedClient = createTestClient(onlyPid);
- const auto largeDrmSessionMarkedClient = createTestClient(onlyPid);
- const auto largestDrmSessionActiveClient = createTestClient(onlyPid);
+ const auto smallDrmSessionMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largeDrmSessionMarkedClient = createTestClient(onlyPid, onlyUid);
+ const auto largestDrmSessionActiveClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientV{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(smallDrmSessionMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientW{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largeDrmSessionMarkedClient),
+ .name = "none"};
+ ClientInfoParcel clientX{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(largestDrmSessionActiveClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createDrmSessionResource(1));
- mService->addResource(onlyPid, onlyUid, getId(smallDrmSessionMarkedClient),
- smallDrmSessionMarkedClient, resources);
+ mService->addResource(clientV, smallDrmSessionMarkedClient, resources);
resources.clear();
resources.push_back(createDrmSessionResource(2));
- mService->addResource(onlyPid, onlyUid, getId(largeDrmSessionMarkedClient),
- largeDrmSessionMarkedClient, resources);
+ mService->addResource(clientW, largeDrmSessionMarkedClient, resources);
resources.clear();
resources.push_back(createDrmSessionResource(3));
- mService->addResource(onlyPid, onlyUid, getId(largestDrmSessionActiveClient),
- largestDrmSessionActiveClient, resources);
+ mService->addResource(clientX, largestDrmSessionActiveClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(smallDrmSessionMarkedClient));
- mService->markClientForPendingRemoval(onlyPid, getId(largeDrmSessionMarkedClient));
+ mService->markClientForPendingRemoval(clientV);
+ mService->markClientForPendingRemoval(clientW);
// don't mark the largest client
// battery
- const auto batteryMarkedClient = createTestClient(onlyPid);
+ const auto batteryMarkedClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientY{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(batteryMarkedClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createBatteryResource());
- mService->addResource(onlyPid, onlyUid, getId(batteryMarkedClient),
- batteryMarkedClient, resources);
+ mService->addResource(clientY, batteryMarkedClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(batteryMarkedClient));
+ mService->markClientForPendingRemoval(clientY);
// CPU boost
- const auto cpuBoostMarkedClient = createTestClient(onlyPid);
+ const auto cpuBoostMarkedClient = createTestClient(onlyPid, onlyUid);
+ ClientInfoParcel clientZ{.pid = static_cast<int32_t>(onlyPid),
+ .uid = static_cast<int32_t>(onlyUid),
+ .id = getId(cpuBoostMarkedClient),
+ .name = "none"};
{
std::vector<MediaResourceParcel> resources;
resources.push_back(createCpuBoostResource());
- mService->addResource(onlyPid, onlyUid, getId(cpuBoostMarkedClient),
- cpuBoostMarkedClient, resources);
+ mService->addResource(clientZ, cpuBoostMarkedClient, resources);
}
- mService->markClientForPendingRemoval(onlyPid, getId(cpuBoostMarkedClient));
+ mService->markClientForPendingRemoval(clientZ);
// now we expect that we only reclaim resources from the biggest marked client
EXPECT_TRUE(mService->reclaimResourcesFromClientsPendingRemoval(onlyPid).isOk());
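
Note on the ResourceManagerService_test changes above: calls that previously spread the client identity across (pid, uid, clientId) positional arguments now take a single ClientInfoParcel. A minimal before/after sketch of the call shape, reusing the same fixture helpers seen in the diff (createTestClient, getId, createGraphicMemoryResource); `client` is a placeholder name for any of the marked test clients, not an identifier from the patch:

    // Old shape: identity spread across positional arguments.
    // mService->addResource(onlyPid, onlyUid, getId(client), client, resources);
    // mService->markClientForPendingRemoval(onlyPid, getId(client));

    // New shape: identity bundled into one ClientInfoParcel.
    ClientInfoParcel info{.pid = static_cast<int32_t>(onlyPid),
                          .uid = static_cast<int32_t>(onlyUid),
                          .id = getId(client),
                          .name = "none"};
    std::vector<MediaResourceParcel> resources;
    resources.push_back(createGraphicMemoryResource(100));
    mService->addResource(info, client, resources);
    mService->markClientForPendingRemoval(info);
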
diff --git a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
index 003569d..a0d728c 100644
--- a/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
+++ b/services/mediaresourcemanager/test/ResourceObserverService_test.cpp
@@ -251,17 +251,31 @@
observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
{MediaObservableType::kVideoNonSecureCodec, 1}};
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
std::vector<MediaResourceParcel> resources;
// Add secure video codec.
resources = {createSecureVideoCodecResource()};
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ mService->addResource(client1Info, mTestClient1, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
// Add non-secure video codec.
resources = {createNonSecureVideoCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ mService->addResource(client2Info, mTestClient2, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
@@ -269,7 +283,7 @@
// Add secure & non-secure video codecs.
resources = {createSecureVideoCodecResource(),
createNonSecureVideoCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ mService->addResource(client3Info, mTestClient3, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
@@ -277,7 +291,7 @@
// Add additional audio codecs, should be ignored.
resources.push_back(createSecureAudioCodecResource());
resources.push_back(createNonSecureAudioCodecResource());
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ mService->addResource(client1Info, mTestClient1, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables3));
@@ -303,7 +317,11 @@
observables2 = {{MediaObservableType::kVideoNonSecureCodec, 3}};
observables3 = {{MediaObservableType::kVideoSecureCodec, 2},
{MediaObservableType::kVideoNonSecureCodec, 3}};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
+ mService->addResource(client3Info, mTestClient3, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
@@ -318,47 +336,61 @@
observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
{MediaObservableType::kVideoNonSecureCodec, 1}};
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
std::vector<MediaResourceParcel> resources;
// Add secure video codec to client1.
resources = {createSecureVideoCodecResource()};
- mService->addResource(kTestPid1, kTestUid1, getId(mTestClient1), mTestClient1, resources);
+ mService->addResource(client1Info, mTestClient1, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid1, kTestPid1, observables1));
// Remove secure video codec. observer 1&3 should receive updates.
- mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ mService->removeResource(client1Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid1, kTestPid1, observables1));
// Remove secure video codec again, should have no event.
- mService->removeResource(kTestPid1, getId(mTestClient1), resources);
+ mService->removeResource(client1Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
// Remove client1, should have no event.
- mService->removeClient(kTestPid1, getId(mTestClient1));
+ mService->removeClient(client1Info);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
// Add non-secure video codec to client2.
resources = {createNonSecureVideoCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient2), mTestClient2, resources);
+ mService->addResource(client2Info, mTestClient2, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
// Remove client2, observer 2&3 should receive updates.
- mService->removeClient(kTestPid2, getId(mTestClient2));
+ mService->removeClient(client2Info);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
// Remove non-secure codec after client2 removed, should have no event.
- mService->removeResource(kTestPid2, getId(mTestClient2), resources);
+ mService->removeResource(client2Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
// Remove client2 again, should have no event.
- mService->removeClient(kTestPid2, getId(mTestClient2));
+ mService->removeClient(client2Info);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
@@ -368,13 +400,13 @@
createNonSecureVideoCodecResource(),
createSecureAudioCodecResource(),
createNonSecureAudioCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ mService->addResource(client3Info, mTestClient3, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables3));
// Remove one audio codec, should have no event.
resources = {createSecureAudioCodecResource()};
- mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ mService->removeResource(client3Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::NoEvent);
@@ -382,12 +414,12 @@
// removal should be reported.
resources = {createNonSecureAudioCodecResource(),
createSecureVideoCodecResource()};
- mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ mService->removeResource(client3Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
// Remove client3 entirely. Non-secure video codec removal should be reported.
- mService->removeClient(kTestPid2, getId(mTestClient3));
+ mService->removeClient(client3Info);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
@@ -410,7 +442,12 @@
createNonSecureVideoCodecResource(4),
createSecureAudioCodecResource(),
createNonSecureAudioCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
+ mService->addResource(client3Info, mTestClient3, resources);
observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
observables2 = {{MediaObservableType::kVideoNonSecureCodec, 4}};
observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
@@ -424,7 +461,7 @@
createSecureVideoCodecResource(),
createSecureVideoCodecResource(),
createNonSecureVideoCodecResource(2)};
- mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ mService->removeResource(client3Info, resources);
observables1 = {{MediaObservableType::kVideoSecureCodec, 1}};
observables2 = {{MediaObservableType::kVideoNonSecureCodec, 2}};
observables3 = {{MediaObservableType::kVideoSecureCodec, 1},
@@ -433,7 +470,7 @@
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables3));
// Remove client3 entirely. 2 non-secure video codecs removal should be reported.
- mService->removeClient(kTestPid2, getId(mTestClient3));
+ mService->removeClient(client3Info);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
@@ -465,13 +502,27 @@
// Add secure & non-secure video codecs.
resources = {createSecureVideoCodecResource(),
createNonSecureVideoCodecResource()};
- mService->addResource(kTestPid2, kTestUid2, getId(mTestClient3), mTestClient3, resources);
+ ClientInfoParcel client1Info{.pid = static_cast<int32_t>(kTestPid1),
+ .uid = static_cast<int32_t>(kTestUid1),
+ .id = getId(mTestClient1),
+ .name = "none"};
+
+ ClientInfoParcel client2Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient2),
+ .name = "none"};
+
+ ClientInfoParcel client3Info{.pid = static_cast<int32_t>(kTestPid2),
+ .uid = static_cast<int32_t>(kTestUid2),
+ .id = getId(mTestClient3),
+ .name = "none"};
+ mService->addResource(client3Info, mTestClient3, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables1));
EXPECT_EQ(mTestObserver2->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Busy(kTestUid2, kTestPid2, observables2));
// Remove secure & non-secure video codecs.
- mService->removeResource(kTestPid2, getId(mTestClient3), resources);
+ mService->removeResource(client3Info, resources);
EXPECT_EQ(mTestObserver1->pop(), EventTracker::NoEvent);
EXPECT_EQ(mTestObserver2->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables2));
EXPECT_EQ(mTestObserver3->pop(), EventTracker::Idle(kTestUid2, kTestPid2, observables1));
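
The ResourceObserverService_test hunks above rebuild identical ClientInfoParcel literals in several test bodies. A small fixture helper would keep them in sync; the sketch below is one possible factoring, where makeClientInfo is a hypothetical name not present in the change and the int64_t id type is assumed from the parcel's id field:

    // Hypothetical convenience helper for the test fixture; not part of the patch.
    static ClientInfoParcel makeClientInfo(pid_t pid, uid_t uid, int64_t clientId) {
        return ClientInfoParcel{.pid = static_cast<int32_t>(pid),
                                .uid = static_cast<int32_t>(uid),
                                .id = clientId,
                                .name = "none"};
    }

    // Usage mirroring the diff:
    ClientInfoParcel client1Info = makeClientInfo(kTestPid1, kTestUid1, getId(mTestClient1));
    mService->addResource(client1Info, mTestClient1, resources);
    mService->removeClient(client1Info);
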
diff --git a/services/tuner/hidl/TunerHidlDvr.cpp b/services/tuner/hidl/TunerHidlDvr.cpp
index 1a619d5..3ea1eb1 100644
--- a/services/tuner/hidl/TunerHidlDvr.cpp
+++ b/services/tuner/hidl/TunerHidlDvr.cpp
@@ -72,7 +72,7 @@
AidlMQDesc aidlMQDesc;
unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(dvrMQDesc, &aidlMQDesc);
- *_aidl_return = move(aidlMQDesc);
+ *_aidl_return = std::move(aidlMQDesc);
return ::ndk::ScopedAStatus::ok();
}
diff --git a/services/tuner/hidl/TunerHidlFilter.cpp b/services/tuner/hidl/TunerHidlFilter.cpp
index fe74a5c..c82732b 100644
--- a/services/tuner/hidl/TunerHidlFilter.cpp
+++ b/services/tuner/hidl/TunerHidlFilter.cpp
@@ -139,7 +139,7 @@
AidlMQDesc aidlMQDesc;
unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(filterMQDesc, &aidlMQDesc);
- *_aidl_return = move(aidlMQDesc);
+ *_aidl_return = std::move(aidlMQDesc);
return ::ndk::ScopedAStatus::ok();
}
@@ -1084,8 +1084,8 @@
}
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::media>(move(media));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::media>(std::move(media));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1101,8 +1101,8 @@
section.dataLength = static_cast<int64_t>(sectionEvent.dataLength);
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::section>(move(section));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::section>(std::move(section));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1117,8 +1117,8 @@
pes.mpuSequenceNumber = static_cast<int32_t>(pesEvent.mpuSequenceNumber);
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::pes>(move(pes));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::pes>(std::move(pes));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1167,8 +1167,8 @@
}
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::tsRecord>(move(tsRecord));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::tsRecord>(std::move(tsRecord));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1194,8 +1194,8 @@
}
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::mmtpRecord>(move(mmtpRecord));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::mmtpRecord>(std::move(mmtpRecord));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1213,8 +1213,8 @@
download.dataLength = static_cast<int32_t>(downloadEvent.dataLength);
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::download>(move(download));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::download>(std::move(download));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1227,8 +1227,8 @@
ipPayload.dataLength = static_cast<int32_t>(ipPayloadEvent.dataLength);
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::ipPayload>(move(ipPayload));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::ipPayload>(std::move(ipPayload));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1245,8 +1245,8 @@
copy(descrData.begin(), descrData.end(), temi.descrData.begin());
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::temi>(move(temi));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::temi>(std::move(temi));
+ res.push_back(std::move(filterEvent));
}
}
@@ -1268,15 +1268,15 @@
}
DemuxFilterEvent filterEvent;
- filterEvent.set<DemuxFilterEvent::monitorEvent>(move(monitor));
- res.push_back(move(filterEvent));
+ filterEvent.set<DemuxFilterEvent::monitorEvent>(std::move(monitor));
+ res.push_back(std::move(filterEvent));
}
void TunerHidlFilter::FilterCallback::getRestartEvent(
const vector<HidlDemuxFilterEventExt::Event>& eventsExt, vector<DemuxFilterEvent>& res) {
DemuxFilterEvent filterEvent;
filterEvent.set<DemuxFilterEvent::startId>(static_cast<int32_t>(eventsExt[0].startId()));
- res.push_back(move(filterEvent));
+ res.push_back(std::move(filterEvent));
}
} // namespace tuner
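
The TunerHidlDvr.cpp and TunerHidlFilter.cpp changes above only qualify previously bare move(...) calls. As a small standalone illustration (not taken from the patch): an unqualified move resolves through argument-dependent lookup only because its operand's type lives in namespace std, and recent Clang versions flag it with -Wunqualified-std-cast-call, so writing std::move makes the cast explicit and portable.

    #include <utility>
    #include <vector>

    std::vector<int> consume(std::vector<int>& v) {
        // Bare `move(v)` would still compile here via ADL because std::vector
        // lives in namespace std, but it is fragile and triggers
        // -Wunqualified-std-cast-call on recent Clang.
        return std::move(v);
    }
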