Merge "Implementing CryptoAsync"
diff --git a/apex/ld.config.txt b/apex/ld.config.txt
index 713f0b7..4dc5fb1 100644
--- a/apex/ld.config.txt
+++ b/apex/ld.config.txt
@@ -33,7 +33,7 @@
# TODO: replace the following when apex has a way to auto-generate this list
# namespace.default.link.platform.shared_libs = %LLNDK_LIBRARIES%
# namespace.default.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
-namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libdl_android.so:libvulkan.so:libbinder_ndk.so
+namespace.default.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libdl_android.so:libvulkan.so:libbinder_ndk.so
###############################################################################
# "platform" namespace
@@ -138,7 +138,7 @@
# TODO: replace the following when apex has a way to auto-generate this list
# namespace.sphal.link.platform.shared_libs = %LLNDK_LIBRARIES%
# namespace.sphal.link.platform.shared_libs += %SANITIZER_RUNTIME_LIBRARIES%
-namespace.sphal.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libRS.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so:libbinder_ndk.so
+namespace.sphal.link.platform.shared_libs = libEGL.so:libGLESv1_CM.so:libGLESv2.so:libGLESv3.so:libandroid_net.so:libc.so:libcgrouprc.so:libclang_rt.asan-aarch64-android.so:libclang_rt.asan-arm-android.so:libclang_rt.hwasan-aarch64-android.so:libclang_rt.asan-i686-android.so:libclang_rt.asan-x86_64-android.so:libdl.so:libft2.so:liblog.so:libm.so:libmediandk.so:libnativewindow.so:libneuralnetworks.so:libsync.so:libvndksupport.so:libvulkan.so:libbinder_ndk.so
# Add a link for libz.so which is llndk on devices where VNDK is not enforced.
namespace.sphal.link.platform.shared_libs += libz.so
diff --git a/camera/cameraserver/manifest_android.frameworks.cameraservice.service.xml b/camera/cameraserver/manifest_android.frameworks.cameraservice.service.xml
index e892471..f7e455f 100644
--- a/camera/cameraserver/manifest_android.frameworks.cameraservice.service.xml
+++ b/camera/cameraserver/manifest_android.frameworks.cameraservice.service.xml
@@ -1,5 +1,5 @@
<manifest version="1.0" type="framework">
- <hal>
+ <hal format="hidl" max-level="7">
<name>android.frameworks.cameraservice.service</name>
<transport>hwbinder</transport>
<version>2.2</version>
diff --git a/camera/ndk/impl/ACameraManager.cpp b/camera/ndk/impl/ACameraManager.cpp
index 9dc262f..837b5be 100644
--- a/camera/ndk/impl/ACameraManager.cpp
+++ b/camera/ndk/impl/ACameraManager.cpp
@@ -695,7 +695,7 @@
CameraMetadata rawMetadata;
int targetSdkVersion = android_get_application_target_sdk_version();
binder::Status serviceRet = cs->getCameraCharacteristics(String16(cameraIdStr),
- targetSdkVersion, /*overrideToPortrait*/true, &rawMetadata);
+ targetSdkVersion, /*overrideToPortrait*/false, &rawMetadata);
if (!serviceRet.isOk()) {
switch(serviceRet.serviceSpecificErrorCode()) {
case hardware::ICameraService::ERROR_DISCONNECTED:
@@ -747,7 +747,7 @@
binder::Status serviceRet = cs->connectDevice(
callbacks, String16(cameraId), String16(""), {},
hardware::ICameraService::USE_CALLING_UID, /*oomScoreOffset*/0,
- targetSdkVersion, /*overrideToPortrait*/true, /*out*/&deviceRemote);
+ targetSdkVersion, /*overrideToPortrait*/false, /*out*/&deviceRemote);
if (!serviceRet.isOk()) {
ALOGE("%s: connect camera device failed: %s", __FUNCTION__, serviceRet.toString8().string());
diff --git a/camera/ndk/include/camera/NdkCameraDevice.h b/camera/ndk/include/camera/NdkCameraDevice.h
index 7be4bd3..239cb31 100644
--- a/camera/ndk/include/camera/NdkCameraDevice.h
+++ b/camera/ndk/include/camera/NdkCameraDevice.h
@@ -470,18 +470,6 @@
* <a href="http://developer.android.com/reference/android/media/CamcorderProfile.html">
* CamcorderProfiles</a>.</li>
*
- * <li>For efficient YUV processing with <a href=
- * "http://developer.android.com/reference/android/renderscript/package-summary.html">
- * RenderScript</a>:
- * Create a RenderScript
- * <a href="http://developer.android.com/reference/android/renderscript/Allocation.html">
- * Allocation</a> with a supported YUV
- * type, the IO_INPUT flag, and one of the YUV output sizes returned by
- * {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS},
- * Then obtain the Surface with
- * <a href="http://developer.android.com/reference/android/renderscript/Allocation.html#getSurface()">
- * Allocation#getSurface}</a>.</li>
- *
* <li>For access to RAW, uncompressed YUV, or compressed JPEG data in the application: Create an
* {@link AImageReader} object using the {@link AImageReader_new} method with one of the supported
* output formats given by {@link ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS}. Then obtain a
diff --git a/cmds/stagefright/Android.bp b/cmds/stagefright/Android.bp
index e1fe07e..445541e 100644
--- a/cmds/stagefright/Android.bp
+++ b/cmds/stagefright/Android.bp
@@ -211,46 +211,6 @@
}
cc_binary {
- name: "mediafilter",
-
- srcs: [
- "filters/argbtorgba.rscript",
- "filters/nightvision.rscript",
- "filters/saturation.rscript",
- "mediafilter.cpp",
- ],
-
- header_libs: [
- "libmediadrm_headers",
- "libmediametrics_headers",
- "libstagefright_headers",
- "rs-headers",
- ],
-
- shared_libs: [
- "libstagefright",
- "liblog",
- "libutils",
- "libbinder",
- "libstagefright_foundation",
- "libmedia_omx",
- "libui",
- "libgui",
- "libRScpp",
- ],
-
- static_libs: ["libstagefright_mediafilter"],
-
- cflags: [
- "-Wno-multichar",
- ],
-
- sanitize: {
- cfi: true,
- },
-}
-
-cc_binary {
name: "muxer",
srcs: ["muxer.cpp"],
diff --git a/cmds/stagefright/SineSource.cpp b/cmds/stagefright/SineSource.cpp
index 0ecc16c..0656030 100644
--- a/cmds/stagefright/SineSource.cpp
+++ b/cmds/stagefright/SineSource.cpp
@@ -63,12 +63,15 @@
MediaBufferBase **out, const ReadOptions * /* options */) {
*out = NULL;
- MediaBufferBase *buffer;
+ MediaBufferBase *buffer = nullptr;
status_t err = mGroup->acquire_buffer(&buffer);
if (err != OK) {
return err;
}
+ if (buffer == nullptr) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
size_t frameSize = mNumChannels * sizeof(int16_t);
size_t numFramesPerBuffer = buffer->size() / frameSize;
diff --git a/cmds/stagefright/filters/argbtorgba.rscript b/cmds/stagefright/filters/argbtorgba.rscript
deleted file mode 100644
index 229ff8c..0000000
--- a/cmds/stagefright/filters/argbtorgba.rscript
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma version(1)
-#pragma rs java_package_name(com.android.rs.cppbasic)
-#pragma rs_fp_relaxed
-
-void root(const uchar4 *v_in, uchar4 *v_out) {
- v_out->x = v_in->y;
- v_out->y = v_in->z;
- v_out->z = v_in->w;
- v_out->w = v_in->x;
-}
\ No newline at end of file
diff --git a/cmds/stagefright/filters/nightvision.rscript b/cmds/stagefright/filters/nightvision.rscript
deleted file mode 100644
index f61413c..0000000
--- a/cmds/stagefright/filters/nightvision.rscript
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma version(1)
-#pragma rs java_package_name(com.android.rs.cppbasic)
-#pragma rs_fp_relaxed
-
-const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
-const static float3 gNightVisionMult = {0.5f, 1.f, 0.5f};
-
-// calculates luminance of pixel, then biases color balance toward green
-void root(const uchar4 *v_in, uchar4 *v_out) {
- v_out->x = v_in->x; // don't modify A
-
- // get RGB, scale 0-255 uchar to 0-1.0 float
- float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
- v_in->w * 0.003921569f};
-
- // apply filter
- float3 result = dot(rgb, gMonoMult) * gNightVisionMult;
-
- v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
- v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
- v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
-}
diff --git a/cmds/stagefright/filters/saturation.rscript b/cmds/stagefright/filters/saturation.rscript
deleted file mode 100644
index 1de9dd8..0000000
--- a/cmds/stagefright/filters/saturation.rscript
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma version(1)
-#pragma rs java_package_name(com.android.rs.cppbasic)
-#pragma rs_fp_relaxed
-
-const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
-
-// global variables (parameters accessible to application code)
-float gSaturation = 1.0f;
-
-void root(const uchar4 *v_in, uchar4 *v_out) {
- v_out->x = v_in->x; // don't modify A
-
- // get RGB, scale 0-255 uchar to 0-1.0 float
- float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
- v_in->w * 0.003921569f};
-
- // apply saturation filter
- float3 result = dot(rgb, gMonoMult);
- result = mix(result, rgb, gSaturation);
-
- v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
- v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
- v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
-}
diff --git a/cmds/stagefright/mediafilter.cpp b/cmds/stagefright/mediafilter.cpp
deleted file mode 100644
index f042d5e..0000000
--- a/cmds/stagefright/mediafilter.cpp
+++ /dev/null
@@ -1,792 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "mediafilterTest"
-
-#include <inttypes.h>
-
-#include <binder/ProcessState.h>
-#include <filters/ColorConvert.h>
-#include <gui/ISurfaceComposer.h>
-#include <gui/SurfaceComposerClient.h>
-#include <gui/Surface.h>
-#include <media/IMediaHTTPService.h>
-#include <media/MediaCodecBuffer.h>
-#include <mediadrm/ICrypto.h>
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaCodec.h>
-#include <media/stagefright/NuMediaExtractor.h>
-#include <media/stagefright/RenderScriptWrapper.h>
-#include <OMX_IVCommon.h>
-#include <ui/DisplayMode.h>
-
-#include "RenderScript.h"
-#include "ScriptC_argbtorgba.h"
-#include "ScriptC_nightvision.h"
-#include "ScriptC_saturation.h"
-
-// test parameters
-static const bool kTestFlush = true; // Note: true will drop 1 out of
-static const int kFlushAfterFrames = 25; // kFlushAfterFrames output frames
-static const int64_t kTimeout = 500ll;
-
-// built-in filter parameters
-static const int32_t kInvert = false; // ZeroFilter param
-static const float kBlurRadius = 15.0f; // IntrinsicBlurFilter param
-static const float kSaturation = 0.0f; // SaturationFilter param
-
-static void usage(const char *me) {
- fprintf(stderr, "usage: [flags] %s\n"
- "\t[-b] use IntrinsicBlurFilter\n"
- "\t[-c] use argb to rgba conversion RSFilter\n"
- "\t[-n] use night vision RSFilter\n"
- "\t[-r] use saturation RSFilter\n"
- "\t[-s] use SaturationFilter\n"
- "\t[-z] use ZeroFilter (copy filter)\n"
- "\t[-R] render output to surface (enables -S)\n"
- "\t[-S] allocate buffers from a surface\n"
- "\t[-T] use render timestamps (enables -R)\n",
- me);
- exit(1);
-}
-
-namespace android {
-
-struct SaturationRSFilter : RenderScriptWrapper::RSFilterCallback {
- void init(const RSC::sp<RSC::RS> &context) {
- mScript = new ScriptC_saturation(context);
- mScript->set_gSaturation(3.f);
- }
-
- virtual status_t processBuffers(
- RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
- mScript->forEach_root(inBuffer, outBuffer);
-
- return OK;
- }
-
- status_t handleSetParameters(const sp<AMessage> &msg __unused) {
- return OK;
- }
-
-private:
- RSC::sp<ScriptC_saturation> mScript;
-};
-
-struct NightVisionRSFilter : RenderScriptWrapper::RSFilterCallback {
- void init(const RSC::sp<RSC::RS> &context) {
- mScript = new ScriptC_nightvision(context);
- }
-
- virtual status_t processBuffers(
- RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
- mScript->forEach_root(inBuffer, outBuffer);
-
- return OK;
- }
-
- status_t handleSetParameters(const sp<AMessage> &msg __unused) {
- return OK;
- }
-
-private:
- RSC::sp<ScriptC_nightvision> mScript;
-};
-
-struct ARGBToRGBARSFilter : RenderScriptWrapper::RSFilterCallback {
- void init(const RSC::sp<RSC::RS> &context) {
- mScript = new ScriptC_argbtorgba(context);
- }
-
- virtual status_t processBuffers(
- RSC::Allocation *inBuffer, RSC::Allocation *outBuffer) {
- mScript->forEach_root(inBuffer, outBuffer);
-
- return OK;
- }
-
- status_t handleSetParameters(const sp<AMessage> &msg __unused) {
- return OK;
- }
-
-private:
- RSC::sp<ScriptC_argbtorgba> mScript;
-};
-
-struct CodecState {
- sp<MediaCodec> mCodec;
- Vector<sp<MediaCodecBuffer> > mInBuffers;
- Vector<sp<MediaCodecBuffer> > mOutBuffers;
- bool mSignalledInputEOS;
- bool mSawOutputEOS;
- int64_t mNumBuffersDecoded;
-};
-
-struct DecodedFrame {
- size_t index;
- size_t offset;
- size_t size;
- int64_t presentationTimeUs;
- uint32_t flags;
-};
-
-enum FilterType {
- FILTERTYPE_ZERO,
- FILTERTYPE_INTRINSIC_BLUR,
- FILTERTYPE_SATURATION,
- FILTERTYPE_RS_SATURATION,
- FILTERTYPE_RS_NIGHT_VISION,
- FILTERTYPE_RS_ARGB_TO_RGBA,
-};
-
-size_t inputFramesSinceFlush = 0;
-void tryCopyDecodedBuffer(
- List<DecodedFrame> *decodedFrameIndices,
- CodecState *filterState,
- CodecState *vidState) {
- if (decodedFrameIndices->empty()) {
- return;
- }
-
- size_t filterIndex;
- status_t err = filterState->mCodec->dequeueInputBuffer(
- &filterIndex, kTimeout);
- if (err != OK) {
- return;
- }
-
- ++inputFramesSinceFlush;
-
- DecodedFrame frame = *decodedFrameIndices->begin();
-
- // only consume a buffer if we are not going to flush, since we expect
- // the dequeue -> flush -> queue operation to cause an error and
- // not produce an output frame
- if (!kTestFlush || inputFramesSinceFlush < kFlushAfterFrames) {
- decodedFrameIndices->erase(decodedFrameIndices->begin());
- }
- size_t outIndex = frame.index;
-
- const sp<MediaCodecBuffer> &srcBuffer =
- vidState->mOutBuffers.itemAt(outIndex);
- const sp<MediaCodecBuffer> &destBuffer =
- filterState->mInBuffers.itemAt(filterIndex);
-
- sp<AMessage> srcFormat, destFormat;
- vidState->mCodec->getOutputFormat(&srcFormat);
- filterState->mCodec->getInputFormat(&destFormat);
-
- int32_t srcWidth, srcHeight, srcStride, srcSliceHeight;
- int32_t srcColorFormat, destColorFormat;
- int32_t destWidth, destHeight, destStride, destSliceHeight;
- CHECK(srcFormat->findInt32("stride", &srcStride)
- && srcFormat->findInt32("slice-height", &srcSliceHeight)
- && srcFormat->findInt32("width", &srcWidth)
- && srcFormat->findInt32("height", & srcHeight)
- && srcFormat->findInt32("color-format", &srcColorFormat));
- CHECK(destFormat->findInt32("stride", &destStride)
- && destFormat->findInt32("slice-height", &destSliceHeight)
- && destFormat->findInt32("width", &destWidth)
- && destFormat->findInt32("height", & destHeight)
- && destFormat->findInt32("color-format", &destColorFormat));
-
- CHECK(srcWidth <= destStride && srcHeight <= destSliceHeight);
-
- convertYUV420spToARGB(
- srcBuffer->data(),
- srcBuffer->data() + srcStride * srcSliceHeight,
- srcWidth,
- srcHeight,
- destBuffer->data());
-
- // copy timestamp
- int64_t timeUs;
- CHECK(srcBuffer->meta()->findInt64("timeUs", &timeUs));
- destBuffer->meta()->setInt64("timeUs", timeUs);
-
- if (kTestFlush && inputFramesSinceFlush >= kFlushAfterFrames) {
- inputFramesSinceFlush = 0;
-
- // check that queueing a buffer that was dequeued before flush
- // fails with expected error EACCES
- filterState->mCodec->flush();
-
- err = filterState->mCodec->queueInputBuffer(
- filterIndex, 0 /* offset */, destBuffer->size(),
- timeUs, frame.flags);
-
- if (err == OK) {
- ALOGE("FAIL: queue after flush returned OK");
- } else if (err != -EACCES) {
- ALOGE("queueInputBuffer after flush returned %d, "
- "expected -EACCES (-13)", err);
- }
- } else {
- err = filterState->mCodec->queueInputBuffer(
- filterIndex, 0 /* offset */, destBuffer->size(),
- timeUs, frame.flags);
- CHECK(err == OK);
-
- err = vidState->mCodec->releaseOutputBuffer(outIndex);
- CHECK(err == OK);
- }
-}
-
-size_t outputFramesSinceFlush = 0;
-void tryDrainOutputBuffer(
- CodecState *filterState,
- const sp<Surface> &surface, bool renderSurface,
- bool useTimestamp, int64_t *startTimeRender) {
- size_t index;
- size_t offset;
- size_t size;
- int64_t presentationTimeUs;
- uint32_t flags;
- status_t err = filterState->mCodec->dequeueOutputBuffer(
- &index, &offset, &size, &presentationTimeUs, &flags,
- kTimeout);
-
- if (err != OK) {
- return;
- }
-
- ++outputFramesSinceFlush;
-
- if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
- filterState->mCodec->flush();
- }
-
- if (surface == NULL || !renderSurface) {
- err = filterState->mCodec->releaseOutputBuffer(index);
- } else if (useTimestamp) {
- if (*startTimeRender == -1) {
- // begin rendering 2 vsyncs after first decode
- *startTimeRender = systemTime(SYSTEM_TIME_MONOTONIC)
- + 33000000 - (presentationTimeUs * 1000);
- }
- presentationTimeUs =
- (presentationTimeUs * 1000) + *startTimeRender;
- err = filterState->mCodec->renderOutputBufferAndRelease(
- index, presentationTimeUs);
- } else {
- err = filterState->mCodec->renderOutputBufferAndRelease(index);
- }
-
- if (kTestFlush && outputFramesSinceFlush >= kFlushAfterFrames) {
- outputFramesSinceFlush = 0;
-
- // releasing the buffer dequeued before flush should cause an error
- // if so, the frame will also be skipped in output stream
- if (err == OK) {
- ALOGE("FAIL: release after flush returned OK");
- } else if (err != -EACCES) {
- ALOGE("releaseOutputBuffer after flush returned %d, "
- "expected -EACCES (-13)", err);
- }
- } else {
- CHECK(err == OK);
- }
-
- if (flags & MediaCodec::BUFFER_FLAG_EOS) {
- ALOGV("reached EOS on output.");
- filterState->mSawOutputEOS = true;
- }
-}
-
-static int decode(
- const sp<android::ALooper> &looper,
- const char *path,
- const sp<Surface> &surface,
- bool renderSurface,
- bool useTimestamp,
- FilterType filterType) {
-
- static int64_t kTimeout = 500ll;
-
- sp<NuMediaExtractor> extractor = new NuMediaExtractor(NuMediaExtractor::EntryPoint::OTHER);
-
- if (extractor->setDataSource(NULL /* httpService */, path) != OK) {
- fprintf(stderr, "unable to instantiate extractor.\n");
- return 1;
- }
-
- KeyedVector<size_t, CodecState> stateByTrack;
-
- CodecState *vidState = NULL;
- for (size_t i = 0; i < extractor->countTracks(); ++i) {
- sp<AMessage> format;
- status_t err = extractor->getTrackFormat(i, &format);
- CHECK(err == OK);
-
- AString mime;
- CHECK(format->findString("mime", &mime));
- bool isVideo = !strncasecmp(mime.c_str(), "video/", 6);
- if (!isVideo) {
- continue;
- }
-
- ALOGV("selecting track %zu", i);
-
- err = extractor->selectTrack(i);
- CHECK(err == OK);
-
- CodecState *state =
- &stateByTrack.editValueAt(stateByTrack.add(i, CodecState()));
-
- vidState = state;
-
- state->mNumBuffersDecoded = 0;
-
- state->mCodec = MediaCodec::CreateByType(
- looper, mime.c_str(), false /* encoder */);
-
- CHECK(state->mCodec != NULL);
-
- err = state->mCodec->configure(
- format, NULL /* surface */, NULL /* crypto */, 0 /* flags */);
-
- CHECK(err == OK);
-
- state->mSignalledInputEOS = false;
- state->mSawOutputEOS = false;
-
- break;
- }
- CHECK(!stateByTrack.isEmpty());
- CHECK(vidState != NULL);
- sp<AMessage> vidFormat;
- vidState->mCodec->getOutputFormat(&vidFormat);
-
- // set filter to use ARGB8888
- vidFormat->setInt32("color-format", OMX_COLOR_Format32bitARGB8888);
- // set app cache directory path
- vidFormat->setString("cacheDir", "/system/bin");
-
- // create RenderScript context for RSFilters
- RSC::sp<RSC::RS> context = new RSC::RS();
- context->init("/system/bin");
-
- sp<RenderScriptWrapper::RSFilterCallback> rsFilter;
-
- // create renderscript wrapper for RSFilters
- sp<RenderScriptWrapper> rsWrapper = new RenderScriptWrapper;
- rsWrapper->mContext = context.get();
-
- CodecState *filterState = new CodecState();
- filterState->mNumBuffersDecoded = 0;
-
- sp<AMessage> params = new AMessage();
-
- switch (filterType) {
- case FILTERTYPE_ZERO:
- {
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.zerofilter");
- params->setInt32("invert", kInvert);
- break;
- }
- case FILTERTYPE_INTRINSIC_BLUR:
- {
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.intrinsicblur");
- params->setFloat("blur-radius", kBlurRadius);
- break;
- }
- case FILTERTYPE_SATURATION:
- {
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.saturation");
- params->setFloat("saturation", kSaturation);
- break;
- }
- case FILTERTYPE_RS_SATURATION:
- {
- SaturationRSFilter *satFilter = new SaturationRSFilter;
- satFilter->init(context);
- rsFilter = satFilter;
- rsWrapper->mCallback = rsFilter;
- vidFormat->setObject("rs-wrapper", rsWrapper);
-
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.RenderScript");
- break;
- }
- case FILTERTYPE_RS_NIGHT_VISION:
- {
- NightVisionRSFilter *nightVisionFilter = new NightVisionRSFilter;
- nightVisionFilter->init(context);
- rsFilter = nightVisionFilter;
- rsWrapper->mCallback = rsFilter;
- vidFormat->setObject("rs-wrapper", rsWrapper);
-
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.RenderScript");
- break;
- }
- case FILTERTYPE_RS_ARGB_TO_RGBA:
- {
- ARGBToRGBARSFilter *argbToRgbaFilter = new ARGBToRGBARSFilter;
- argbToRgbaFilter->init(context);
- rsFilter = argbToRgbaFilter;
- rsWrapper->mCallback = rsFilter;
- vidFormat->setObject("rs-wrapper", rsWrapper);
-
- filterState->mCodec = MediaCodec::CreateByComponentName(
- looper, "android.filter.RenderScript");
- break;
- }
- default:
- {
- LOG_ALWAYS_FATAL("mediacodec.cpp error: unrecognized FilterType");
- break;
- }
- }
- CHECK(filterState->mCodec != NULL);
-
- status_t err = filterState->mCodec->configure(
- vidFormat /* format */, surface, NULL /* crypto */, 0 /* flags */);
- CHECK(err == OK);
-
- filterState->mSignalledInputEOS = false;
- filterState->mSawOutputEOS = false;
-
- int64_t startTimeUs = android::ALooper::GetNowUs();
- int64_t startTimeRender = -1;
-
- for (size_t i = 0; i < stateByTrack.size(); ++i) {
- CodecState *state = &stateByTrack.editValueAt(i);
-
- sp<MediaCodec> codec = state->mCodec;
-
- CHECK_EQ((status_t)OK, codec->start());
-
- CHECK_EQ((status_t)OK, codec->getInputBuffers(&state->mInBuffers));
- CHECK_EQ((status_t)OK, codec->getOutputBuffers(&state->mOutBuffers));
-
- ALOGV("got %zu input and %zu output buffers",
- state->mInBuffers.size(), state->mOutBuffers.size());
- }
-
- CHECK_EQ((status_t)OK, filterState->mCodec->setParameters(params));
-
- if (kTestFlush) {
- status_t flushErr = filterState->mCodec->flush();
- if (flushErr == OK) {
- ALOGE("FAIL: Flush before start returned OK");
- } else {
- ALOGV("Flush before start returned status %d, usually ENOSYS (-38)",
- flushErr);
- }
- }
-
- CHECK_EQ((status_t)OK, filterState->mCodec->start());
- CHECK_EQ((status_t)OK, filterState->mCodec->getInputBuffers(
- &filterState->mInBuffers));
- CHECK_EQ((status_t)OK, filterState->mCodec->getOutputBuffers(
- &filterState->mOutBuffers));
-
- if (kTestFlush) {
- status_t flushErr = filterState->mCodec->flush();
- if (flushErr != OK) {
- ALOGE("FAIL: Flush after start returned %d, expect OK (0)",
- flushErr);
- } else {
- ALOGV("Flush immediately after start OK");
- }
- }
-
- List<DecodedFrame> decodedFrameIndices;
-
- // loop until decoder reaches EOS
- bool sawInputEOS = false;
- bool sawOutputEOSOnAllTracks = false;
- while (!sawOutputEOSOnAllTracks) {
- if (!sawInputEOS) {
- size_t trackIndex;
- status_t err = extractor->getSampleTrackIndex(&trackIndex);
-
- if (err != OK) {
- ALOGV("saw input eos");
- sawInputEOS = true;
- } else {
- CodecState *state = &stateByTrack.editValueFor(trackIndex);
-
- size_t index;
- err = state->mCodec->dequeueInputBuffer(&index, kTimeout);
-
- if (err == OK) {
- ALOGV("filling input buffer %zu", index);
-
- const sp<MediaCodecBuffer> &buffer = state->mInBuffers.itemAt(index);
- sp<ABuffer> abuffer = new ABuffer(buffer->base(), buffer->capacity());
-
- err = extractor->readSampleData(abuffer);
- CHECK(err == OK);
- buffer->setRange(abuffer->offset(), abuffer->size());
-
- int64_t timeUs;
- err = extractor->getSampleTime(&timeUs);
- CHECK(err == OK);
-
- uint32_t bufferFlags = 0;
-
- err = state->mCodec->queueInputBuffer(
- index, 0 /* offset */, buffer->size(),
- timeUs, bufferFlags);
-
- CHECK(err == OK);
-
- extractor->advance();
- } else {
- CHECK_EQ(err, -EAGAIN);
- }
- }
- } else {
- for (size_t i = 0; i < stateByTrack.size(); ++i) {
- CodecState *state = &stateByTrack.editValueAt(i);
-
- if (!state->mSignalledInputEOS) {
- size_t index;
- status_t err =
- state->mCodec->dequeueInputBuffer(&index, kTimeout);
-
- if (err == OK) {
- ALOGV("signalling input EOS on track %zu", i);
-
- err = state->mCodec->queueInputBuffer(
- index, 0 /* offset */, 0 /* size */,
- 0ll /* timeUs */, MediaCodec::BUFFER_FLAG_EOS);
-
- CHECK(err == OK);
-
- state->mSignalledInputEOS = true;
- } else {
- CHECK_EQ(err, -EAGAIN);
- }
- }
- }
- }
-
- sawOutputEOSOnAllTracks = true;
- for (size_t i = 0; i < stateByTrack.size(); ++i) {
- CodecState *state = &stateByTrack.editValueAt(i);
-
- if (state->mSawOutputEOS) {
- continue;
- } else {
- sawOutputEOSOnAllTracks = false;
- }
-
- DecodedFrame frame;
- status_t err = state->mCodec->dequeueOutputBuffer(
- &frame.index, &frame.offset, &frame.size,
- &frame.presentationTimeUs, &frame.flags, kTimeout);
-
- if (err == OK) {
- ALOGV("draining decoded buffer %zu, time = %lld us",
- frame.index, (long long)frame.presentationTimeUs);
-
- ++(state->mNumBuffersDecoded);
-
- decodedFrameIndices.push_back(frame);
-
- if (frame.flags & MediaCodec::BUFFER_FLAG_EOS) {
- ALOGV("reached EOS on decoder output.");
- state->mSawOutputEOS = true;
- }
-
- } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
- ALOGV("INFO_OUTPUT_BUFFERS_CHANGED");
- CHECK_EQ((status_t)OK, state->mCodec->getOutputBuffers(
- &state->mOutBuffers));
-
- ALOGV("got %zu output buffers", state->mOutBuffers.size());
- } else if (err == INFO_FORMAT_CHANGED) {
- sp<AMessage> format;
- CHECK_EQ((status_t)OK, state->mCodec->getOutputFormat(&format));
-
- ALOGV("INFO_FORMAT_CHANGED: %s",
- format->debugString().c_str());
- } else {
- CHECK_EQ(err, -EAGAIN);
- }
-
- tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
-
- tryDrainOutputBuffer(
- filterState, surface, renderSurface,
- useTimestamp, &startTimeRender);
- }
- }
-
- // after EOS on decoder, let filter reach EOS
- while (!filterState->mSawOutputEOS) {
- tryCopyDecodedBuffer(&decodedFrameIndices, filterState, vidState);
-
- tryDrainOutputBuffer(
- filterState, surface, renderSurface,
- useTimestamp, &startTimeRender);
- }
-
- int64_t elapsedTimeUs = android::ALooper::GetNowUs() - startTimeUs;
-
- for (size_t i = 0; i < stateByTrack.size(); ++i) {
- CodecState *state = &stateByTrack.editValueAt(i);
-
- CHECK_EQ((status_t)OK, state->mCodec->release());
-
- printf("track %zu: %" PRId64 " frames decoded and filtered, "
- "%.2f fps.\n", i, state->mNumBuffersDecoded,
- state->mNumBuffersDecoded * 1E6 / elapsedTimeUs);
- }
-
- return 0;
-}
-
-} // namespace android
-
-int main(int argc, char **argv) {
- using namespace android;
-
- const char *me = argv[0];
-
- bool useSurface = false;
- bool renderSurface = false;
- bool useTimestamp = false;
- FilterType filterType = FILTERTYPE_ZERO;
-
- int res;
- while ((res = getopt(argc, argv, "bcnrszTRSh")) >= 0) {
- switch (res) {
- case 'b':
- {
- filterType = FILTERTYPE_INTRINSIC_BLUR;
- break;
- }
- case 'c':
- {
- filterType = FILTERTYPE_RS_ARGB_TO_RGBA;
- break;
- }
- case 'n':
- {
- filterType = FILTERTYPE_RS_NIGHT_VISION;
- break;
- }
- case 'r':
- {
- filterType = FILTERTYPE_RS_SATURATION;
- break;
- }
- case 's':
- {
- filterType = FILTERTYPE_SATURATION;
- break;
- }
- case 'z':
- {
- filterType = FILTERTYPE_ZERO;
- break;
- }
- case 'T':
- {
- useTimestamp = true;
- FALLTHROUGH_INTENDED;
- }
- case 'R':
- {
- renderSurface = true;
- FALLTHROUGH_INTENDED;
- }
- case 'S':
- {
- useSurface = true;
- break;
- }
- case '?':
- case 'h':
- default:
- {
- usage(me);
- break;
- }
- }
- }
-
- argc -= optind;
- argv += optind;
-
- if (argc != 1) {
- usage(me);
- }
-
- ProcessState::self()->startThreadPool();
-
- android::sp<android::ALooper> looper = new android::ALooper;
- looper->start();
-
- android::sp<SurfaceComposerClient> composerClient;
- android::sp<SurfaceControl> control;
- android::sp<Surface> surface;
-
- if (useSurface) {
- composerClient = new SurfaceComposerClient;
- CHECK_EQ((status_t)OK, composerClient->initCheck());
-
- const std::vector<PhysicalDisplayId> ids = SurfaceComposerClient::getPhysicalDisplayIds();
- CHECK(!ids.empty());
-
- const android::sp<IBinder> display = SurfaceComposerClient::getPhysicalDisplayToken(ids.front());
- CHECK(display != nullptr);
-
- ui::DisplayMode mode;
- CHECK_EQ(SurfaceComposerClient::getActiveDisplayMode(display, &mode), NO_ERROR);
-
- const ui::Size& resolution = mode.resolution;
- const ssize_t displayWidth = resolution.getWidth();
- const ssize_t displayHeight = resolution.getHeight();
-
- ALOGV("display is %zd x %zd", displayWidth, displayHeight);
-
- control = composerClient->createSurface(
- String8("A Surface"), displayWidth, displayHeight,
- PIXEL_FORMAT_RGBA_8888, 0);
-
- CHECK(control != NULL);
- CHECK(control->isValid());
-
- SurfaceComposerClient::Transaction{}
- .setLayer(control, INT_MAX)
- .show(control)
- .apply();
-
- surface = control->getSurface();
- CHECK(surface != NULL);
- }
-
- decode(looper, argv[0], surface, renderSurface, useTimestamp, filterType);
-
- if (useSurface) {
- composerClient->dispose();
- }
-
- looper->stop();
-
- return 0;
-}
diff --git a/cmds/stagefright/record.cpp b/cmds/stagefright/record.cpp
index 5743ad6..87e8832 100644
--- a/cmds/stagefright/record.cpp
+++ b/cmds/stagefright/record.cpp
@@ -89,6 +89,9 @@
if (err != OK) {
return err;
}
+ if (buffer == nullptr) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
char x = (char)((double)rand() / RAND_MAX * 255);
memset((*buffer)->data(), x, mSize);
diff --git a/drm/README.md b/drm/README.md
new file mode 100644
index 0000000..2681aac
--- /dev/null
+++ b/drm/README.md
@@ -0,0 +1,25 @@
+## AIDL error handling
+
+Starting in **Android U (14)**, `libmediadrm` (app-side) understands extra error
+details from **AIDL** DRM HALs, passed through the binder exception message
+as a JSON string. The supported fields are:
+* `cdmError` (*int*)
+* `oemError` (*int*)
+* `context` (*int*)
+* `errorMessage` (*str*)
+
+The error details are reported to apps through the Java interface
+`android.media.MediaDrmThrowable`. Please see the javadoc of `MediaDrmThrowable`
+for detailed definitions of each field above.
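+
+As a purely illustrative sketch (all field values below are hypothetical), an
+AIDL HAL might pack these fields into the exception message like this:
+
+```json
+{
+  "cdmError": 3,
+  "oemError": 101,
+  "context": 2,
+  "errorMessage": "license policy check failed"
+}
+```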
diff --git a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
index be2b546..6e55a16 100644
--- a/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
+++ b/drm/libdrmframework/plugins/forward-lock/FwdLockEngine/Android.bp
@@ -48,7 +48,6 @@
srcs: ["src/FwdLockEngine.cpp"],
shared_libs: [
- "libandroidicu",
"libutils",
"liblog",
"libdl",
diff --git a/drm/libmediadrm/Android.bp b/drm/libmediadrm/Android.bp
index 1e1a49d..7d68c5b 100644
--- a/drm/libmediadrm/Android.bp
+++ b/drm/libmediadrm/Android.bp
@@ -36,6 +36,7 @@
"DrmUtils.cpp",
"DrmHalListener.cpp",
"DrmStatus.cpp",
+ "DrmMetricsLogger.cpp",
],
local_include_dirs: [
diff --git a/drm/libmediadrm/DrmHalAidl.cpp b/drm/libmediadrm/DrmHalAidl.cpp
index c369529..1844acb 100644
--- a/drm/libmediadrm/DrmHalAidl.cpp
+++ b/drm/libmediadrm/DrmHalAidl.cpp
@@ -393,7 +393,8 @@
// DrmHalAidl methods
DrmHalAidl::DrmHalAidl()
- : mListener(::ndk::SharedRefBase::make<DrmHalListener>(&mMetrics)),
+ : mMetrics(std::make_shared<MediaDrmMetrics>()),
+ mListener(::ndk::SharedRefBase::make<DrmHalListener>(mMetrics)),
mFactories(DrmUtils::makeDrmFactoriesAidl()),
mInitCheck((mFactories.size() == 0) ? ERROR_UNSUPPORTED : NO_INIT) {}
@@ -462,8 +463,8 @@
Uuid uuidAidl = DrmUtils::toAidlUuid(uuid);
std::string appPackageNameAidl = toStdString(appPackageName);
std::shared_ptr<IDrmPluginAidl> pluginAidl;
- mMetrics.SetAppPackageName(appPackageName);
- mMetrics.SetAppUid(AIBinder_getCallingUid());
+ mMetrics->SetAppPackageName(appPackageName);
+ mMetrics->SetAppUid(AIBinder_getCallingUid());
for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
::ndk::ScopedAStatus status =
mFactories[i]->createDrmPlugin(uuidAidl, appPackageNameAidl, &pluginAidl);
@@ -539,10 +540,10 @@
AIBinder_getCallingPid(), std::static_pointer_cast<IResourceManagerClient>(client),
sessionId);
mOpenSessions.push_back(client);
- mMetrics.SetSessionStart(sessionId);
+ mMetrics->SetSessionStart(sessionId);
}
- mMetrics.mOpenSessionCounter.Increment(err);
+ mMetrics->mOpenSessionCounter.Increment(err);
return err;
}
@@ -562,10 +563,10 @@
}
}
- mMetrics.SetSessionEnd(sessionId);
+ mMetrics->SetSessionEnd(sessionId);
}
- mMetrics.mCloseSessionCounter.Increment(response);
+ mMetrics->mCloseSessionCounter.Increment(response);
return response;
}
@@ -577,7 +578,7 @@
DrmPlugin::KeyRequestType* keyRequestType) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- EventTimer<status_t> keyRequestTimer(&mMetrics.mGetKeyRequestTimeUs);
+ EventTimer<status_t> keyRequestTimer(&mMetrics->mGetKeyRequestTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
@@ -618,7 +619,7 @@
Vector<uint8_t>& keySetId) {
Mutex::Autolock autoLock(mLock);
INIT_CHECK();
- EventTimer<status_t> keyResponseTimer(&mMetrics.mProvideKeyResponseTimeUs);
+ EventTimer<status_t> keyResponseTimer(&mMetrics->mProvideKeyResponseTimeUs);
DrmSessionManager::Instance()->useSession(sessionId);
@@ -687,7 +688,7 @@
defaultUrl = toString8(requestAidl.defaultUrl);
err = statusAidlToDrmStatus(status);
- mMetrics.mGetProvisionRequestCounter.Increment(err);
+ mMetrics->mGetProvisionRequestCounter.Increment(err);
return err;
}
@@ -704,7 +705,7 @@
certificate = toVector(result.certificate);
wrappedKey = toVector(result.wrappedKey);
err = statusAidlToDrmStatus(status);
- mMetrics.mProvideProvisionResponseCounter.Increment(err);
+ mMetrics->mProvideProvisionResponseCounter.Increment(err);
return err;
}
@@ -914,7 +915,7 @@
value = toVector(result);
err = statusAidlToDrmStatus(status);
if (name == kPropertyDeviceUniqueId) {
- mMetrics.mGetDeviceUniqueIdCounter.Increment(err);
+ mMetrics->mGetDeviceUniqueIdCounter.Increment(err);
}
return err;
}
@@ -940,7 +941,7 @@
if (consumer == nullptr) {
return DrmStatus(UNEXPECTED_NULL);
}
- consumer->consumeFrameworkMetrics(mMetrics);
+ consumer->consumeFrameworkMetrics(*mMetrics.get());
// Append vendor metrics if they are supported.
@@ -1146,7 +1147,7 @@
getPropertyByteArrayInternal(String8("metrics"), metricsVector) == OK) {
metricsString = toBase64StringNoPad(metricsVector.array(), metricsVector.size());
status_t res = android::reportDrmPluginMetrics(metricsString, vendor, description,
- mMetrics.GetAppUid());
+ mMetrics->GetAppUid());
if (res != OK) {
ALOGE("Metrics were retrieved but could not be reported: %d", res);
}
@@ -1156,7 +1157,7 @@
std::string DrmHalAidl::reportFrameworkMetrics(const std::string& pluginMetrics) const {
mediametrics_handle_t item(mediametrics_create("mediadrm"));
- mediametrics_setUid(item, mMetrics.GetAppUid());
+ mediametrics_setUid(item, mMetrics->GetAppUid());
String8 vendor;
String8 description;
status_t result = getPropertyStringInternal(String8("vendor"), vendor);
@@ -1173,7 +1174,7 @@
}
std::string serializedMetrics;
- result = mMetrics.GetSerializedMetrics(&serializedMetrics);
+ result = mMetrics->GetSerializedMetrics(&serializedMetrics);
if (result != OK) {
ALOGE("Failed to serialize framework metrics: %d", result);
}
diff --git a/drm/libmediadrm/DrmHalHidl.cpp b/drm/libmediadrm/DrmHalHidl.cpp
index 2e0bfee..56d63c5 100644
--- a/drm/libmediadrm/DrmHalHidl.cpp
+++ b/drm/libmediadrm/DrmHalHidl.cpp
@@ -514,10 +514,14 @@
if (mimeType == "") {
// isCryptoSchemeSupported(uuid)
*isSupported = true;
- } else {
- // isCryptoSchemeSupported(uuid, mimeType)
- *isSupported = factory->isContentTypeSupported(mimeType.string());
+ return DrmStatus(OK);
}
+ // isCryptoSchemeSupported(uuid, mimeType)
+ auto hResult = factory->isContentTypeSupported(mimeType.string());
+ if (!hResult.isOk()) {
+ return DrmStatus(DEAD_OBJECT);
+ }
+ *isSupported = hResult;
return DrmStatus(OK);
} else if (mimeType == "") {
return DrmStatus(BAD_VALUE);
@@ -527,8 +531,12 @@
if (factoryV1_2 == NULL) {
return DrmStatus(ERROR_UNSUPPORTED);
} else {
- *isSupported = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(),
+ auto hResult = factoryV1_2->isCryptoSchemeSupported_1_2(uuid, mimeType.string(),
toHidlSecurityLevel(level));
+ if (!hResult.isOk()) {
+ return DrmStatus(DEAD_OBJECT);
+ }
+ *isSupported = hResult;
return DrmStatus(OK);
}
}
@@ -538,7 +546,8 @@
Mutex::Autolock autoLock(mLock);
*isSupported = false;
for (ssize_t i = mFactories.size() - 1; i >= 0; i--) {
- if (mFactories[i]->isCryptoSchemeSupported(uuid)) {
+ auto hResult = mFactories[i]->isCryptoSchemeSupported(uuid);
+ if (hResult.isOk() && hResult) {
return matchMimeTypeAndSecurityLevel(mFactories[i], uuid, mimeType, level, isSupported);
}
}
diff --git a/drm/libmediadrm/DrmHalListener.cpp b/drm/libmediadrm/DrmHalListener.cpp
index cfcf475..4e868ac 100644
--- a/drm/libmediadrm/DrmHalListener.cpp
+++ b/drm/libmediadrm/DrmHalListener.cpp
@@ -37,12 +37,12 @@
return vec;
}
-DrmHalListener::DrmHalListener(MediaDrmMetrics* metrics)
+DrmHalListener::DrmHalListener(const std::shared_ptr<MediaDrmMetrics>& metrics)
: mMetrics(metrics) {}
DrmHalListener::~DrmHalListener() {}
-void DrmHalListener::setListener(sp<IDrmClient> listener) {
+void DrmHalListener::setListener(const sp<IDrmClient>& listener) {
Mutex::Autolock lock(mEventLock);
mListener = listener;
}
diff --git a/drm/libmediadrm/DrmMetricsLogger.cpp b/drm/libmediadrm/DrmMetricsLogger.cpp
new file mode 100644
index 0000000..89b1dcc
--- /dev/null
+++ b/drm/libmediadrm/DrmMetricsLogger.cpp
@@ -0,0 +1,562 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// #define LOG_NDEBUG 0
+#define LOG_TAG "DrmMetricsLogger"
+
+#include <media/MediaMetrics.h>
+#include <media/stagefright/foundation/AString.h>
+#include <media/stagefright/foundation/base64.h>
+#include <mediadrm/DrmHal.h>
+#include <mediadrm/DrmMetricsLogger.h>
+#include <mediadrm/DrmUtils.h>
+
+namespace android {
+
+namespace {
+
+std::vector<uint8_t> toStdVec(Vector<uint8_t> const& sessionId) {
+ auto sessionKey = sessionId.array();
+ std::vector<uint8_t> vec(sessionKey, sessionKey + sessionId.size());
+ return vec;
+}
+
+} // namespace
+
+DrmMetricsLogger::DrmMetricsLogger(IDrmFrontend frontend)
+ : mImpl(sp<DrmHal>::make()), mUuid(), mObjNonce(), mFrontend(frontend) {}
+
+DrmMetricsLogger::~DrmMetricsLogger() {}
+
+DrmStatus DrmMetricsLogger::initCheck() const {
+ DrmStatus status = mImpl->initCheck();
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::isCryptoSchemeSupported(const uint8_t uuid[IDRM_UUID_SIZE],
+ const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* result) {
+ DrmStatus status = mImpl->isCryptoSchemeSupported(uuid, mimeType, securityLevel, result);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::createPlugin(const uint8_t uuid[IDRM_UUID_SIZE],
+ const String8& appPackageName) {
+ std::memcpy(mUuid.data(), uuid, IDRM_UUID_SIZE);
+ if (kUuidSchemeMap.count(mUuid)) {
+ mScheme = kUuidSchemeMap.at(mUuid);
+ } else {
+ mScheme = "Other";
+ }
+ if (generateNonce(&mObjNonce, kNonceSize, __func__) != OK) {
+ return ERROR_DRM_RESOURCE_BUSY;
+ }
+ DrmStatus status = mImpl->createPlugin(uuid, appPackageName);
+ if (status == OK) {
+ reportMediaDrmCreated();
+ } else {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::destroyPlugin() {
+ DrmStatus status = mImpl->destroyPlugin();
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::openSession(DrmPlugin::SecurityLevel securityLevel,
+ Vector<uint8_t>& sessionId) {
+ SessionContext ctx{};
+ if (generateNonce(&ctx.mNonce, kNonceSize, __func__) != OK) {
+ return ERROR_DRM_RESOURCE_BUSY;
+ }
+ DrmStatus status = mImpl->openSession(securityLevel, sessionId);
+ if (status == OK) {
+ std::vector<uint8_t> sessionKey = toStdVec(sessionId);
+ ctx.mTargetSecurityLevel = securityLevel;
+ if (getSecurityLevel(sessionId, &ctx.mActualSecurityLevel) != OK) {
+ ctx.mActualSecurityLevel = DrmPlugin::kSecurityLevelUnknown;
+ }
+ {
+ const std::lock_guard<std::mutex> lock(mSessionMapMutex);
+ mSessionMap.insert({sessionKey, ctx});
+ }
+ reportMediaDrmSessionOpened(sessionKey);
+ } else {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::closeSession(Vector<uint8_t> const& sessionId) {
+ std::vector<uint8_t> sid = toStdVec(sessionId);
+ {
+ const std::lock_guard<std::mutex> lock(mSessionMapMutex);
+ mSessionMap.erase(sid);
+ }
+ DrmStatus status = mImpl->closeSession(sessionId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, sid);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getKeyRequest(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& initData, String8 const& mimeType,
+ DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType) {
+ DrmStatus status =
+ mImpl->getKeyRequest(sessionId, initData, mimeType, keyType, optionalParameters,
+ request, defaultUrl, keyRequestType);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response,
+ Vector<uint8_t>& keySetId) {
+ DrmStatus status = mImpl->provideKeyResponse(sessionId, response, keySetId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::removeKeys(Vector<uint8_t> const& keySetId) {
+ DrmStatus status = mImpl->removeKeys(keySetId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::restoreKeys(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& keySetId) {
+ DrmStatus status = mImpl->restoreKeys(sessionId, keySetId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const {
+ DrmStatus status = mImpl->queryKeyStatus(sessionId, infoMap);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getProvisionRequest(String8 const& certType,
+ String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl) {
+ DrmStatus status = mImpl->getProvisionRequest(certType, certAuthority, request, defaultUrl);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey) {
+ DrmStatus status = mImpl->provideProvisionResponse(response, certificate, wrappedKey);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getSecureStops(List<Vector<uint8_t>>& secureStops) {
+ DrmStatus status = mImpl->getSecureStops(secureStops);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getSecureStopIds(List<Vector<uint8_t>>& secureStopIds) {
+ DrmStatus status = mImpl->getSecureStopIds(secureStopIds);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getSecureStop(Vector<uint8_t> const& ssid,
+ Vector<uint8_t>& secureStop) {
+ DrmStatus status = mImpl->getSecureStop(ssid, secureStop);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::releaseSecureStops(Vector<uint8_t> const& ssRelease) {
+ DrmStatus status = mImpl->releaseSecureStops(ssRelease);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::removeSecureStop(Vector<uint8_t> const& ssid) {
+ DrmStatus status = mImpl->removeSecureStop(ssid);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::removeAllSecureStops() {
+ DrmStatus status = mImpl->removeAllSecureStops();
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+ DrmPlugin::HdcpLevel* maxLevel) const {
+ DrmStatus status = mImpl->getHdcpLevels(connectedLevel, maxLevel);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getNumberOfSessions(uint32_t* currentSessions,
+ uint32_t* maxSessions) const {
+ DrmStatus status = mImpl->getNumberOfSessions(currentSessions, maxSessions);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const {
+ DrmStatus status = mImpl->getSecurityLevel(sessionId, level);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const {
+ DrmStatus status = mImpl->getOfflineLicenseKeySetIds(keySetIds);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::removeOfflineLicense(Vector<uint8_t> const& keySetId) {
+ DrmStatus status = mImpl->removeOfflineLicense(keySetId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getOfflineLicenseState(
+ Vector<uint8_t> const& keySetId, DrmPlugin::OfflineLicenseState* licenseState) const {
+ DrmStatus status = mImpl->getOfflineLicenseState(keySetId, licenseState);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getPropertyString(String8 const& name, String8& value) const {
+ DrmStatus status = mImpl->getPropertyString(name, value);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getPropertyByteArray(String8 const& name,
+ Vector<uint8_t>& value) const {
+ DrmStatus status = mImpl->getPropertyByteArray(name, value);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setPropertyString(String8 const& name, String8 const& value) const {
+ DrmStatus status = mImpl->setPropertyString(name, value);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setPropertyByteArray(String8 const& name,
+ Vector<uint8_t> const& value) const {
+ DrmStatus status = mImpl->setPropertyByteArray(name, value);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getMetrics(const sp<IDrmMetricsConsumer>& consumer) {
+ DrmStatus status = mImpl->getMetrics(consumer);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm) {
+ DrmStatus status = mImpl->setCipherAlgorithm(sessionId, algorithm);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setMacAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm) {
+ DrmStatus status = mImpl->setMacAlgorithm(sessionId, algorithm);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
+ DrmStatus status = mImpl->encrypt(sessionId, keyId, input, iv, output);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output) {
+ DrmStatus status = mImpl->decrypt(sessionId, keyId, input, iv, output);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature) {
+ DrmStatus status = mImpl->sign(sessionId, keyId, message, signature);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match) {
+ DrmStatus status = mImpl->verify(sessionId, keyId, message, signature, match);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message,
+ Vector<uint8_t> const& wrappedKey, Vector<uint8_t>& signature) {
+ DrmStatus status = mImpl->signRSA(sessionId, algorithm, message, wrappedKey, signature);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setListener(const sp<IDrmClient>& listener) {
+ DrmStatus status = mImpl->setListener(listener);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::requiresSecureDecoder(const char* mime, bool* required) const {
+ DrmStatus status = mImpl->requiresSecureDecoder(mime, required);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::requiresSecureDecoder(const char* mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const {
+ DrmStatus status = mImpl->requiresSecureDecoder(mime, securityLevel, required);
+ if (status != OK) {
+ reportMediaDrmErrored(status, "requiresSecureDecoderLevel");
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::setPlaybackId(Vector<uint8_t> const& sessionId,
+ const char* playbackId) {
+ DrmStatus status = mImpl->setPlaybackId(sessionId, playbackId);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__, toStdVec(sessionId));
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const {
+ DrmStatus status = mImpl->getLogMessages(logs);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+DrmStatus DrmMetricsLogger::getSupportedSchemes(std::vector<uint8_t>& schemes) const {
+ DrmStatus status = mImpl->getSupportedSchemes(schemes);
+ if (status != OK) {
+ reportMediaDrmErrored(status, __func__);
+ }
+ return status;
+}
+
+void DrmMetricsLogger::reportMediaDrmCreated() const {
+ mediametrics_handle_t handle(mediametrics_create("mediadrm.created"));
+ mediametrics_setCString(handle, "scheme", mScheme.c_str());
+ mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
+ mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt32(handle, "frontend", mFrontend);
+ mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
+ mediametrics_selfRecord(handle);
+ mediametrics_delete(handle);
+}
+
+void DrmMetricsLogger::reportMediaDrmSessionOpened(const std::vector<uint8_t>& sessionId) const {
+ mediametrics_handle_t handle(mediametrics_create("mediadrm.session_opened"));
+ mediametrics_setCString(handle, "scheme", mScheme.c_str());
+ mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
+ mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt32(handle, "frontend", mFrontend);
+ mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
+ const std::lock_guard<std::mutex> lock(mSessionMapMutex);
+ auto it = mSessionMap.find(sessionId);
+ if (it != mSessionMap.end()) {
+ mediametrics_setCString(handle, "session_nonce", it->second.mNonce.c_str());
+        mediametrics_setInt64(handle, "requested_security_level", it->second.mTargetSecurityLevel);
+        mediametrics_setInt64(handle, "opened_security_level", it->second.mActualSecurityLevel);
+ }
+ mediametrics_selfRecord(handle);
+ mediametrics_delete(handle);
+}
+
+void DrmMetricsLogger::reportMediaDrmErrored(const DrmStatus& error_code, const char* api,
+ const std::vector<uint8_t>& sessionId) const {
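+    // Emits a "mediadrm.errored" record tagged with the plugin identity (scheme,
+    // uuid, frontend, object nonce), the failing API name, the error code and its
+    // CDM/OEM/context details, plus per-session info when a sessionId is given.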
+ mediametrics_handle_t handle(mediametrics_create("mediadrm.errored"));
+ mediametrics_setCString(handle, "scheme", mScheme.c_str());
+ mediametrics_setInt64(handle, "uuid_msb", be64toh(mUuid[0]));
+ mediametrics_setInt64(handle, "uuid_lsb", be64toh(mUuid[1]));
+ mediametrics_setInt32(handle, "frontend", mFrontend);
+ mediametrics_setCString(handle, "object_nonce", mObjNonce.c_str());
+ if (!sessionId.empty()) {
+ const std::lock_guard<std::mutex> lock(mSessionMapMutex);
+ auto it = mSessionMap.find(sessionId);
+ if (it != mSessionMap.end()) {
+ mediametrics_setCString(handle, "session_nonce", it->second.mNonce.c_str());
+            mediametrics_setInt64(handle, "security_level", it->second.mActualSecurityLevel);
+ }
+ }
+ mediametrics_setCString(handle, "api", api);
+ mediametrics_setInt32(handle, "error_code", error_code);
+ mediametrics_setInt32(handle, "cdm_err", error_code.getCdmErr());
+ mediametrics_setInt32(handle, "oem_err", error_code.getOemErr());
+ mediametrics_setInt32(handle, "error_context", error_code.getContext());
+ mediametrics_selfRecord(handle);
+ mediametrics_delete(handle);
+}
+
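+// Produces a base64-encoded random nonce of `size` bytes; a failed or short
+// getrandom() read is reported via reportMediaDrmErrored() and mapped to
+// ERROR_DRM_RESOURCE_BUSY.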
+DrmStatus DrmMetricsLogger::generateNonce(std::string* out, size_t size, const char* api) {
+ std::vector<uint8_t> buf(size);
+ ssize_t bytes = getrandom(buf.data(), size, GRND_NONBLOCK);
+    if (bytes < 0 || static_cast<size_t>(bytes) < size) {
+ ALOGE("getrandom failed: %d", errno);
+ reportMediaDrmErrored(ERROR_DRM_RESOURCE_BUSY, api);
+ return ERROR_DRM_RESOURCE_BUSY;
+ }
+ android::AString tmp;
+ encodeBase64(buf.data(), size, &tmp);
+ out->assign(tmp.c_str());
+ return OK;
+}
+
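+// Well-known DRM scheme UUIDs, keyed by the two big-endian 64-bit halves of the
+// UUID, mapped to human-readable scheme names.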
+const std::map<std::array<int64_t, 2>, std::string> DrmMetricsLogger::kUuidSchemeMap {
+ {{(int64_t)0x6DD8B3C345F44A68, (int64_t)0xBF3A64168D01A4A6}, "ABV DRM (MoDRM)"},
+ {{(int64_t)0xF239E769EFA34850, (int64_t)0x9C16A903C6932EFB},
+ "Adobe Primetime DRM version 4"},
+ {{(int64_t)0x616C746963617374, (int64_t)0x2D50726F74656374}, "Alticast"},
+ {{(int64_t)0x94CE86FB07FF4F43, (int64_t)0xADB893D2FA968CA2}, "Apple FairPlay"},
+ {{(int64_t)0x279FE473512C48FE, (int64_t)0xADE8D176FEE6B40F}, "Arris Titanium"},
+ {{(int64_t)0x3D5E6D359B9A41E8, (int64_t)0xB843DD3C6E72C42C}, "ChinaDRM"},
+ {{(int64_t)0x3EA8778F77424BF9, (int64_t)0xB18BE834B2ACBD47}, "Clear Key AES-128"},
+ {{(int64_t)0xBE58615B19C44684, (int64_t)0x88B3C8C57E99E957}, "Clear Key SAMPLE-AES"},
+ {{(int64_t)0xE2719D58A985B3C9, (int64_t)0x781AB030AF78D30E}, "Clear Key DASH-IF"},
+ {{(int64_t)0x644FE7B5260F4FAD, (int64_t)0x949A0762FFB054B4}, "CMLA (OMA DRM)"},
+ {{(int64_t)0x37C332587B994C7E, (int64_t)0xB15D19AF74482154}, "Commscope Titanium V3"},
+ {{(int64_t)0x45D481CB8FE049C0, (int64_t)0xADA9AB2D2455B2F2}, "CoreCrypt"},
+ {{(int64_t)0xDCF4E3E362F15818, (int64_t)0x7BA60A6FE33FF3DD}, "DigiCAP SmartXess"},
+ {{(int64_t)0x35BF197B530E42D7, (int64_t)0x8B651B4BF415070F}, "DivX DRM Series 5"},
+ {{(int64_t)0x80A6BE7E14484C37, (int64_t)0x9E70D5AEBE04C8D2}, "Irdeto Content Protection"},
+ {{(int64_t)0x5E629AF538DA4063, (int64_t)0x897797FFBD9902D4},
+ "Marlin Adaptive Streaming Simple Profile V1.0"},
+ {{(int64_t)0x9A04F07998404286, (int64_t)0xAB92E65BE0885F95}, "Microsoft PlayReady"},
+ {{(int64_t)0x6A99532D869F5922, (int64_t)0x9A91113AB7B1E2F3}, "MobiTV DRM"},
+ {{(int64_t)0xADB41C242DBF4A6D, (int64_t)0x958B4457C0D27B95}, "Nagra MediaAccess PRM 3.0"},
+ {{(int64_t)0x1F83E1E86EE94F0D, (int64_t)0xBA2F5EC4E3ED1A66}, "SecureMedia"},
+ {{(int64_t)0x992C46E6C4374899, (int64_t)0xB6A050FA91AD0E39}, "SecureMedia SteelKnot"},
+ {{(int64_t)0xA68129D3575B4F1A, (int64_t)0x9CBA3223846CF7C3},
+ "Synamedia/Cisco/NDS VideoGuard DRM"},
+ {{(int64_t)0xAA11967FCC014A4A, (int64_t)0x8E99C5D3DDDFEA2D}, "Unitend DRM (UDRM)"},
+ {{(int64_t)0x9A27DD82FDE24725, (int64_t)0x8CBC4234AA06EC09}, "Verimatrix VCAS"},
+ {{(int64_t)0xB4413586C58CFFB0, (int64_t)0x94A5D4896C1AF6C3}, "Viaccess-Orca DRM (VODRM)"},
+ {{(int64_t)0x793B79569F944946, (int64_t)0xA94223E7EF7E44B4}, "VisionCrypt"},
+ {{(int64_t)0x1077EFECC0B24D02, (int64_t)0xACE33C1E52E2FB4B}, "W3C Common PSSH box"},
+ {{(int64_t)0xEDEF8BA979D64ACE, (int64_t)0xA3C827DCD51D21ED}, "Widevine Content Protection"},
+};
+
+} // namespace android
\ No newline at end of file
diff --git a/drm/libmediadrm/DrmStatus.cpp b/drm/libmediadrm/DrmStatus.cpp
index 0258801..f622160 100644
--- a/drm/libmediadrm/DrmStatus.cpp
+++ b/drm/libmediadrm/DrmStatus.cpp
@@ -27,21 +27,20 @@
return;
}
- std::string errMsg;
auto val = errorDetails["cdmError"];
- if (!val.isNull()) {
+ if (val.isInt()) {
mCdmErr = val.asInt();
}
val = errorDetails["oemError"];
- if (!val.isNull()) {
+ if (val.isInt()) {
mOemErr = val.asInt();
}
val = errorDetails["context"];
- if (!val.isNull()) {
+ if (val.isInt()) {
mCtx = val.asInt();
}
val = errorDetails["errorMessage"];
- if (!val.isNull()) {
+ if (val.isString()) {
mErrMsg = val.asString();
} else {
mErrMsg = msg;
diff --git a/drm/libmediadrm/DrmUtils.cpp b/drm/libmediadrm/DrmUtils.cpp
index cb103f7..a99b1d1 100644
--- a/drm/libmediadrm/DrmUtils.cpp
+++ b/drm/libmediadrm/DrmUtils.cpp
@@ -59,11 +59,11 @@
namespace {
-template <typename Hal>
-Hal* MakeObject(status_t* pstatus) {
+template <typename Hal, typename... Ts>
+Hal* MakeObject(status_t* pstatus, Ts... args) {
status_t err = OK;
status_t& status = pstatus ? *pstatus : err;
- auto obj = new Hal();
+ auto obj = new Hal(args...);
status = obj->initCheck();
if (status != OK && status != NO_INIT) {
return NULL;
@@ -192,8 +192,8 @@
return factories;
}
-sp<IDrm> MakeDrm(status_t* pstatus) {
- return MakeObject<DrmHal>(pstatus);
+sp<IDrm> MakeDrm(IDrmFrontend frontend, status_t* pstatus) {
+ return MakeObject<DrmMetricsLogger>(pstatus, frontend);
}
sp<ICrypto> MakeCrypto(status_t* pstatus) {
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
index e0b8341..a81b312 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalAidl.h
@@ -123,7 +123,7 @@
::ndk::ScopedAStatus onSessionLostState(const std::vector<uint8_t>& in_sessionId);
private:
static Mutex mLock;
- mutable MediaDrmMetrics mMetrics;
+ std::shared_ptr<MediaDrmMetrics> mMetrics;
std::shared_ptr<DrmHalListener> mListener;
const std::vector<std::shared_ptr<IDrmFactoryAidl>> mFactories;
std::shared_ptr<IDrmPluginAidl> mPlugin;
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
index 45764a1..73a9e1d 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalHidl.h
@@ -70,7 +70,7 @@
DrmPlugin::SecurityLevel level, bool* isSupported);
virtual DrmStatus createPlugin(const uint8_t uuid[16],
- const String8 &appPackageName);
+ const String8 &appPackageName);
virtual DrmStatus destroyPlugin();
diff --git a/drm/libmediadrm/include/mediadrm/DrmHalListener.h b/drm/libmediadrm/include/mediadrm/DrmHalListener.h
index 22361ad..0eed929 100644
--- a/drm/libmediadrm/include/mediadrm/DrmHalListener.h
+++ b/drm/libmediadrm/include/mediadrm/DrmHalListener.h
@@ -27,7 +27,7 @@
namespace android {
struct DrmHalListener : public BnDrmPluginListener {
- explicit DrmHalListener(MediaDrmMetrics* mMetrics);
+ explicit DrmHalListener(const std::shared_ptr<MediaDrmMetrics>& in_metrics);
~DrmHalListener();
::ndk::ScopedAStatus onEvent(EventTypeAidl in_eventType,
const std::vector<uint8_t>& in_sessionId,
@@ -38,9 +38,9 @@
const std::vector<KeyStatusAidl>& in_keyStatusList,
bool in_hasNewUsableKey);
::ndk::ScopedAStatus onSessionLostState(const std::vector<uint8_t>& in_sessionId);
- void setListener(sp<IDrmClient> listener);
+ void setListener(const sp<IDrmClient>& listener);
private:
- mutable MediaDrmMetrics* mMetrics;
+ std::shared_ptr<MediaDrmMetrics> mMetrics;
sp<IDrmClient> mListener;
mutable Mutex mEventLock;
mutable Mutex mNotifyLock;
diff --git a/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h b/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h
new file mode 100644
index 0000000..f4e3c3e
--- /dev/null
+++ b/drm/libmediadrm/include/mediadrm/DrmMetricsLogger.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DRM_METRICS_LOGGER_H
+#define DRM_METRICS_LOGGER_H
+
+#include <mediadrm/DrmStatus.h>
+#include <mediadrm/IDrm.h>
+#include <sys/random.h>
+#include <map>
+#include <mutex>
+
+namespace android {
+
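+// Per-session bookkeeping used when logging metrics: a random session nonce plus
+// the security level requested at openSession() and the level actually granted.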
+struct SessionContext {
+ std::string mNonce;
+ int64_t mTargetSecurityLevel;
+ DrmPlugin::SecurityLevel mActualSecurityLevel;
+};
+
+class DrmMetricsLogger : public IDrm {
+ public:
+ DrmMetricsLogger(IDrmFrontend);
+
+ virtual ~DrmMetricsLogger();
+
+ virtual DrmStatus initCheck() const;
+
+ virtual DrmStatus isCryptoSchemeSupported(const uint8_t uuid[IDRM_UUID_SIZE],
+ const String8& mimeType,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* result);
+
+ virtual DrmStatus createPlugin(const uint8_t uuid[IDRM_UUID_SIZE],
+ const String8& appPackageName);
+
+ virtual DrmStatus destroyPlugin();
+
+ virtual DrmStatus openSession(DrmPlugin::SecurityLevel securityLevel,
+ Vector<uint8_t>& sessionId);
+
+ virtual DrmStatus closeSession(Vector<uint8_t> const& sessionId);
+
+ virtual DrmStatus getKeyRequest(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& initData, String8 const& mimeType,
+ DrmPlugin::KeyType keyType,
+ KeyedVector<String8, String8> const& optionalParameters,
+ Vector<uint8_t>& request, String8& defaultUrl,
+ DrmPlugin::KeyRequestType* keyRequestType);
+
+ virtual DrmStatus provideKeyResponse(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& response,
+ Vector<uint8_t>& keySetId);
+
+ virtual DrmStatus removeKeys(Vector<uint8_t> const& keySetId);
+
+ virtual DrmStatus restoreKeys(Vector<uint8_t> const& sessionId,
+ Vector<uint8_t> const& keySetId);
+
+ virtual DrmStatus queryKeyStatus(Vector<uint8_t> const& sessionId,
+ KeyedVector<String8, String8>& infoMap) const;
+
+ virtual DrmStatus getProvisionRequest(String8 const& certType, String8 const& certAuthority,
+ Vector<uint8_t>& request, String8& defaultUrl);
+
+ virtual DrmStatus provideProvisionResponse(Vector<uint8_t> const& response,
+ Vector<uint8_t>& certificate,
+ Vector<uint8_t>& wrappedKey);
+
+ virtual DrmStatus getSecureStops(List<Vector<uint8_t>>& secureStops);
+ virtual DrmStatus getSecureStopIds(List<Vector<uint8_t>>& secureStopIds);
+ virtual DrmStatus getSecureStop(Vector<uint8_t> const& ssid, Vector<uint8_t>& secureStop);
+
+ virtual DrmStatus releaseSecureStops(Vector<uint8_t> const& ssRelease);
+ virtual DrmStatus removeSecureStop(Vector<uint8_t> const& ssid);
+ virtual DrmStatus removeAllSecureStops();
+
+ virtual DrmStatus getHdcpLevels(DrmPlugin::HdcpLevel* connectedLevel,
+ DrmPlugin::HdcpLevel* maxLevel) const;
+ virtual DrmStatus getNumberOfSessions(uint32_t* currentSessions, uint32_t* maxSessions) const;
+ virtual DrmStatus getSecurityLevel(Vector<uint8_t> const& sessionId,
+ DrmPlugin::SecurityLevel* level) const;
+
+ virtual DrmStatus getOfflineLicenseKeySetIds(List<Vector<uint8_t>>& keySetIds) const;
+ virtual DrmStatus removeOfflineLicense(Vector<uint8_t> const& keySetId);
+ virtual DrmStatus getOfflineLicenseState(Vector<uint8_t> const& keySetId,
+ DrmPlugin::OfflineLicenseState* licenseState) const;
+
+ virtual DrmStatus getPropertyString(String8 const& name, String8& value) const;
+ virtual DrmStatus getPropertyByteArray(String8 const& name, Vector<uint8_t>& value) const;
+ virtual DrmStatus setPropertyString(String8 const& name, String8 const& value) const;
+ virtual DrmStatus setPropertyByteArray(String8 const& name, Vector<uint8_t> const& value) const;
+
+ virtual DrmStatus getMetrics(const sp<IDrmMetricsConsumer>& consumer);
+
+ virtual DrmStatus setCipherAlgorithm(Vector<uint8_t> const& sessionId,
+ String8 const& algorithm);
+
+ virtual DrmStatus setMacAlgorithm(Vector<uint8_t> const& sessionId, String8 const& algorithm);
+
+ virtual DrmStatus encrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output);
+
+ virtual DrmStatus decrypt(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& input, Vector<uint8_t> const& iv,
+ Vector<uint8_t>& output);
+
+ virtual DrmStatus sign(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t>& signature);
+
+ virtual DrmStatus verify(Vector<uint8_t> const& sessionId, Vector<uint8_t> const& keyId,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& signature,
+ bool& match);
+
+ virtual DrmStatus signRSA(Vector<uint8_t> const& sessionId, String8 const& algorithm,
+ Vector<uint8_t> const& message, Vector<uint8_t> const& wrappedKey,
+ Vector<uint8_t>& signature);
+
+ virtual DrmStatus setListener(const sp<IDrmClient>& listener);
+
+ virtual DrmStatus requiresSecureDecoder(const char* mime, bool* required) const;
+
+ virtual DrmStatus requiresSecureDecoder(const char* mime,
+ DrmPlugin::SecurityLevel securityLevel,
+ bool* required) const;
+
+ virtual DrmStatus setPlaybackId(Vector<uint8_t> const& sessionId, const char* playbackId);
+
+ virtual DrmStatus getLogMessages(Vector<drm::V1_4::LogMessage>& logs) const;
+
+ virtual DrmStatus getSupportedSchemes(std::vector<uint8_t>& schemes) const;
+
+ void reportMediaDrmCreated() const;
+
+ void reportMediaDrmSessionOpened(const std::vector<uint8_t>& sessionId) const;
+
+ void reportMediaDrmErrored(
+ const DrmStatus& error_code, const char* api,
+ const std::vector<uint8_t>& sessionId = std::vector<uint8_t>()) const;
+
+ DrmStatus generateNonce(std::string* out, size_t size, const char* api);
+
+ private:
+ static const size_t kNonceSize = 16;
+ static const std::map<std::array<int64_t, 2>, std::string> kUuidSchemeMap;
+ sp<IDrm> mImpl;
+ std::array<int64_t, 2> mUuid;
+ std::string mObjNonce;
+ std::string mScheme;
+ std::map<std::vector<uint8_t>, SessionContext> mSessionMap;
+ mutable std::mutex mSessionMapMutex;
+ IDrmFrontend mFrontend;
+ DISALLOW_EVIL_CONSTRUCTORS(DrmMetricsLogger);
+};
+
+} // namespace android
+
+#endif // DRM_METRICS_LOGGER_H
\ No newline at end of file
diff --git a/drm/libmediadrm/include/mediadrm/IDrm.h b/drm/libmediadrm/include/mediadrm/IDrm.h
index 3dc7485..ff5f63b 100644
--- a/drm/libmediadrm/include/mediadrm/IDrm.h
+++ b/drm/libmediadrm/include/mediadrm/IDrm.h
@@ -24,6 +24,8 @@
#define ANDROID_IDRM_H_
+#define IDRM_UUID_SIZE (16)
+
namespace android {
namespace hardware {
namespace drm {
@@ -33,6 +35,13 @@
} // namespace drm
} // namespace hardware
+enum IDrmFrontend : int32_t {
+ IDRM_UNKNOWN = 0,
+ IDRM_JNI = 1,
+ IDRM_NDK = 2,
+ IDRM_NUPLAYER = 3,
+};
+
namespace drm = ::android::hardware::drm;
struct AString;
diff --git a/drm/libmediadrm/interface/mediadrm/DrmUtils.h b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
index 0044bac..ce8536c 100644
--- a/drm/libmediadrm/interface/mediadrm/DrmUtils.h
+++ b/drm/libmediadrm/interface/mediadrm/DrmUtils.h
@@ -22,6 +22,7 @@
#include <android/hardware/drm/1.4/IDrmPlugin.h>
#include <android/hardware/drm/1.4/types.h>
#include <media/stagefright/MediaErrors.h>
+#include <mediadrm/DrmMetricsLogger.h>
#include <mediadrm/DrmStatus.h>
#include <utils/Errors.h> // for status_t
#include <utils/Log.h>
@@ -119,7 +120,7 @@
bool UseDrmService();
-sp<IDrm> MakeDrm(status_t *pstatus = nullptr);
+sp<IDrm> MakeDrm(IDrmFrontend frontend = IDRM_JNI, status_t* pstatus = nullptr);
sp<ICrypto> MakeCrypto(status_t *pstatus = nullptr);
diff --git a/drm/mediadrm/plugins/clearkey/aidl/Android.bp b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
index 2732aa7..eaf5051 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
@@ -37,6 +37,7 @@
static_libs: [
"android.hardware.common-V2-ndk",
"libclearkeybase",
+ "libjsoncpp",
],
local_include_dirs: ["include"],
@@ -99,6 +100,7 @@
static_libs: [
"android.hardware.common-V2-ndk",
"libclearkeybase_fuzz",
+ "libjsoncpp",
],
local_include_dirs: ["include"],
@@ -114,13 +116,10 @@
"fuzz_aidl_clearkey_service_defaults",
"service_fuzzer_defaults",
],
- static_libs: [
- "liblog",
- ],
srcs: ["fuzzer.cpp"],
fuzz_config: {
cc: [
"hamzeh@google.com",
],
},
-}
\ No newline at end of file
+}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
index 054eabd..31cb7c0 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
+++ b/drm/mediadrm/plugins/clearkey/aidl/DrmPlugin.cpp
@@ -16,6 +16,7 @@
#define LOG_TAG "clearkey-DrmPlugin"
#include <aidl/android/hardware/drm/DrmMetric.h>
+#include <android-base/parseint.h>
#include <utils/Log.h>
#include <inttypes.h>
@@ -83,12 +84,14 @@
void DrmPlugin::initProperties() {
mStringProperties.clear();
mStringProperties[kVendorKey] = kAidlVendorValue;
- mStringProperties[kVersionKey] = kAidlVersionValue;
+ mStringProperties[kVersionKey] = kVersionValue;
mStringProperties[kPluginDescriptionKey] = kAidlPluginDescriptionValue;
mStringProperties[kAlgorithmsKey] = kAidlAlgorithmsValue;
mStringProperties[kListenerTestSupportKey] = kAidlListenerTestSupportValue;
mStringProperties[kDrmErrorTestKey] = kAidlDrmErrorTestValue;
mStringProperties[kAidlVersionKey] = kAidlVersionValue;
+ mStringProperties[kOemErrorKey] = "0";
+ mStringProperties[kErrorContextKey] = "0";
std::vector<uint8_t> valueVector;
valueVector.clear();
@@ -102,6 +105,26 @@
mByteArrayProperties[kMetricsKey] = valueVector;
}
+int32_t DrmPlugin::getIntProperty(const std::string& prop, int32_t defaultVal) const {
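+    // Returns the named string property parsed as an int32_t, or defaultVal if
+    // the property is absent or not a valid integer.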
+ if (!mStringProperties.count(prop)) {
+ return defaultVal;
+ }
+ int32_t out = defaultVal;
+ if (!::android::base::ParseInt(mStringProperties.at(prop), &out)) {
+ return defaultVal;
+ }
+ return out;
+}
+
+int32_t DrmPlugin::getOemError() const {
+ return getIntProperty(kOemErrorKey);
+}
+
+int32_t DrmPlugin::getErrorContext() const {
+ return getIntProperty(kErrorContextKey);
+}
+
+//
// The secure stop in ClearKey implementation is not installed securely.
// This function merely creates a test environment for testing secure stops APIs.
// The content in this secure stop is implementation dependent, the clearkey
@@ -127,7 +150,10 @@
mSessionLibrary->destroySession(session);
if (session->getMockError() != clearkeydrm::OK) {
sendSessionLostState(in_sessionId);
- return toNdkScopedAStatus(Status::ERROR_DRM_INVALID_STATE);
+ return toNdkScopedAStatus(Status::ERROR_DRM_INVALID_STATE,
+ nullptr,
+ getOemError(),
+ getErrorContext());
}
mCloseSessionOkCount++;
return toNdkScopedAStatus(Status::OK);
@@ -198,7 +224,8 @@
if (!session.get()) {
return toNdkScopedAStatus(Status::ERROR_DRM_SESSION_NOT_OPENED);
} else if (session->getMockError() != clearkeydrm::OK) {
- return toNdkScopedAStatus(session->getMockError());
+ auto err = static_cast<Status>(session->getMockError());
+ return toNdkScopedAStatus(err, nullptr, getOemError(), getErrorContext());
}
keyRequestType = KeyRequestType::INITIAL;
}
@@ -381,6 +408,10 @@
value = mStringProperties[kDrmErrorTestKey];
} else if (name == kAidlVersionKey) {
value = mStringProperties[kAidlVersionKey];
+ } else if (name == kOemErrorKey) {
+ value = mStringProperties[kOemErrorKey];
+ } else if (name == kErrorContextKey) {
+ value = mStringProperties[kErrorContextKey];
} else {
ALOGE("App requested unknown string property %s", name.c_str());
status = Status::ERROR_DRM_CANNOT_HANDLE;
@@ -921,6 +952,13 @@
}
}
+ if (in_propertyName == kOemErrorKey || in_propertyName == kErrorContextKey) {
+ int32_t err = 0;
+ if (!::android::base::ParseInt(in_value, &err)) {
+ return toNdkScopedAStatus(Status::BAD_VALUE);
+ }
+ }
+
mStringProperties[key] = std::string(in_value.c_str());
return toNdkScopedAStatus(Status::OK);
}
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h b/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h
index 9257b17..0db3c37 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/AidlUtils.h
@@ -15,9 +15,12 @@
*/
#pragma once
+#include <cstdint>
#include <string>
#include <vector>
+#include <json/json.h>
+
#include <android/binder_auto_utils.h>
#include "aidl/android/hardware/drm/Status.h"
#include "ClearKeyTypes.h"
@@ -41,17 +44,32 @@
}
inline ::ndk::ScopedAStatus toNdkScopedAStatus(::aidl::android::hardware::drm::Status status,
- const char* msg = nullptr) {
+ const char* msg = nullptr,
+ int32_t oemError = 0,
+ int32_t errorContext = 0) {
if (Status::OK == status) {
return ::ndk::ScopedAStatus::ok();
- } else {
- auto err = static_cast<int32_t>(status);
- if (msg) {
- return ::ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(err, msg);
- } else {
- return ::ndk::ScopedAStatus::fromServiceSpecificError(err);
- }
}
+
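+    // Pack the extended error details into a JSON object carried in the service-
+    // specific error message; the framework's DrmStatus parses the "cdmError",
+    // "oemError", "context" and "errorMessage" fields back out.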
+ Json::Value errObj(Json::objectValue);
+ auto err = static_cast<int32_t>(status);
+ errObj["cdmError"] = err;
+
+ if (oemError) {
+ errObj["oemError"] = oemError;
+ }
+ if (errorContext) {
+ errObj["context"] = errorContext;
+ }
+ if (msg) {
+ errObj["errorMessage"] = msg;
+ }
+
+ Json::FastWriter writer;
+ return ::ndk::ScopedAStatus::fromServiceSpecificErrorWithMessage(
+ err, writer.write(errObj).c_str());
}
inline ::ndk::ScopedAStatus toNdkScopedAStatus(clearkeydrm::CdmResponseType res) {
diff --git a/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h b/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h
index 7acc1b6..694013a 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h
+++ b/drm/mediadrm/plugins/clearkey/aidl/include/DrmPlugin.h
@@ -148,6 +148,9 @@
private:
void initProperties();
+ int32_t getIntProperty(const std::string& prop, int32_t defaultVal = 0) const;
+ int32_t getOemError() const;
+ int32_t getErrorContext() const;
void installSecureStop(const std::vector<uint8_t>& sessionId);
bool makeKeySetId(std::string* keySetId);
void setPlayPolicy();
diff --git a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
index bfda388..d4e641e 100644
--- a/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
+++ b/drm/mediadrm/plugins/clearkey/common/include/clearkeydrm/ClearKeyDrmProperties.h
@@ -21,7 +21,7 @@
static const std::string kVendorKey("vendor");
static const std::string kVendorValue("Google");
static const std::string kVersionKey("version");
-static const std::string kVersionValue("1.2");
+static const std::string kVersionValue("14"); // sync with Android OS version
static const std::string kPluginDescriptionKey("description");
static const std::string kPluginDescriptionValue("ClearKey CDM");
static const std::string kAlgorithmsKey("algorithms");
@@ -35,6 +35,8 @@
static const std::string kFrameTooLargeValue("frameTooLarge");
static const std::string kInvalidStateValue("invalidState");
static const std::string kAidlVersionKey("aidlVersion");
+static const std::string kOemErrorKey("oemError");
+static const std::string kErrorContextKey("errorContext");
static const std::string kDeviceIdKey("deviceId");
static const uint8_t kTestDeviceIdData[] = {0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
diff --git a/include/media/MicrophoneInfo.h b/include/media/MicrophoneInfo.h
index a5045b9..6d6c594 100644
--- a/include/media/MicrophoneInfo.h
+++ b/include/media/MicrophoneInfo.h
@@ -70,6 +70,9 @@
}
virtual status_t writeToParcelable(MicrophoneInfoData* parcelable) const {
+#if defined(BACKEND_NDK)
+ using ::aidl::android::convertReinterpret;
+#endif
parcelable->deviceId = mDeviceId;
parcelable->portId = mPortId;
parcelable->type = VALUE_OR_RETURN_STATUS(convertReinterpret<int32_t>(mType));
@@ -98,6 +101,9 @@
}
virtual status_t readFromParcelable(const MicrophoneInfoData& parcelable) {
+#if defined(BACKEND_NDK)
+ using ::aidl::android::convertReinterpret;
+#endif
mDeviceId = parcelable.deviceId;
mPortId = parcelable.portId;
mType = VALUE_OR_RETURN_STATUS(convertReinterpret<uint32_t>(parcelable.type));
@@ -208,6 +214,10 @@
int32_t mDirectionality;
};
+#if defined(BACKEND_NDK)
+using ::aidl::ConversionResult;
+#endif
+
// Conversion routines, according to AidlConversion.h conventions.
inline ConversionResult<MicrophoneInfo>
aidl2legacy_MicrophoneInfo(const media::MicrophoneInfoData& aidl) {
diff --git a/media/audioaidlconversion/AidlConversionCppNdk.cpp b/media/audioaidlconversion/AidlConversionCppNdk.cpp
index 6dfcc6b..d0ff091 100644
--- a/media/audioaidlconversion/AidlConversionCppNdk.cpp
+++ b/media/audioaidlconversion/AidlConversionCppNdk.cpp
@@ -69,6 +69,8 @@
using media::audio::common::AudioOffloadInfo;
using media::audio::common::AudioOutputFlags;
using media::audio::common::AudioPlaybackRate;
+using media::audio::common::AudioPort;
+using media::audio::common::AudioPortConfig;
using media::audio::common::AudioPortDeviceExt;
using media::audio::common::AudioPortExt;
using media::audio::common::AudioPortMixExt;
@@ -477,6 +479,15 @@
{
AUDIO_DEVICE_IN_ECHO_REFERENCE, make_AudioDeviceDescription(
AudioDeviceType::IN_ECHO_REFERENCE)
+ },
+ {
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, make_AudioDeviceDescription(
+ AudioDeviceType::IN_SUBMIX)
+ },
+ {
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, make_AudioDeviceDescription(
+ AudioDeviceType::OUT_SUBMIX,
+ GET_DEVICE_DESC_CONNECTION(VIRTUAL))
}
}};
append_AudioDeviceDescription(pairs,
@@ -495,9 +506,6 @@
AudioDeviceType::IN_DEVICE, AudioDeviceType::OUT_DEVICE,
GET_DEVICE_DESC_CONNECTION(HDMI));
append_AudioDeviceDescription(pairs,
- AUDIO_DEVICE_IN_REMOTE_SUBMIX, AUDIO_DEVICE_OUT_REMOTE_SUBMIX,
- AudioDeviceType::IN_SUBMIX, AudioDeviceType::OUT_SUBMIX);
- append_AudioDeviceDescription(pairs,
AUDIO_DEVICE_IN_ANLG_DOCK_HEADSET, AUDIO_DEVICE_OUT_ANLG_DOCK_HEADSET,
AudioDeviceType::IN_DOCK, AudioDeviceType::OUT_DOCK,
GET_DEVICE_DESC_CONNECTION(ANALOG));
@@ -1768,6 +1776,60 @@
return aidl;
}
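+// A DEVICE port with role SOURCE feeds data into the framework (input) and with
+// role SINK consumes it (output); MIX ports use the inverse mapping. portRole()
+// below performs the reverse conversion.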
+ConversionResult<AudioPortDirection> portDirection(audio_port_role_t role, audio_port_type_t type) {
+ switch (type) {
+ case AUDIO_PORT_TYPE_NONE:
+ case AUDIO_PORT_TYPE_SESSION:
+ break; // must be listed -Werror,-Wswitch
+ case AUDIO_PORT_TYPE_DEVICE:
+ switch (role) {
+ case AUDIO_PORT_ROLE_NONE:
+ break; // must be listed -Werror,-Wswitch
+ case AUDIO_PORT_ROLE_SOURCE:
+ return AudioPortDirection::INPUT;
+ case AUDIO_PORT_ROLE_SINK:
+ return AudioPortDirection::OUTPUT;
+ }
+ break;
+ case AUDIO_PORT_TYPE_MIX:
+ switch (role) {
+ case AUDIO_PORT_ROLE_NONE:
+ break; // must be listed -Werror,-Wswitch
+ case AUDIO_PORT_ROLE_SOURCE:
+ return AudioPortDirection::OUTPUT;
+ case AUDIO_PORT_ROLE_SINK:
+ return AudioPortDirection::INPUT;
+ }
+ break;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<audio_port_role_t> portRole(AudioPortDirection direction, audio_port_type_t type) {
+ switch (type) {
+ case AUDIO_PORT_TYPE_NONE:
+ case AUDIO_PORT_TYPE_SESSION:
+ break; // must be listed -Werror,-Wswitch
+ case AUDIO_PORT_TYPE_DEVICE:
+ switch (direction) {
+ case AudioPortDirection::INPUT:
+ return AUDIO_PORT_ROLE_SOURCE;
+ case AudioPortDirection::OUTPUT:
+ return AUDIO_PORT_ROLE_SINK;
+ }
+ break;
+ case AUDIO_PORT_TYPE_MIX:
+ switch (direction) {
+ case AudioPortDirection::OUTPUT:
+ return AUDIO_PORT_ROLE_SOURCE;
+ case AudioPortDirection::INPUT:
+ return AUDIO_PORT_ROLE_SINK;
+ }
+ break;
+ }
+ return unexpected(BAD_VALUE);
+}
+
ConversionResult<audio_config_t>
aidl2legacy_AudioConfig_audio_config_t(const AudioConfig& aidl, bool isInput) {
const audio_config_base_t legacyBase = VALUE_OR_RETURN(
@@ -1911,6 +1973,396 @@
enumToMask_index<int32_t, AudioEncapsulationMetadataType>);
}
+ConversionResult<audio_port_config_mix_ext_usecase>
+aidl2legacy_AudioPortMixExtUseCase_audio_port_config_mix_ext_usecase(
+ const AudioPortMixExtUseCase& aidl, bool isInput) {
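+    // The use-case union carries a stream type for output mixes and a source for
+    // input mixes; an unspecified tag leaves the legacy struct zero-initialized.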
+ audio_port_config_mix_ext_usecase legacy{};
+ if (aidl.getTag() != AudioPortMixExtUseCase::Tag::unspecified) {
+ if (!isInput) {
+ legacy.stream = VALUE_OR_RETURN(aidl2legacy_AudioStreamType_audio_stream_type_t(
+ VALUE_OR_RETURN(UNION_GET(aidl, stream))));
+ } else {
+ legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
+ VALUE_OR_RETURN(UNION_GET(aidl, source))));
+ }
+ }
+ return legacy;
+}
+
+ConversionResult<AudioPortMixExtUseCase>
+legacy2aidl_audio_port_config_mix_ext_usecase_AudioPortMixExtUseCase(
+ const audio_port_config_mix_ext_usecase& legacy, bool isInput) {
+ AudioPortMixExtUseCase aidl;
+ if (!isInput) {
+ UNION_SET(aidl, stream, VALUE_OR_RETURN(
+ legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream)));
+ } else {
+ UNION_SET(aidl, source, VALUE_OR_RETURN(
+ legacy2aidl_audio_source_t_AudioSource(legacy.source)));
+ }
+ return aidl;
+}
+
+ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt_audio_port_config_mix_ext(
+ const AudioPortMixExt& aidl, bool isInput) {
+ audio_port_config_mix_ext legacy{};
+ legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
+ legacy.usecase = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortMixExtUseCase_audio_port_config_mix_ext_usecase(
+ aidl.usecase, isInput));
+ return legacy;
+}
+
+ConversionResult<AudioPortMixExt> legacy2aidl_audio_port_config_mix_ext_AudioPortMixExt(
+ const audio_port_config_mix_ext& legacy, bool isInput) {
+ AudioPortMixExt aidl;
+ aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ aidl.usecase = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_mix_ext_usecase_AudioPortMixExtUseCase(
+ legacy.usecase, isInput));
+ return aidl;
+}
+
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(const AudioPortDeviceExt& aidl) {
+ audio_port_config_device_ext legacy{};
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.type, legacy.address));
+ return legacy;
+}
+
+ConversionResult<AudioPortDeviceExt> legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ const audio_port_config_device_ext& legacy) {
+ AudioPortDeviceExt aidl;
+ aidl.device = VALUE_OR_RETURN(
+ legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ return aidl;
+}
+
+// This type is unnamed in the original definition, thus we name it here.
+using audio_port_config_ext = decltype(audio_port_config::ext);
+
+status_t aidl2legacy_AudioPortExt_audio_port_config_ext(
+ const AudioPortExt& aidl, bool isInput,
+ audio_port_config_ext* legacy, audio_port_type_t* type) {
+ switch (aidl.getTag()) {
+ case AudioPortExt::Tag::unspecified:
+ // Just verify that the union is empty.
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, unspecified));
+ *legacy = {};
+ *type = AUDIO_PORT_TYPE_NONE;
+ return OK;
+ case AudioPortExt::Tag::device:
+ legacy->device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, device))));
+ *type = AUDIO_PORT_TYPE_DEVICE;
+ return OK;
+ case AudioPortExt::Tag::mix:
+ legacy->mix = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortMixExt_audio_port_config_mix_ext(
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, mix)), isInput));
+ *type = AUDIO_PORT_TYPE_MIX;
+ return OK;
+ case AudioPortExt::Tag::session:
+ // This variant is not used in the HAL scenario.
+ legacy->session.session = AUDIO_SESSION_NONE;
+ *type = AUDIO_PORT_TYPE_SESSION;
+ return OK;
+
+ }
+ LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
+}
+
+ConversionResult<AudioPortExt> legacy2aidl_audio_port_config_ext_AudioPortExt(
+ const audio_port_config_ext& legacy, audio_port_type_t type, bool isInput) {
+ AudioPortExt aidl;
+ switch (type) {
+ case AUDIO_PORT_TYPE_NONE:
+ UNION_SET(aidl, unspecified, false);
+ return aidl;
+ case AUDIO_PORT_TYPE_DEVICE: {
+ AudioPortDeviceExt device = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(legacy.device));
+ UNION_SET(aidl, device, device);
+ return aidl;
+ }
+ case AUDIO_PORT_TYPE_MIX: {
+ AudioPortMixExt mix = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_mix_ext_AudioPortMixExt(legacy.mix, isInput));
+ UNION_SET(aidl, mix, mix);
+ return aidl;
+ }
+ case AUDIO_PORT_TYPE_SESSION:
+ // This variant is not used in the HAL scenario.
+ UNION_SET(aidl, unspecified, false);
+ return aidl;
+ }
+ LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
+}
+
+status_t aidl2legacy_AudioPortConfig_audio_port_config(
+ const AudioPortConfig& aidl, bool isInput, audio_port_config* legacy, int32_t* portId) {
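+    // Optional AIDL fields are converted only when present; each one also sets the
+    // corresponding AUDIO_PORT_CONFIG_* bit in legacy->config_mask.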
+ legacy->id = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
+ *portId = aidl.portId;
+ if (aidl.sampleRate.has_value()) {
+ legacy->sample_rate = VALUE_OR_RETURN_STATUS(
+ convertIntegral<unsigned int>(aidl.sampleRate.value().value));
+ legacy->config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
+ }
+ if (aidl.channelMask.has_value()) {
+ legacy->channel_mask =
+ VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+ aidl.channelMask.value(), isInput));
+ legacy->config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
+ }
+ if (aidl.format.has_value()) {
+ legacy->format = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format.value()));
+ legacy->config_mask |= AUDIO_PORT_CONFIG_FORMAT;
+ }
+ if (aidl.gain.has_value()) {
+ legacy->gain = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioGainConfig_audio_gain_config(
+ aidl.gain.value(), isInput));
+ legacy->config_mask |= AUDIO_PORT_CONFIG_GAIN;
+ }
+ if (aidl.flags.has_value()) {
+ legacy->flags = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioIoFlags_audio_io_flags(aidl.flags.value(), isInput));
+ legacy->config_mask |= AUDIO_PORT_CONFIG_FLAGS;
+ }
+ RETURN_STATUS_IF_ERROR(aidl2legacy_AudioPortExt_audio_port_config_ext(
+ aidl.ext, isInput, &legacy->ext, &legacy->type));
+ legacy->role = VALUE_OR_RETURN_STATUS(portRole(isInput ?
+ AudioPortDirection::INPUT : AudioPortDirection::OUTPUT, legacy->type));
+ return OK;
+}
+
+ConversionResult<AudioPortConfig>
+legacy2aidl_audio_port_config_AudioPortConfig(
+ const audio_port_config& legacy, bool isInput, int32_t portId) {
+ AudioPortConfig aidl;
+ aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.portId = portId;
+ if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+ Int aidl_sampleRate;
+ aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
+ aidl.sampleRate = aidl_sampleRate;
+ }
+ if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+ aidl.channelMask = VALUE_OR_RETURN(
+ legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
+ }
+ if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+ aidl.format = VALUE_OR_RETURN(
+ legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
+ }
+ if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
+ aidl.gain = VALUE_OR_RETURN(
+ legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
+ }
+ if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
+ aidl.flags = VALUE_OR_RETURN(
+ legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, isInput));
+ }
+ aidl.ext = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_ext_AudioPortExt(legacy.ext, legacy.type, isInput));
+ return aidl;
+}
+
+ConversionResult<audio_port_mix_ext> aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ const AudioPortMixExt& aidl) {
+ audio_port_mix_ext legacy{};
+ legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
+ return legacy;
+}
+
+ConversionResult<AudioPortMixExt> legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+ const audio_port_mix_ext& legacy) {
+ AudioPortMixExt aidl;
+ aidl.handle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
+ return aidl;
+}
+
+ConversionResult<audio_port_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(const AudioPortDeviceExt& aidl) {
+ audio_port_device_ext legacy{};
+ RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
+ aidl.device, &legacy.type, legacy.address));
+ return legacy;
+}
+
+ConversionResult<AudioPortDeviceExt> legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+ const audio_port_device_ext& legacy) {
+ AudioPortDeviceExt aidl;
+ aidl.device = VALUE_OR_RETURN(
+ legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
+ return aidl;
+}
+
+// This type is unnamed in the original definition, thus we name it here.
+using audio_port_v7_ext = decltype(audio_port_v7::ext);
+
+status_t aidl2legacy_AudioPortExt_audio_port_v7_ext(
+ const AudioPortExt& aidl, audio_port_v7_ext* legacy, audio_port_type_t* type) {
+ switch (aidl.getTag()) {
+ case AudioPortExt::Tag::unspecified:
+ // Just verify that the union is empty.
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, unspecified));
+ *legacy = {};
+ *type = AUDIO_PORT_TYPE_NONE;
+ return OK;
+ case AudioPortExt::Tag::device:
+ legacy->device = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, device))));
+ *type = AUDIO_PORT_TYPE_DEVICE;
+ return OK;
+ case AudioPortExt::Tag::mix:
+ legacy->mix = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ VALUE_OR_RETURN_STATUS(UNION_GET(aidl, mix))));
+ *type = AUDIO_PORT_TYPE_MIX;
+ return OK;
+ case AudioPortExt::Tag::session:
+ // This variant is not used in the HAL scenario.
+ legacy->session.session = AUDIO_SESSION_NONE;
+ *type = AUDIO_PORT_TYPE_SESSION;
+ return OK;
+
+ }
+ LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
+}
+
+ConversionResult<AudioPortExt> legacy2aidl_audio_port_v7_ext_AudioPortExt(
+ const audio_port_v7_ext& legacy, audio_port_type_t type) {
+ AudioPortExt aidl;
+ switch (type) {
+ case AUDIO_PORT_TYPE_NONE:
+ UNION_SET(aidl, unspecified, false);
+ return aidl;
+ case AUDIO_PORT_TYPE_DEVICE: {
+ AudioPortDeviceExt device = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy.device));
+ UNION_SET(aidl, device, device);
+ return aidl;
+ }
+ case AUDIO_PORT_TYPE_MIX: {
+ AudioPortMixExt mix = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy.mix));
+ UNION_SET(aidl, mix, mix);
+ return aidl;
+ }
+ case AUDIO_PORT_TYPE_SESSION:
+ // This variant is not used in the HAL scenario.
+ UNION_SET(aidl, unspecified, false);
+ return aidl;
+ }
+ LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
+}
+
+ConversionResult<audio_port_v7>
+aidl2legacy_AudioPort_audio_port_v7(const AudioPort& aidl, bool isInput) {
+ audio_port_v7 legacy;
+ legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.id));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.name, legacy.name, sizeof(legacy.name)));
+
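+    // The legacy struct stores profiles, descriptors and gains in fixed-size
+    // arrays, so reject AIDL lists that would overflow them before converting.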
+ if (aidl.profiles.size() > std::size(legacy.audio_profiles)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(convertRange(
+ aidl.profiles.begin(), aidl.profiles.end(), legacy.audio_profiles,
+ [isInput](const AudioProfile& p) {
+ return aidl2legacy_AudioProfile_audio_profile(p, isInput);
+ }));
+ legacy.num_audio_profiles = aidl.profiles.size();
+
+ if (aidl.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(
+ aidl.extraAudioDescriptors.begin(), aidl.extraAudioDescriptors.end(),
+ legacy.extra_audio_descriptors,
+ aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
+ legacy.num_extra_audio_descriptors = aidl.extraAudioDescriptors.size();
+
+ if (aidl.gains.size() > std::size(legacy.gains)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(convertRange(aidl.gains.begin(), aidl.gains.end(), legacy.gains,
+ [isInput](const AudioGain& g) {
+ return aidl2legacy_AudioGain_audio_gain(g, isInput);
+ }));
+ legacy.num_gains = aidl.gains.size();
+
+ RETURN_IF_ERROR(aidl2legacy_AudioPortExt_audio_port_v7_ext(
+ aidl.ext, &legacy.ext, &legacy.type));
+ legacy.role = VALUE_OR_RETURN(portRole(
+ isInput ? AudioPortDirection::INPUT : AudioPortDirection::OUTPUT, legacy.type));
+
+ AudioPortConfig aidlPortConfig;
+ int32_t portId;
+ aidlPortConfig.flags = aidl.flags;
+ aidlPortConfig.ext = aidl.ext;
+ RETURN_IF_ERROR(aidl2legacy_AudioPortConfig_audio_port_config(
+ aidlPortConfig, isInput, &legacy.active_config, &portId));
+ return legacy;
+}
+
+ConversionResult<AudioPort>
+legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy, bool isInput) {
+ AudioPort aidl;
+ aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+
+ if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
+ std::back_inserter(aidl.profiles),
+ [isInput](const audio_profile& p) {
+ return legacy2aidl_audio_profile_AudioProfile(p, isInput);
+ }));
+
+ if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
+ return unexpected(BAD_VALUE);
+ }
+ aidl.profiles.resize(legacy.num_audio_profiles);
+ RETURN_IF_ERROR(
+ convertRange(legacy.extra_audio_descriptors,
+ legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
+ std::back_inserter(aidl.extraAudioDescriptors),
+ legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
+
+ if (legacy.num_gains > std::size(legacy.gains)) {
+ return unexpected(BAD_VALUE);
+ }
+ RETURN_IF_ERROR(
+ convertRange(legacy.gains, legacy.gains + legacy.num_gains,
+ std::back_inserter(aidl.gains),
+ [isInput](const audio_gain& g) {
+ return legacy2aidl_audio_gain_AudioGain(g, isInput);
+ }));
+ aidl.gains.resize(legacy.num_gains);
+
+ aidl.ext = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_v7_ext_AudioPortExt(legacy.ext, legacy.type));
+
+ AudioPortConfig aidlPortConfig = VALUE_OR_RETURN(legacy2aidl_audio_port_config_AudioPortConfig(
+ legacy.active_config, isInput, aidl.id));
+ if (aidlPortConfig.flags.has_value()) {
+ aidl.flags = aidlPortConfig.flags.value();
+ } else {
+ aidl.flags = isInput ?
+ AudioIoFlags::make<AudioIoFlags::Tag::input>(0) :
+ AudioIoFlags::make<AudioIoFlags::Tag::output>(0);
+ }
+ return aidl;
+}
+
ConversionResult<audio_profile>
aidl2legacy_AudioProfile_audio_profile(const AudioProfile& aidl, bool isInput) {
audio_profile legacy;
diff --git a/media/audioaidlconversion/AidlConversionEffect.cpp b/media/audioaidlconversion/AidlConversionEffect.cpp
new file mode 100644
index 0000000..ad27c64
--- /dev/null
+++ b/media/audioaidlconversion/AidlConversionEffect.cpp
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <utility>
+
+#define LOG_TAG "AidlConversionEffect"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionEffect.h>
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// AIDL NDK backend to legacy audio data structure conversion utilities.
+
+namespace aidl {
+namespace android {
+
+using ::aidl::android::hardware::audio::effect::AcousticEchoCanceler;
+using ::aidl::android::hardware::audio::effect::AutomaticGainControl;
+using ::aidl::android::hardware::audio::effect::BassBoost;
+using ::aidl::android::hardware::audio::effect::Descriptor;
+using ::aidl::android::hardware::audio::effect::Downmix;
+using ::aidl::android::hardware::audio::effect::DynamicsProcessing;
+using ::aidl::android::hardware::audio::effect::Flags;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::PresetReverb;
+using ::aidl::android::media::audio::common::AudioDeviceDescription;
+
+using ::android::BAD_VALUE;
+using ::android::base::unexpected;
+
+////////////////////////////////////////////////////////////////////////////////////////////////////
+// Converters
+
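+// The legacy effect flags word packs masked bit-fields (type, insert, volume,
+// HW acceleration) plus individual indication bits; the helpers below convert
+// each field, and aidl2legacy_Flags_uint32() / legacy2aidl_uint32_Flags()
+// assemble or decompose the full word.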
+ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(Flags::Type type) {
+ switch (type) {
+ case Flags::Type::INSERT:
+ return EFFECT_FLAG_TYPE_INSERT;
+ case Flags::Type::AUXILIARY:
+ return EFFECT_FLAG_TYPE_AUXILIARY;
+ case Flags::Type::REPLACE:
+ return EFFECT_FLAG_TYPE_REPLACE;
+ case Flags::Type::PRE_PROC:
+ return EFFECT_FLAG_TYPE_PRE_PROC;
+ case Flags::Type::POST_PROC:
+ return EFFECT_FLAG_TYPE_POST_PROC;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::Type> legacy2aidl_uint32_Flags_Type(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_TYPE_MASK) {
+ case EFFECT_FLAG_TYPE_INSERT:
+ return Flags::Type::INSERT;
+ case EFFECT_FLAG_TYPE_AUXILIARY:
+ return Flags::Type::AUXILIARY;
+ case EFFECT_FLAG_TYPE_REPLACE:
+ return Flags::Type::REPLACE;
+ case EFFECT_FLAG_TYPE_PRE_PROC:
+ return Flags::Type::PRE_PROC;
+ case EFFECT_FLAG_TYPE_POST_PROC:
+ return Flags::Type::POST_PROC;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(Flags::Insert insert) {
+ switch (insert) {
+ case Flags::Insert::ANY:
+ return EFFECT_FLAG_INSERT_ANY;
+ case Flags::Insert::FIRST:
+ return EFFECT_FLAG_INSERT_FIRST;
+ case Flags::Insert::LAST:
+ return EFFECT_FLAG_INSERT_LAST;
+ case Flags::Insert::EXCLUSIVE:
+ return EFFECT_FLAG_INSERT_EXCLUSIVE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::Insert> legacy2aidl_uint32_Flags_Insert(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_INSERT_MASK) {
+ case EFFECT_FLAG_INSERT_ANY:
+ return Flags::Insert::ANY;
+ case EFFECT_FLAG_INSERT_FIRST:
+ return Flags::Insert::FIRST;
+ case EFFECT_FLAG_INSERT_LAST:
+ return Flags::Insert::LAST;
+ case EFFECT_FLAG_INSERT_EXCLUSIVE:
+ return Flags::Insert::EXCLUSIVE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(Flags::Volume volume) {
+ switch (volume) {
+ case Flags::Volume::NONE:
+ return 0;
+ case Flags::Volume::CTRL:
+ return EFFECT_FLAG_VOLUME_CTRL;
+ case Flags::Volume::IND:
+ return EFFECT_FLAG_VOLUME_IND;
+ case Flags::Volume::MONITOR:
+ return EFFECT_FLAG_VOLUME_MONITOR;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::Volume> legacy2aidl_uint32_Flags_Volume(uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_VOLUME_MASK) {
+ case EFFECT_FLAG_VOLUME_CTRL:
+ return Flags::Volume::CTRL;
+ case EFFECT_FLAG_VOLUME_IND:
+ return Flags::Volume::IND;
+ case EFFECT_FLAG_VOLUME_MONITOR:
+ return Flags::Volume::MONITOR;
+ case EFFECT_FLAG_VOLUME_NONE:
+ return Flags::Volume::NONE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_uint32(Flags aidl) {
+ uint32_t legacy = 0;
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Type_uint32(aidl.type));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Insert_uint32(aidl.insert));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Volume_uint32(aidl.volume));
+ legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_HardwareAccelerator_uint32(aidl.hwAcceleratorMode));
+
+ if (aidl.offloadIndication) {
+ legacy |= EFFECT_FLAG_OFFLOAD_SUPPORTED;
+ }
+ if (aidl.deviceIndication) {
+ legacy |= EFFECT_FLAG_DEVICE_IND;
+ }
+ if (aidl.audioModeIndication) {
+ legacy |= EFFECT_FLAG_AUDIO_MODE_IND;
+ }
+ if (aidl.audioSourceIndication) {
+ legacy |= EFFECT_FLAG_AUDIO_SOURCE_IND;
+ }
+ if (aidl.bypass) {
+ legacy |= EFFECT_FLAG_NO_PROCESS;
+ }
+ return legacy;
+}
+
+ConversionResult<Flags> legacy2aidl_uint32_Flags(uint32_t legacy) {
+ Flags aidl;
+
+ aidl.type = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Type(legacy));
+ aidl.insert = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Insert(legacy));
+ aidl.volume = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Volume(legacy));
+ aidl.hwAcceleratorMode = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_HardwareAccelerator(legacy));
+ aidl.offloadIndication = (legacy & EFFECT_FLAG_OFFLOAD_SUPPORTED);
+ aidl.deviceIndication = (legacy & EFFECT_FLAG_DEVICE_IND);
+ aidl.audioModeIndication = (legacy & EFFECT_FLAG_AUDIO_MODE_IND);
+ aidl.audioSourceIndication = (legacy & EFFECT_FLAG_AUDIO_SOURCE_IND);
+ aidl.bypass = (legacy & EFFECT_FLAG_NO_PROCESS);
+ return aidl;
+}
+
+ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
+ Flags::HardwareAccelerator hwAcceleratorMode) {
+ switch (hwAcceleratorMode) {
+ case Flags::HardwareAccelerator::NONE:
+ return 0;
+ case Flags::HardwareAccelerator::SIMPLE:
+ return EFFECT_FLAG_HW_ACC_SIMPLE;
+ case Flags::HardwareAccelerator::TUNNEL:
+ return EFFECT_FLAG_HW_ACC_TUNNEL;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<Flags::HardwareAccelerator> legacy2aidl_uint32_Flags_HardwareAccelerator(
+ uint32_t legacy) {
+ switch (legacy & EFFECT_FLAG_HW_ACC_MASK) {
+ case EFFECT_FLAG_HW_ACC_SIMPLE:
+ return Flags::HardwareAccelerator::SIMPLE;
+ case EFFECT_FLAG_HW_ACC_TUNNEL:
+ return Flags::HardwareAccelerator::TUNNEL;
+ case 0:
+ return Flags::HardwareAccelerator::NONE;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<effect_descriptor_t>
+aidl2legacy_Descriptor_effect_descriptor(const Descriptor& aidl) {
+ effect_descriptor_t legacy;
+ legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.type));
+ legacy.uuid = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.uuid));
+ // legacy descriptor doesn't have proxy information
+ // proxy = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.proxy));
+ legacy.apiVersion = EFFECT_CONTROL_API_VERSION;
+ legacy.flags = VALUE_OR_RETURN(aidl2legacy_Flags_uint32(aidl.common.flags));
+ legacy.cpuLoad = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.cpuLoad));
+ legacy.memoryUsage = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.memoryUsage));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.common.name, legacy.name, sizeof(legacy.name)));
+ RETURN_IF_ERROR(aidl2legacy_string(aidl.common.implementor, legacy.implementor,
+ sizeof(legacy.implementor)));
+ return legacy;
+}
+
+ConversionResult<Descriptor>
+legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& legacy) {
+ Descriptor aidl;
+ aidl.common.id.type = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.type));
+ aidl.common.id.uuid = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.uuid));
+ // legacy descriptor doesn't have proxy information
+ // aidl.common.id.proxy
+ aidl.common.flags = VALUE_OR_RETURN(legacy2aidl_uint32_Flags(legacy.flags));
+ aidl.common.cpuLoad = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.cpuLoad));
+ aidl.common.memoryUsage = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.memoryUsage));
+ aidl.common.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
+ aidl.common.implementor =
+ VALUE_OR_RETURN(legacy2aidl_string(legacy.implementor, sizeof(legacy.implementor)));
+ return aidl;
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_aec_uint32_echoDelay(const Parameter& aidl) {
+ int echoDelay = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, AcousticEchoCanceler, acousticEchoCanceler,
+ AcousticEchoCanceler::echoDelayUs, int));
+ return VALUE_OR_RETURN(convertReinterpret<uint32_t>(echoDelay));
+}
+
+ConversionResult<Parameter> legacy2aidl_uint32_echoDelay_Parameter_aec(uint32_t legacy) {
+ int delay = VALUE_OR_RETURN(convertReinterpret<int32_t>(legacy));
+ return MAKE_SPECIFIC_PARAMETER(AcousticEchoCanceler, acousticEchoCanceler, echoDelayUs, delay);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_aec_uint32_mobileMode(const Parameter& aidl) {
+ bool mobileMode = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, AcousticEchoCanceler, acousticEchoCanceler,
+ AcousticEchoCanceler::mobileMode, bool));
+ return VALUE_OR_RETURN(convertIntegral<uint32_t>(mobileMode));
+}
+
+ConversionResult<Parameter> legacy2aidl_uint32_mobileMode_Parameter_aec(uint32_t legacy) {
+ bool mode = VALUE_OR_RETURN(convertIntegral<bool>(legacy));
+ return MAKE_SPECIFIC_PARAMETER(AcousticEchoCanceler, acousticEchoCanceler, mobileMode, mode);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_fixedDigitalGain(
+ const Parameter& aidl) {
+ int gain = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, AutomaticGainControl, automaticGainControl,
+ AutomaticGainControl::fixedDigitalGainMb, int));
+ return VALUE_OR_RETURN(convertReinterpret<uint32_t>(gain));
+}
+
+ConversionResult<Parameter> legacy2aidl_uint32_fixedDigitalGain_Parameter_agc(uint32_t legacy) {
+ int gain = VALUE_OR_RETURN(convertReinterpret<int>(legacy));
+ return MAKE_SPECIFIC_PARAMETER(AutomaticGainControl, automaticGainControl, fixedDigitalGainMb,
+ gain);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_levelEstimator(
+ const Parameter& aidl) {
+ const auto& le = VALUE_OR_RETURN(GET_PARAMETER_SPECIFIC_FIELD(
+ aidl, AutomaticGainControl, automaticGainControl, AutomaticGainControl::levelEstimator,
+ AutomaticGainControl::LevelEstimator));
+ return static_cast<uint32_t>(le);
+}
+
+ConversionResult<Parameter> legacy2aidl_uint32_levelEstimator_Parameter_agc(uint32_t legacy) {
+ if (legacy > (uint32_t) AutomaticGainControl::LevelEstimator::PEAK) {
+ return unexpected(BAD_VALUE);
+ }
+ AutomaticGainControl::LevelEstimator le =
+ static_cast<AutomaticGainControl::LevelEstimator>(legacy);
+ return MAKE_SPECIFIC_PARAMETER(AutomaticGainControl, automaticGainControl, levelEstimator, le);
+}
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_saturationMargin(
+ const Parameter& aidl) {
+ int saturationMargin = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, AutomaticGainControl, automaticGainControl,
+ AutomaticGainControl::saturationMarginMb, int));
+ return VALUE_OR_RETURN(convertIntegral<uint32_t>(saturationMargin));
+}
+
+ConversionResult<Parameter> legacy2aidl_uint32_saturationMargin_Parameter_agc(uint32_t legacy) {
+ int saturationMargin = VALUE_OR_RETURN(convertIntegral<int>(legacy));
+ return MAKE_SPECIFIC_PARAMETER(AutomaticGainControl, automaticGainControl, saturationMarginMb,
+ saturationMargin);
+}
+
+ConversionResult<uint16_t> aidl2legacy_Parameter_BassBoost_uint16_strengthPm(
+ const Parameter& aidl) {
+ int strength = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, BassBoost, bassBoost, BassBoost::strengthPm, int));
+ return VALUE_OR_RETURN(convertIntegral<uint16_t>(strength));
+}
+
+ConversionResult<Parameter> legacy2aidl_uint16_strengthPm_Parameter_BassBoost(uint16_t legacy) {
+ int strength = VALUE_OR_RETURN(convertIntegral<int>(legacy));
+ return MAKE_SPECIFIC_PARAMETER(BassBoost, bassBoost, strengthPm, strength);
+}
+
+ConversionResult<int16_t> aidl2legacy_Parameter_Downmix_int16_type(const Parameter& aidl) {
+ Downmix::Type aidlType = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, Downmix, downmix, Downmix::type, Downmix::Type));
+ return VALUE_OR_RETURN(convertIntegral<int16_t>(static_cast<uint32_t>(aidlType)));
+}
+
+ConversionResult<Parameter> legacy2aidl_int16_type_Parameter_Downmix(int16_t legacy) {
+ if (legacy > (uint32_t) Downmix::Type::FOLD) {
+ return unexpected(BAD_VALUE);
+ }
+ Downmix::Type aidlType = static_cast<Downmix::Type>(legacy);
+ return MAKE_SPECIFIC_PARAMETER(Downmix, downmix, type, aidlType);
+}
+
+ConversionResult<int16_t> aidl2legacy_DynamicsProcessing_ResolutionPreference_uint32_(
+ const Parameter& aidl) {
+ Downmix::Type aidlType = VALUE_OR_RETURN(
+ GET_PARAMETER_SPECIFIC_FIELD(aidl, Downmix, downmix, Downmix::type, Downmix::Type));
+ return VALUE_OR_RETURN(convertIntegral<int16_t>(static_cast<uint32_t>(aidlType)));
+}
+
+ConversionResult<DynamicsProcessing::ResolutionPreference>
+legacy2aidl_int32_DynamicsProcessing_ResolutionPreference(int32_t legacy) {
+ if (legacy > (int32_t)DynamicsProcessing::ResolutionPreference::FAVOR_TIME_RESOLUTION) {
+ return unexpected(BAD_VALUE);
+ }
+ return static_cast<DynamicsProcessing::ResolutionPreference>(legacy);
+}
+
+ConversionResult<int32_t> aidl2legacy_DynamicsProcessing_ResolutionPreference_int32(
+ DynamicsProcessing::ResolutionPreference aidl) {
+ return static_cast<int32_t>(aidl);
+}
+
+} // namespace android
+} // namespace aidl
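A minimal round-trip sketch of the AGC gain helpers defined above (the wrapper function and its name are illustrative; only the two conversion helpers come from this change):

    #include <media/AidlConversionEffect.h>

    namespace aidl::android {

    // Round-trips a fixed digital gain (in millibels) through the new AGC helpers.
    bool roundTripAgcFixedDigitalGain(uint32_t legacyGainMb) {
        auto aidlParam = legacy2aidl_uint32_fixedDigitalGain_Parameter_agc(legacyGainMb);
        if (!aidlParam.has_value()) return false;
        auto legacyBack = aidl2legacy_Parameter_agc_uint32_fixedDigitalGain(aidlParam.value());
        // The millibel value should survive the AIDL <-> legacy conversion unchanged.
        return legacyBack.has_value() && legacyBack.value() == legacyGainMb;
    }

    }  // namespace aidl::android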
diff --git a/media/audioaidlconversion/AidlConversionNdk.cpp b/media/audioaidlconversion/AidlConversionNdk.cpp
index a3e39c7..7c63339 100644
--- a/media/audioaidlconversion/AidlConversionNdk.cpp
+++ b/media/audioaidlconversion/AidlConversionNdk.cpp
@@ -29,197 +29,7 @@
namespace aidl {
namespace android {
-using ::aidl::android::hardware::audio::effect::Descriptor;
-using ::aidl::android::hardware::audio::effect::Flags;
-
-using ::android::BAD_VALUE;
-using ::android::base::unexpected;
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// Converters
-
-ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(Flags::Type type) {
- switch (type) {
- case Flags::Type::INSERT:
- return EFFECT_FLAG_TYPE_INSERT;
- case Flags::Type::AUXILIARY:
- return EFFECT_FLAG_TYPE_AUXILIARY;
- case Flags::Type::REPLACE:
- return EFFECT_FLAG_TYPE_REPLACE;
- case Flags::Type::PRE_PROC:
- return EFFECT_FLAG_TYPE_PRE_PROC;
- case Flags::Type::POST_PROC:
- return EFFECT_FLAG_TYPE_POST_PROC;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(Flags::Insert insert) {
- switch (insert) {
- case Flags::Insert::ANY:
- return EFFECT_FLAG_INSERT_ANY;
- case Flags::Insert::FIRST:
- return EFFECT_FLAG_INSERT_FIRST;
- case Flags::Insert::LAST:
- return EFFECT_FLAG_INSERT_LAST;
- case Flags::Insert::EXCLUSIVE:
- return EFFECT_FLAG_INSERT_EXCLUSIVE;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(Flags::Volume volume) {
- switch (volume) {
- case Flags::Volume::NONE:
- return 0;
- case Flags::Volume::CTRL:
- return EFFECT_FLAG_VOLUME_CTRL;
- case Flags::Volume::IND:
- return EFFECT_FLAG_VOLUME_IND;
- case Flags::Volume::MONITOR:
- return EFFECT_FLAG_VOLUME_MONITOR;
- }
- return unexpected(BAD_VALUE);
-}
-ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
- Flags::HardwareAccelerator hwAcceleratorMode) {
- switch (hwAcceleratorMode) {
- case Flags::HardwareAccelerator::NONE:
- return 0;
- case Flags::HardwareAccelerator::SIMPLE:
- return EFFECT_FLAG_HW_ACC_SIMPLE;
- case Flags::HardwareAccelerator::TUNNEL:
- return EFFECT_FLAG_HW_ACC_TUNNEL;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<uint32_t> aidl2legacy_Flags_uint32(Flags aidl) {
- uint32_t legacy = 0;
- legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Type_uint32(aidl.type));
- legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Insert_uint32(aidl.insert));
- legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_Volume_uint32(aidl.volume));
- legacy |= VALUE_OR_RETURN(aidl2legacy_Flags_HardwareAccelerator_uint32(aidl.hwAcceleratorMode));
-
- if (aidl.offloadIndication) {
- legacy |= EFFECT_FLAG_OFFLOAD_SUPPORTED;
- }
- if (aidl.deviceIndication) {
- legacy |= EFFECT_FLAG_DEVICE_IND;
- }
- if (aidl.audioModeIndication) {
- legacy |= EFFECT_FLAG_AUDIO_MODE_IND;
- }
- if (aidl.audioSourceIndication) {
- legacy |= EFFECT_FLAG_AUDIO_SOURCE_IND;
- }
- if (aidl.noProcessing) {
- legacy |= EFFECT_FLAG_NO_PROCESS;
- }
- return legacy;
-}
-
-ConversionResult<Flags::Type> legacy2aidl_uint32_Flags_Type(uint32_t legacy) {
- switch (legacy & EFFECT_FLAG_TYPE_MASK) {
- case EFFECT_FLAG_TYPE_INSERT:
- return Flags::Type::INSERT;
- case EFFECT_FLAG_TYPE_AUXILIARY:
- return Flags::Type::AUXILIARY;
- case EFFECT_FLAG_TYPE_REPLACE:
- return Flags::Type::REPLACE;
- case EFFECT_FLAG_TYPE_PRE_PROC:
- return Flags::Type::PRE_PROC;
- case EFFECT_FLAG_TYPE_POST_PROC:
- return Flags::Type::POST_PROC;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<Flags::Insert> legacy2aidl_uint32_Flags_Insert(uint32_t legacy) {
- switch (legacy & EFFECT_FLAG_INSERT_MASK) {
- case EFFECT_FLAG_INSERT_ANY:
- return Flags::Insert::ANY;
- case EFFECT_FLAG_INSERT_FIRST:
- return Flags::Insert::FIRST;
- case EFFECT_FLAG_INSERT_LAST:
- return Flags::Insert::LAST;
- case EFFECT_FLAG_INSERT_EXCLUSIVE:
- return Flags::Insert::EXCLUSIVE;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<Flags::Volume> legacy2aidl_uint32_Flags_Volume(uint32_t legacy) {
- switch (legacy & EFFECT_FLAG_VOLUME_MASK) {
- case EFFECT_FLAG_VOLUME_IND:
- return Flags::Volume::IND;
- case EFFECT_FLAG_VOLUME_MONITOR:
- return Flags::Volume::MONITOR;
- case EFFECT_FLAG_VOLUME_NONE:
- return Flags::Volume::NONE;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<Flags::HardwareAccelerator> legacy2aidl_uint32_Flags_HardwareAccelerator(
- uint32_t legacy) {
- switch (legacy & EFFECT_FLAG_HW_ACC_MASK) {
- case EFFECT_FLAG_HW_ACC_SIMPLE:
- return Flags::HardwareAccelerator::SIMPLE;
- case EFFECT_FLAG_HW_ACC_TUNNEL:
- return Flags::HardwareAccelerator::TUNNEL;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<Flags> legacy2aidl_uint32_Flags(uint32_t legacy) {
- Flags aidl;
-
- aidl.type = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Type(legacy));
- aidl.insert = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Insert(legacy));
- aidl.volume = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_Volume(legacy));
- aidl.hwAcceleratorMode = VALUE_OR_RETURN(legacy2aidl_uint32_Flags_HardwareAccelerator(legacy));
- aidl.offloadIndication = (legacy & EFFECT_FLAG_OFFLOAD_SUPPORTED);
- aidl.deviceIndication = (legacy & EFFECT_FLAG_DEVICE_IND);
- aidl.audioModeIndication = (legacy & EFFECT_FLAG_AUDIO_MODE_IND);
- aidl.audioSourceIndication = (legacy & EFFECT_FLAG_AUDIO_SOURCE_IND);
- aidl.noProcessing = (legacy & EFFECT_FLAG_NO_PROCESS);
- return aidl;
-}
-
-ConversionResult<effect_descriptor_t>
-aidl2legacy_Descriptor_effect_descriptor(const Descriptor& aidl) {
- effect_descriptor_t legacy;
- legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.type));
- legacy.uuid = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.common.id.uuid));
- // legacy descriptor doesn't have proxy information
- // proxy = VALUE_OR_RETURN(aidl2legacy_AudioUuid_audio_uuid_t(aidl.proxy));
- legacy.apiVersion = EFFECT_CONTROL_API_VERSION;
- legacy.flags = VALUE_OR_RETURN(aidl2legacy_Flags_uint32(aidl.common.flags));
- legacy.cpuLoad = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.cpuLoad));
- legacy.memoryUsage = VALUE_OR_RETURN(convertIntegral<uint16_t>(aidl.common.memoryUsage));
- RETURN_IF_ERROR(aidl2legacy_string(aidl.common.name, legacy.name, sizeof(legacy.name)));
- RETURN_IF_ERROR(aidl2legacy_string(aidl.common.implementor, legacy.implementor,
- sizeof(legacy.implementor)));
- return legacy;
-}
-
-ConversionResult<Descriptor>
-legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& legacy) {
- Descriptor aidl;
- aidl.common.id.type = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.type));
- aidl.common.id.uuid = VALUE_OR_RETURN(legacy2aidl_audio_uuid_t_AudioUuid(legacy.uuid));
- // legacy descriptor doesn't have proxy information
- // aidl.common.id.proxy
- aidl.common.flags = VALUE_OR_RETURN(legacy2aidl_uint32_Flags(legacy.flags));
- aidl.common.cpuLoad = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.cpuLoad));
- aidl.common.memoryUsage = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.memoryUsage));
- aidl.common.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
- aidl.common.implementor =
- VALUE_OR_RETURN(legacy2aidl_string(legacy.implementor, sizeof(legacy.implementor)));
- return aidl;
-}
-
+// buffer_provider_t is not supported, thus it is skipped
ConversionResult<buffer_config_t> aidl2legacy_AudioConfigBase_buffer_config_t(
const media::audio::common::AudioConfigBase& aidl, bool isInput) {
buffer_config_t legacy;
@@ -234,11 +44,12 @@
legacy.format = VALUE_OR_RETURN(aidl2legacy_AudioFormatDescription_audio_format_t(aidl.format));
legacy.mask |= EFFECT_CONFIG_FORMAT;
+ // TODO: add accessMode and mask
return legacy;
}
ConversionResult<media::audio::common::AudioConfigBase>
-legacy2aidl_AudioConfigBase_buffer_config_t(const buffer_config_t& legacy, bool isInput) {
+legacy2aidl_buffer_config_t_AudioConfigBase(const buffer_config_t& legacy, bool isInput) {
media::audio::common::AudioConfigBase aidl;
if (legacy.mask & EFFECT_CONFIG_SMP_RATE) {
@@ -252,6 +63,8 @@
aidl.format = VALUE_OR_RETURN(legacy2aidl_audio_format_t_AudioFormatDescription(
static_cast<audio_format_t>(legacy.format)));
}
+
+ // TODO: add accessMode and mask
return aidl;
}
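A usage sketch for the remaining NDK conversion in this file (the wrapper below is illustrative; the buffer_config_t fields and EFFECT_CONFIG_* bits are assumed from system/audio_effect.h):

    #include <media/AidlConversionNdk.h>

    namespace aidl::android {

    // Converts an input-side AudioConfigBase from the NDK AIDL backend into the
    // legacy buffer_config_t and checks which fields were populated.
    void useLegacyBufferConfig(const media::audio::common::AudioConfigBase& aidlConfig) {
        auto legacy = aidl2legacy_AudioConfigBase_buffer_config_t(aidlConfig, true /* isInput */);
        if (!legacy.has_value()) return;
        // Only the fields flagged in 'mask' (EFFECT_CONFIG_SMP_RATE / _CHANNELS /
        // _FORMAT) were filled in; accessMode is still a TODO above.
        if (legacy.value().mask & EFFECT_CONFIG_FORMAT) {
            audio_format_t format = static_cast<audio_format_t>(legacy.value().format);
            (void)format;
        }
    }

    }  // namespace aidl::android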
diff --git a/media/audioaidlconversion/Android.bp b/media/audioaidlconversion/Android.bp
index 86f455e..c0024ef 100644
--- a/media/audioaidlconversion/Android.bp
+++ b/media/audioaidlconversion/Android.bp
@@ -135,11 +135,40 @@
],
defaults: [
"audio_aidl_conversion_common_default",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ shared_libs: [
+ "libbinder_ndk",
+ "libbase",
+ ],
+ cflags: [
+ "-DBACKEND_NDK",
+ ],
+ min_sdk_version: "31", //AParcelableHolder has been introduced in 31
+}
+
+/**
+ * Includes only the AIDL effect HAL conversions.
+ */
+cc_library {
+ name: "libaudio_aidl_conversion_effect_ndk",
+ srcs: [
+ "AidlConversionEffect.cpp",
+ ],
+ header_libs: [
+ "libaudio_aidl_conversion_common_util_ndk",
+ ],
+ export_header_lib_headers: [
+ "libaudio_aidl_conversion_common_util_ndk",
+ ],
+ defaults: [
+ "audio_aidl_conversion_common_default",
"latest_android_hardware_audio_common_ndk_shared",
"latest_android_hardware_audio_effect_ndk_shared",
"latest_android_media_audio_common_types_ndk_shared",
],
shared_libs: [
+ "libaudio_aidl_conversion_common_ndk",
"libbinder_ndk",
"libbase",
],
diff --git a/media/audioaidlconversion/include/media/AidlConversionCppNdk.h b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
index c25ddb1..c412238 100644
--- a/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
+++ b/media/audioaidlconversion/include/media/AidlConversionCppNdk.h
@@ -49,6 +49,8 @@
#include PREFIX(android/media/audio/common/AudioMode.h)
#include PREFIX(android/media/audio/common/AudioOffloadInfo.h)
#include PREFIX(android/media/audio/common/AudioOutputFlags.h)
+#include PREFIX(android/media/audio/common/AudioPort.h)
+#include PREFIX(android/media/audio/common/AudioPortConfig.h)
#include PREFIX(android/media/audio/common/AudioPortExt.h)
#include PREFIX(android/media/audio/common/AudioPortMixExt.h)
#include PREFIX(android/media/audio/common/AudioPlaybackRate.h)
@@ -67,6 +69,7 @@
using ::android::String16;
using ::android::String8;
+using ::android::status_t;
#if defined(BACKEND_NDK)
namespace aidl {
@@ -76,7 +79,7 @@
// maxSize is the size of the C-string buffer (including the 0-terminator), NOT the max length of
// the string.
-::android::status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize);
+status_t aidl2legacy_string(std::string_view aidl, char* dest, size_t maxSize);
ConversionResult<std::string> legacy2aidl_string(const char* legacy, size_t maxSize);
ConversionResult<audio_module_handle_t> aidl2legacy_int32_t_audio_module_handle_t(int32_t aidl);
@@ -122,6 +125,12 @@
ConversionResult<media::audio::common::AudioChannelLayout>
legacy2aidl_audio_channel_mask_t_AudioChannelLayout(audio_channel_mask_t legacy, bool isInput);
+enum class AudioPortDirection {
+ INPUT, OUTPUT
+};
+ConversionResult<AudioPortDirection> portDirection(audio_port_role_t role, audio_port_type_t type);
+ConversionResult<audio_port_role_t> portRole(AudioPortDirection direction, audio_port_type_t type);
+
ConversionResult<audio_config_t>
aidl2legacy_AudioConfig_audio_config_t(const media::audio::common::AudioConfig& aidl, bool isInput);
ConversionResult<media::audio::common::AudioConfig>
@@ -172,13 +181,13 @@
ConversionResult<media::audio::common::AudioDeviceDescription>
legacy2aidl_audio_devices_t_AudioDeviceDescription(audio_devices_t legacy);
-::android::status_t aidl2legacy_AudioDevice_audio_device(
+status_t aidl2legacy_AudioDevice_audio_device(
const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
char* legacyAddress);
-::android::status_t aidl2legacy_AudioDevice_audio_device(
+status_t aidl2legacy_AudioDevice_audio_device(
const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
String8* legacyAddress);
-::android::status_t aidl2legacy_AudioDevice_audio_device(
+status_t aidl2legacy_AudioDevice_audio_device(
const media::audio::common::AudioDevice& aidl, audio_devices_t* legacyType,
std::string* legacyAddress);
@@ -265,6 +274,48 @@
ConversionResult<media::audio::common::AudioOutputFlags>
legacy2aidl_audio_output_flags_t_AudioOutputFlags(audio_output_flags_t legacy);
+// This type is unnamed in the original definition, thus we name it here.
+using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
+ConversionResult<audio_port_config_mix_ext_usecase>
+aidl2legacy_AudioPortMixExtUseCase_audio_port_config_mix_ext_usecase(
+ const media::audio::common::AudioPortMixExtUseCase& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioPortMixExtUseCase>
+legacy2aidl_audio_port_config_mix_ext_usecase_AudioPortMixExtUseCase(
+ const audio_port_config_mix_ext_usecase& legacy, bool isInput);
+
+ConversionResult<audio_port_config_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
+ const media::audio::common::AudioPortDeviceExt& aidl);
+ConversionResult<media::audio::common::AudioPortDeviceExt>
+ legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
+ const audio_port_config_device_ext& legacy);
+
+status_t aidl2legacy_AudioPortConfig_audio_port_config(
+ const media::audio::common::AudioPortConfig& aidl, bool isInput,
+ audio_port_config* legacy, int32_t* portId);
+ConversionResult<media::audio::common::AudioPortConfig>
+legacy2aidl_audio_port_config_AudioPortConfig(
+ const audio_port_config& legacy, bool isInput, int32_t portId);
+
+ConversionResult<audio_port_mix_ext> aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
+ const media::audio::common::AudioPortMixExt& aidl);
+ConversionResult<media::audio::common::AudioPortMixExt>
+legacy2aidl_audio_port_mix_ext_AudioPortMixExt(
+ const audio_port_mix_ext& legacy);
+
+ConversionResult<audio_port_device_ext>
+aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
+ const media::audio::common::AudioPortDeviceExt& aidl);
+ConversionResult<media::audio::common::AudioPortDeviceExt>
+legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
+ const audio_port_device_ext& legacy);
+
+ConversionResult<audio_port_v7>
+aidl2legacy_AudioPort_audio_port_v7(
+ const media::audio::common::AudioPort& aidl, bool isInput);
+ConversionResult<media::audio::common::AudioPort>
+legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy, bool isInput);
+
ConversionResult<audio_profile> aidl2legacy_AudioProfile_audio_profile(
const media::audio::common::AudioProfile& aidl, bool isInput);
ConversionResult<media::audio::common::AudioProfile> legacy2aidl_audio_profile_AudioProfile(
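The decltype alias declared above for the anonymous usecase member is a general C++ technique; a self-contained sketch with an invented struct (only the pattern mirrors the header):

    // Sketch of the decltype-naming trick used for audio_port_config_mix_ext::usecase.
    struct Outer {
        union {              // this union type has no name of its own...
            int ioHandle;
            int source;
        } usecase;
    };

    // ...but a usable alias can be derived from the member that carries it,
    using OuterUseCase = decltype(Outer::usecase);

    // so conversion helpers can accept and return the otherwise unnameable type.
    OuterUseCase makeDefaultUseCase() {
        return OuterUseCase{};
    }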
diff --git a/media/audioaidlconversion/include/media/AidlConversionEffect.h b/media/audioaidlconversion/include/media/AidlConversionEffect.h
new file mode 100644
index 0000000..83aa614
--- /dev/null
+++ b/media/audioaidlconversion/include/media/AidlConversionEffect.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android/binder_auto_utils.h>
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+/**
+ * Can only handle conversions between AIDL (NDK backend) and legacy types.
+ */
+#include <hardware/audio_effect.h>
+#include <media/AidlConversionUtil.h>
+#include <system/audio_effect.h>
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+
+namespace aidl {
+namespace android {
+
+template <typename P, typename T, typename P::Specific::Tag tag>
+ConversionResult<T> getParameterSpecific(const P& u) {
+ const auto& spec = VALUE_OR_RETURN(UNION_GET(u, specific));
+ return unionGetField<typename P::Specific, tag>(spec);
+}
+
+template <typename P, typename T, typename P::Specific::Tag tag, typename T::Tag field, typename F>
+ConversionResult<F> getParameterSpecificField(const P& u) {
+ const auto& spec =
+ VALUE_OR_RETURN((getParameterSpecific<std::decay_t<decltype(u)>, T, tag>(u)));
+ return VALUE_OR_RETURN((unionGetField<T, field>(spec)));
+}
+
+#define GET_PARAMETER_SPECIFIC_FIELD(u, specific, tag, field, fieldType) \
+ getParameterSpecificField<std::decay_t<decltype(u)>, specific, \
+ aidl::android::hardware::audio::effect::Parameter::Specific::tag, \
+ specific::field, fieldType>(u)
+
+#define MAKE_SPECIFIC_PARAMETER(spec, tag, field, value) \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter, specific, \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Specific, tag, \
+ UNION_MAKE(spec, field, value)))
+
+#define MAKE_SPECIFIC_PARAMETER_ID(spec, tag, field) \
+ UNION_MAKE(aidl::android::hardware::audio::effect::Parameter::Id, tag, \
+ UNION_MAKE(spec::Id, commonTag, field))
+
+ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Type type);
+ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Insert insert);
+ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::Volume volume);
+ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
+ ::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator hwAcceleratorMode);
+ConversionResult<uint32_t> aidl2legacy_Flags_uint32(
+ const ::aidl::android::hardware::audio::effect::Flags aidl);
+
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Type>
+legacy2aidl_uint32_Flags_Type(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Insert>
+legacy2aidl_uint32_Flags_Insert(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::Volume>
+legacy2aidl_uint32_Flags_Volume(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator>
+legacy2aidl_uint32_Flags_HardwareAccelerator(uint32_t legacy);
+ConversionResult<::aidl::android::hardware::audio::effect::Flags> legacy2aidl_uint32_Flags(
+ uint32_t hal);
+
+ConversionResult<effect_descriptor_t> aidl2legacy_Descriptor_effect_descriptor(
+ const ::aidl::android::hardware::audio::effect::Descriptor& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Descriptor>
+legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& hal);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_aec_uint32_echoDelay(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint32_echoDelay_Parameter_aec(uint32_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_aec_uint32_mobileMode(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint32_mobileMode_Parameter_aec(uint32_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_fixedDigitalGain(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint32_fixedDigitalGain_Parameter_agc(uint32_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_levelEstimator(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint32_levelEstimator_Parameter_agc(uint32_t legacy);
+
+ConversionResult<uint32_t> aidl2legacy_Parameter_agc_uint32_saturationMargin(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint32_saturationMargin_Parameter_agc(uint32_t legacy);
+
+ConversionResult<uint16_t> aidl2legacy_Parameter_BassBoost_uint16_strengthPm(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_uint16_strengthPm_Parameter_BassBoost(uint16_t legacy);
+
+ConversionResult<int16_t> aidl2legacy_Parameter_Downmix_int16_type(
+ const ::aidl::android::hardware::audio::effect::Parameter& aidl);
+ConversionResult<::aidl::android::hardware::audio::effect::Parameter>
+legacy2aidl_int16_type_Parameter_Downmix(int16_t legacy);
+
+ConversionResult<::aidl::android::hardware::audio::effect::DynamicsProcessing::ResolutionPreference>
+legacy2aidl_int32_DynamicsProcessing_ResolutionPreference(int32_t legacy);
+ConversionResult<int32_t> aidl2legacy_DynamicsProcessing_ResolutionPreference_int32(
+ ::aidl::android::hardware::audio::effect::DynamicsProcessing::ResolutionPreference aidl);
+
+} // namespace android
+} // namespace aidl
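A sketch of how the parameter macros in this header are meant to be used, mirroring the BassBoost helpers in AidlConversionEffect.cpp earlier in this change (the wrapper function is illustrative):

    #include <media/AidlConversionEffect.h>

    namespace aidl::android {

    using ::aidl::android::hardware::audio::effect::BassBoost;
    using ::aidl::android::hardware::audio::effect::Parameter;

    // Builds a BassBoost strength Parameter and reads the field back.
    ConversionResult<int> roundTripBassBoostStrength(int strength) {
        Parameter param = MAKE_SPECIFIC_PARAMETER(BassBoost, bassBoost, strengthPm, strength);
        return GET_PARAMETER_SPECIFIC_FIELD(param, BassBoost, bassBoost,
                                            BassBoost::strengthPm, int);
    }

    }  // namespace aidl::android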
diff --git a/media/audioaidlconversion/include/media/AidlConversionNdk.h b/media/audioaidlconversion/include/media/AidlConversionNdk.h
index a3176f6..98a7d41 100644
--- a/media/audioaidlconversion/include/media/AidlConversionNdk.h
+++ b/media/audioaidlconversion/include/media/AidlConversionNdk.h
@@ -26,42 +26,14 @@
#include <hardware/audio_effect.h>
#include <media/AidlConversionUtil.h>
#include <system/audio_effect.h>
-
-#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include <aidl/android/media/audio/common/AudioConfig.h>
namespace aidl {
namespace android {
-ConversionResult<uint32_t> aidl2legacy_Flags_Type_uint32(
- ::aidl::android::hardware::audio::effect::Flags::Type type);
-ConversionResult<uint32_t> aidl2legacy_Flags_Insert_uint32(
- ::aidl::android::hardware::audio::effect::Flags::Insert insert);
-ConversionResult<uint32_t> aidl2legacy_Flags_Volume_uint32(
- ::aidl::android::hardware::audio::effect::Flags::Volume volume);
-ConversionResult<uint32_t> aidl2legacy_Flags_HardwareAccelerator_uint32(
- ::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator hwAcceleratorMode);
-ConversionResult<uint32_t> aidl2legacy_Flags_uint32(
- const ::aidl::android::hardware::audio::effect::Flags aidl);
-
-ConversionResult<::aidl::android::hardware::audio::effect::Flags::Type>
-legacy2aidl_uint32_Flags_Type(uint32_t legacy);
-ConversionResult<::aidl::android::hardware::audio::effect::Flags::Insert>
-legacy2aidl_uint32_Flags_Insert(uint32_t legacy);
-ConversionResult<::aidl::android::hardware::audio::effect::Flags::Volume>
-legacy2aidl_uint32_Flags_Volume(uint32_t legacy);
-ConversionResult<::aidl::android::hardware::audio::effect::Flags::HardwareAccelerator>
-legacy2aidl_uint32_Flags_HardwareAccelerator(uint32_t legacy);
-ConversionResult<::aidl::android::hardware::audio::effect::Flags> legacy2aidl_uint32_Flags(
- uint32_t hal);
-
-ConversionResult<effect_descriptor_t> aidl2legacy_Descriptor_effect_descriptor(
- const ::aidl::android::hardware::audio::effect::Descriptor& aidl);
-ConversionResult<::aidl::android::hardware::audio::effect::Descriptor>
-legacy2aidl_effect_descriptor_Descriptor(const effect_descriptor_t& hal);
-
ConversionResult<buffer_config_t> aidl2legacy_AudioConfigBase_buffer_config_t(
const media::audio::common::AudioConfigBase& aidl, bool isInput);
-ConversionResult<media::audio::common::AudioConfigBase> legacy2aidl_AudioConfigBase_buffer_config_t(
+ConversionResult<media::audio::common::AudioConfigBase> legacy2aidl_buffer_config_t_AudioConfigBase(
const buffer_config_t& legacy, bool isInput);
} // namespace android
diff --git a/media/audioaidlconversion/include/media/AidlConversionUtil.h b/media/audioaidlconversion/include/media/AidlConversionUtil.h
index 28c7522..8b2e0de 100644
--- a/media/audioaidlconversion/include/media/AidlConversionUtil.h
+++ b/media/audioaidlconversion/include/media/AidlConversionUtil.h
@@ -25,6 +25,7 @@
#include <error/Result.h>
#if defined(BACKEND_NDK)
+#include <android/binder_auto_utils.h>
#include <android/binder_enums.h>
#include <android/binder_status.h>
@@ -277,6 +278,8 @@
#define UNION_SET(u, field, value) \
(u).set<std::decay_t<decltype(u)>::Tag::field>(value)
+#define UNION_MAKE(u, field, value) u::make<u::Tag::field>(value)
+
namespace aidl_utils {
/**
@@ -362,6 +365,20 @@
// standard Java exception (fromExceptionCode)
}
+#if defined(BACKEND_NDK)
+static inline ::android::status_t statusTFromBinderStatus(const ::ndk::ScopedAStatus &status) {
+ // What we want to do is to 'return statusTFromBinderStatus(status.get()->get())'
+ // However, since the definition of AStatus is not exposed, we have to do the same
+ // via methods of ScopedAStatus:
+ return status.isOk() ? ::android::OK // check ::android::OK,
+ : status.getServiceSpecificError() // service-side error, not standard Java exception
+ // (fromServiceSpecificError)
+ ?: status.getStatus() // a native binder transaction error (fromStatusT)
+ ?: statusTFromExceptionCode(status.getExceptionCode()); // a service-side error with a
+ // standard Java exception (fromExceptionCode)
+}
+#endif
+
/**
* Return a binder::Status from native service status.
*
@@ -396,11 +413,10 @@
return Status::fromServiceSpecificError(status, emptyIfNull);
}
-
} // namespace aidl_utils
} // namespace android
#if defined(BACKEND_NDK)
} // namespace aidl
-#endif
\ No newline at end of file
+#endif
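A short sketch of the new NDK-backend status helper in use (requires -DBACKEND_NDK; the caller function is illustrative):

    #include <android/binder_auto_utils.h>
    #include <media/AidlConversionUtil.h>

    // Collapses a ScopedAStatus returned by an NDK AIDL call into a status_t:
    // OK stays OK; otherwise the service-specific error, the binder transaction
    // error, or the mapped exception code is returned, in that order.
    ::android::status_t toStatusT(const ::ndk::ScopedAStatus& status) {
        return ::aidl::android::aidl_utils::statusTFromBinderStatus(status);
    }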
diff --git a/media/audioserver/main_audioserver.cpp b/media/audioserver/main_audioserver.cpp
index e3db5b4..1e3bfe0 100644
--- a/media/audioserver/main_audioserver.cpp
+++ b/media/audioserver/main_audioserver.cpp
@@ -50,6 +50,8 @@
int main(int argc __unused, char **argv)
{
+ ALOGD("%s: starting", __func__);
+ const auto startTime = std::chrono::steady_clock::now();
// TODO: update with refined parameters
limitProcessMemory(
"audio.maxmem", /* "ro.audio.maxmem", property that defines limit */
@@ -144,11 +146,36 @@
setpgid(0, 0); // but if I die first, don't kill my parent
}
android::hardware::configureRpcThreadpool(4, false /*callerWillJoin*/);
- sp<ProcessState> proc(ProcessState::self());
+
+ // Ensure threads exist for possible callbacks. Note that get_audio_flinger() does
+ // this automatically when called from AudioPolicy, but we do it here anyway.
+ ProcessState::self()->startThreadPool();
+
+ // Instantiating AudioFlinger (making it public, e.g. through ::initialize())
+ // and then instantiating AudioPolicy (and making it public)
+ // leads to situations where AudioFlinger is accessed remotely before
+ // AudioPolicy is initialized. Not only might this
+ // cause inaccurate results, but if AudioPolicy has slow audio HAL
+ // initialization, it can cause a TimeCheck abort to occur on an AudioFlinger
+ // call which tries to access AudioPolicy.
+ //
+ // We create AudioFlinger and AudioPolicy locally, then make them public via ServiceManager.
+ // This requires both AudioFlinger and AudioPolicy to be in-proc.
+ //
+ const auto af = sp<AudioFlinger>::make();
+ const auto afAdapter = sp<AudioFlingerServerAdapter>::make(af);
+ ALOGD("%s: AudioFlinger created", __func__);
+ ALOGW_IF(AudioSystem::setLocalAudioFlinger(af) != OK,
+ "%s: AudioSystem already has an AudioFlinger instance!", __func__);
+ const auto aps = sp<AudioPolicyService>::make();
+ ALOGD("%s: AudioPolicy created", __func__);
+
+ // Add AudioFlinger and AudioPolicy to ServiceManager.
sp<IServiceManager> sm = defaultServiceManager();
- ALOGI("ServiceManager: %p", sm.get());
- AudioFlinger::instantiate();
- AudioPolicyService::instantiate();
+ sm->addService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME), afAdapter,
+ false /* allowIsolated */, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT);
+ sm->addService(String16(AudioPolicyService::getServiceName()), aps,
+ false /* allowIsolated */, IServiceManager::DUMP_FLAG_PRIORITY_DEFAULT);
// AAudioService should only be used in OC-MR1 and later.
// And only enable the AAudioService if the system MMAP policy explicitly allows it.
@@ -156,7 +183,6 @@
// If we cannot get audio flinger here, there must be some serious problems. In that case,
// attempting to call audio flinger on a null pointer could make the process crash
 // and attract attention.
- sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
std::vector<AudioMMapPolicyInfo> policyInfos;
status_t status = af->getMmapPolicyInfos(
AudioMMapPolicyType::DEFAULT, &policyInfos);
@@ -169,11 +195,14 @@
})) {
AAudioService::instantiate();
} else {
- ALOGD("Do not init aaudio service, status %d, policy info size %zu",
- status, policyInfos.size());
+ ALOGD("%s: Do not init aaudio service, status %d, policy info size %zu",
+ __func__, status, policyInfos.size());
}
-
- ProcessState::self()->startThreadPool();
+ const auto endTime = std::chrono::steady_clock::now();
+ using FloatMillis = std::chrono::duration<float, std::milli>;
+ const float timeTaken = std::chrono::duration_cast<FloatMillis>(
+ endTime - startTime).count();
+ ALOGI("%s: initialization done in %.3f ms, joining thread pool", __func__, timeTaken);
IPCThreadState::self()->joinThreadPool();
}
}
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 96b81d7..0eb47f4 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -578,7 +578,8 @@
size_t srcVStride = img->stride[AOM_PLANE_V];
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
- size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstUStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstVStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
if (img->fmt == AOM_IMG_FMT_I42016) {
const uint16_t *srcY = (const uint16_t *)img->planes[AOM_PLANE_Y];
@@ -592,7 +593,7 @@
std::static_pointer_cast<const C2ColorAspectsStruct>(defaultColorAspects));
} else {
convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUStride,
mWidth, mHeight);
}
} else {
@@ -600,7 +601,7 @@
const uint8_t *srcU = (const uint8_t *)img->planes[AOM_PLANE_U];
const uint8_t *srcV = (const uint8_t *)img->planes[AOM_PLANE_V];
convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
- srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
+ srcVStride, dstYStride, dstUStride, dstVStride, mWidth, mHeight);
}
finishWork(*(int64_t*)img->user_priv, work, std::move(block));
block = nullptr;
diff --git a/media/codec2/components/aom/C2SoftAomEnc.cpp b/media/codec2/components/aom/C2SoftAomEnc.cpp
index f5620a4..9102a97 100644
--- a/media/codec2/components/aom/C2SoftAomEnc.cpp
+++ b/media/codec2/components/aom/C2SoftAomEnc.cpp
@@ -22,6 +22,8 @@
#include <media/stagefright/foundation/MediaDefs.h>
#include <C2Debug.h>
+#include <Codec2CommonUtils.h>
+#include <Codec2Mapper.h>
#include <C2PlatformSupport.h>
#include <SimpleC2Interface.h>
@@ -62,7 +64,8 @@
0u, C2Config::BITRATE_VARIABLE))
.withFields({C2F(mBitrateMode, value)
.oneOf({C2Config::BITRATE_CONST,
- C2Config::BITRATE_VARIABLE})})
+ C2Config::BITRATE_VARIABLE,
+ C2Config::BITRATE_IGNORE})})
.withSetter(Setter<decltype(*mBitrateMode)>::StrictValueWithNoDeps)
.build());
@@ -85,6 +88,12 @@
.withSetter(BitrateSetter)
.build());
+ addParameter(DefineParam(mQuality, C2_PARAMKEY_QUALITY)
+ .withDefault(new C2StreamQualityTuning::output(0u, 80))
+ .withFields({C2F(mQuality, value).inRange(0, 100)})
+ .withSetter(Setter<decltype(*mQuality)>::NonStrictValueWithNoDeps)
+ .build());
+
addParameter(DefineParam(mIntraRefresh, C2_PARAMKEY_INTRA_REFRESH)
.withConstValue(new C2StreamIntraRefreshTuning::output(
0u, C2Config::INTRA_REFRESH_DISABLED, 0.))
@@ -104,19 +113,18 @@
.withSetter(ProfileLevelSetter)
.build());
+ std::vector<uint32_t> pixelFormats = {HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
+ HAL_PIXEL_FORMAT_YCBCR_420_888};
+ if (isHalPixelFormatSupported((AHardwareBuffer_Format)HAL_PIXEL_FORMAT_YCBCR_P010)) {
+ pixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
+ }
addParameter(DefineParam(mPixelFormat, C2_PARAMKEY_PIXEL_FORMAT)
- .withDefault(new C2StreamPixelFormatInfo::output(
+ .withDefault(new C2StreamPixelFormatInfo::input(
0u, HAL_PIXEL_FORMAT_YCBCR_420_888))
- .withFields({C2F(mPixelFormat, value).oneOf({
- HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED,
- HAL_PIXEL_FORMAT_YCBCR_420_888,
- HAL_PIXEL_FORMAT_YCBCR_P010
- })
- })
+ .withFields({C2F(mPixelFormat, value).oneOf({pixelFormats})})
.withSetter((Setter<decltype(*mPixelFormat)>::StrictValueWithNoDeps))
.build());
-
addParameter(DefineParam(mRequestSync, C2_PARAMKEY_REQUEST_SYNC_FRAME)
.withDefault(new C2StreamRequestSyncFrameTuning::output(0u, C2_FALSE))
.withFields({C2F(mRequestSync, value).oneOf({C2_FALSE, C2_TRUE})})
@@ -292,6 +300,12 @@
return onStop();
}
+// c2Quality is in the range 0-100 (higher is better);
+// for AOM quality we use the range 15-50 (lower is better).
+static int MapC2QualityToAOMQuality(int c2Quality) {
+ return 15 + 35 * (100 - c2Quality) / 100;
+}
+
aom_codec_err_t C2SoftAomEnc::setupCodecParameters() {
aom_codec_err_t codec_return = AOM_CODEC_OK;
@@ -390,6 +404,45 @@
codec_return = aom_codec_control(mCodecContext, AV1E_SET_MAX_REFERENCE_FRAMES, 3);
if (codec_return != AOM_CODEC_OK) goto BailOut;
+ if (mBitrateControlMode == AOM_Q) {
+ const int aomCQLevel = MapC2QualityToAOMQuality(mQuality->value);
+ ALOGV("Set Q from %d to CQL %d",
+ mQuality->value, aomCQLevel);
+
+ codec_return = aom_codec_control(mCodecContext, AOME_SET_CQ_LEVEL, aomCQLevel);
+ if (codec_return != AOM_CODEC_OK) goto BailOut;
+ }
+
+ ColorAspects sfAspects;
+ if (!C2Mapper::map(mColorAspects->primaries, &sfAspects.mPrimaries)) {
+ sfAspects.mPrimaries = android::ColorAspects::PrimariesUnspecified;
+ }
+ if (!C2Mapper::map(mColorAspects->range, &sfAspects.mRange)) {
+ sfAspects.mRange = android::ColorAspects::RangeUnspecified;
+ }
+ if (!C2Mapper::map(mColorAspects->matrix, &sfAspects.mMatrixCoeffs)) {
+ sfAspects.mMatrixCoeffs = android::ColorAspects::MatrixUnspecified;
+ }
+ if (!C2Mapper::map(mColorAspects->transfer, &sfAspects.mTransfer)) {
+ sfAspects.mTransfer = android::ColorAspects::TransferUnspecified;
+ }
+ int32_t primaries, transfer, matrixCoeffs;
+ bool range;
+ ColorUtils::convertCodecColorAspectsToIsoAspects(sfAspects,
+ &primaries,
+ &transfer,
+ &matrixCoeffs,
+ &range);
+
+ codec_return = aom_codec_control(mCodecContext, AV1E_SET_COLOR_RANGE, range);
+ if (codec_return != AOM_CODEC_OK) goto BailOut;
+ codec_return = aom_codec_control(mCodecContext, AV1E_SET_COLOR_PRIMARIES, primaries);
+ if (codec_return != AOM_CODEC_OK) goto BailOut;
+ codec_return = aom_codec_control(mCodecContext, AV1E_SET_TRANSFER_CHARACTERISTICS, transfer);
+ if (codec_return != AOM_CODEC_OK) goto BailOut;
+ codec_return = aom_codec_control(mCodecContext, AV1E_SET_MATRIX_COEFFICIENTS, matrixCoeffs);
+ if (codec_return != AOM_CODEC_OK) goto BailOut;
+
BailOut:
return codec_return;
}
@@ -406,6 +459,8 @@
mFrameRate = mIntf->getFrameRate_l();
mIntraRefresh = mIntf->getIntraRefresh_l();
mRequestSync = mIntf->getRequestSync_l();
+ mColorAspects = mIntf->getCodedColorAspects_l();
+ mQuality = mIntf->getQuality_l();
}
@@ -413,6 +468,9 @@
case C2Config::BITRATE_CONST:
mBitrateControlMode = AOM_CBR;
break;
+ case C2Config::BITRATE_IGNORE:
+ mBitrateControlMode = AOM_Q;
+ break;
case C2Config::BITRATE_VARIABLE:
[[fallthrough]];
default:
@@ -452,7 +510,7 @@
mCodecConfiguration->g_timebase.den = 1000000;
// rc_target_bitrate is in kbps, mBitrate in bps
mCodecConfiguration->rc_target_bitrate = (mBitrate->value + 500) / 1000;
- mCodecConfiguration->rc_end_usage = AOM_CBR;
+ mCodecConfiguration->rc_end_usage = mBitrateControlMode == AOM_Q ? AOM_Q : AOM_CBR;
// Disable frame drop - not allowed in MediaCodec now.
mCodecConfiguration->rc_dropframe_thresh = 0;
// Disable lagged encoding.
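Since the CQ-level mapping above is easy to misread, here is a constexpr copy with its endpoints checked at compile time (the copy is only a sketch for verification; MapC2QualityToAOMQuality() itself is what the change adds):

    // Same mapping as MapC2QualityToAOMQuality(), restated for verification.
    constexpr int mapC2QualityToAomQuality(int c2Quality) {
        return 15 + 35 * (100 - c2Quality) / 100;
    }
    static_assert(mapC2QualityToAomQuality(100) == 15, "best c2 quality -> lowest AOM CQ level");
    static_assert(mapC2QualityToAomQuality(80) == 22, "default quality (80) -> CQ level 22");
    static_assert(mapC2QualityToAomQuality(0) == 50, "worst c2 quality -> highest AOM CQ level");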
diff --git a/media/codec2/components/aom/C2SoftAomEnc.h b/media/codec2/components/aom/C2SoftAomEnc.h
index 2d1bb07..d7832dd 100644
--- a/media/codec2/components/aom/C2SoftAomEnc.h
+++ b/media/codec2/components/aom/C2SoftAomEnc.h
@@ -102,8 +102,10 @@
std::shared_ptr<C2StreamIntraRefreshTuning::output> mIntraRefresh;
std::shared_ptr<C2StreamFrameRateInfo::output> mFrameRate;
std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamQualityTuning::output> mQuality;
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
+ std::shared_ptr<C2StreamColorAspectsInfo::output> mColorAspects;
aom_codec_err_t setupCodecParameters();
};
@@ -126,6 +128,7 @@
}
std::shared_ptr<C2StreamFrameRateInfo::output> getFrameRate_l() const { return mFrameRate; }
std::shared_ptr<C2StreamBitrateInfo::output> getBitrate_l() const { return mBitrate; }
+ std::shared_ptr<C2StreamQualityTuning::output> getQuality_l() const { return mQuality; }
std::shared_ptr<C2StreamBitrateModeTuning::output> getBitrateMode_l() const {
return mBitrateMode;
}
@@ -135,7 +138,7 @@
std::shared_ptr<C2StreamColorAspectsInfo::output> getCodedColorAspects_l() const {
return mCodedColorAspects;
}
- std::shared_ptr<C2StreamPixelFormatInfo::output> getPixelFormat_l() const {
+ std::shared_ptr<C2StreamPixelFormatInfo::input> getPixelFormat_l() const {
return mPixelFormat;
}
uint32_t getSyncFramePeriod() const;
@@ -151,11 +154,12 @@
std::shared_ptr<C2StreamRequestSyncFrameTuning::output> mRequestSync;
std::shared_ptr<C2StreamSyncFrameIntervalTuning::output> mSyncFramePeriod;
std::shared_ptr<C2StreamBitrateInfo::output> mBitrate;
+ std::shared_ptr<C2StreamQualityTuning::output> mQuality;
std::shared_ptr<C2StreamBitrateModeTuning::output> mBitrateMode;
std::shared_ptr<C2StreamProfileLevelInfo::output> mProfileLevel;
std::shared_ptr<C2StreamColorAspectsInfo::input> mColorAspects;
std::shared_ptr<C2StreamColorAspectsInfo::output> mCodedColorAspects;
- std::shared_ptr<C2StreamPixelFormatInfo::output> mPixelFormat;
+ std::shared_ptr<C2StreamPixelFormatInfo::input> mPixelFormat;
};
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index d549c3b..32f8fa8 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -38,8 +38,8 @@
void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
- size_t dstUVStride, uint32_t width, uint32_t height,
- bool isMonochrome) {
+ size_t dstUStride, size_t dstVStride, uint32_t width,
+ uint32_t height, bool isMonochrome) {
for (size_t i = 0; i < height; ++i) {
memcpy(dstY, srcY, width);
srcY += srcYStride;
@@ -51,8 +51,8 @@
for (size_t i = 0; i < (height + 1) / 2; ++i) {
memset(dstV, kNeutralUVBitDepth8, (width + 1) / 2);
memset(dstU, kNeutralUVBitDepth8, (width + 1) / 2);
- dstV += dstUVStride;
- dstU += dstUVStride;
+ dstV += dstVStride;
+ dstU += dstUStride;
}
return;
}
@@ -60,13 +60,13 @@
for (size_t i = 0; i < (height + 1) / 2; ++i) {
memcpy(dstV, srcV, (width + 1) / 2);
srcV += srcVStride;
- dstV += dstUVStride;
+ dstV += dstVStride;
}
for (size_t i = 0; i < (height + 1) / 2; ++i) {
memcpy(dstU, srcU, (width + 1) / 2);
srcU += srcUStride;
- dstU += dstUVStride;
+ dstU += dstUStride;
}
}
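For reference, a sketch of how a decoder call site feeds the updated convertYUV420Planar8ToYV12() with separate U and V destination strides taken from the C2PlanarLayout, mirroring the aom, gav1, mpeg4 and vpx call sites in this change (the wrapper function itself is illustrative):

    #include <C2Buffer.h>
    #include <SimpleC2Component.h>

    namespace android {

    void copyDecodedFrame(C2GraphicView& wView,
                          const uint8_t* srcY, const uint8_t* srcU, const uint8_t* srcV,
                          size_t srcYStride, size_t srcUStride, size_t srcVStride,
                          uint32_t width, uint32_t height) {
        uint8_t* dstY = const_cast<uint8_t*>(wView.data()[C2PlanarLayout::PLANE_Y]);
        uint8_t* dstU = const_cast<uint8_t*>(wView.data()[C2PlanarLayout::PLANE_U]);
        uint8_t* dstV = const_cast<uint8_t*>(wView.data()[C2PlanarLayout::PLANE_V]);
        C2PlanarLayout layout = wView.layout();
        size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
        size_t dstUStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;  // U and V strides
        size_t dstVStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;  // may now differ
        convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV,
                                   srcYStride, srcUStride, srcVStride,
                                   dstYStride, dstUStride, dstVStride,
                                   width, height, false /* isMonochrome */);
    }

    }  // namespace android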
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index 38b7825..051f798 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -33,8 +33,8 @@
void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
- size_t dstUVStride, uint32_t width, uint32_t height,
- bool isMonochrome = false);
+ size_t dstUStride, size_t dstVStride, uint32_t width,
+ uint32_t height, bool isMonochrome = false);
void convertYUV420Planar16ToY410OrRGBA1010102(
uint32_t *dst, const uint16_t *srcY,
diff --git a/media/codec2/components/gav1/Android.bp b/media/codec2/components/gav1/Android.bp
index 162339f..9781b6d 100644
--- a/media/codec2/components/gav1/Android.bp
+++ b/media/codec2/components/gav1/Android.bp
@@ -21,7 +21,10 @@
],
srcs: ["C2SoftGav1Dec.cpp"],
- static_libs: ["libgav1"],
+ static_libs: [
+ "libgav1",
+ "libyuv_static",
+ ],
apex_available: [
"//apex_available:platform",
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index d234f21..8aed623 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -24,6 +24,7 @@
#include <Codec2CommonUtils.h>
#include <Codec2Mapper.h>
#include <SimpleC2Interface.h>
+#include <libyuv.h>
#include <log/log.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/MediaDefs.h>
@@ -725,6 +726,24 @@
}
}
+void C2SoftGav1Dec::setError(const std::unique_ptr<C2Work> &work, c2_status_t error) {
+ mSignalledError = true;
+ work->result = error;
+ work->workletsProcessed = 1u;
+}
+
+bool C2SoftGav1Dec::allocTmpFrameBuffer(size_t size) {
+ if (size > mTmpFrameBufferSize) {
+ mTmpFrameBuffer = std::make_unique<uint16_t[]>(size);
+ if (mTmpFrameBuffer == nullptr) {
+ mTmpFrameBufferSize = 0;
+ return false;
+ }
+ mTmpFrameBufferSize = size;
+ }
+ return true;
+}
+
bool C2SoftGav1Dec::outputBuffer(const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work) {
if (!(work && pool)) return false;
@@ -771,14 +790,6 @@
getHDRStaticParams(buffer, work);
getHDR10PlusInfoData(buffer, work);
- if (!(buffer->image_format == libgav1::kImageFormatYuv420 ||
- buffer->image_format == libgav1::kImageFormatMonochrome400)) {
- ALOGE("image_format %d not supported", buffer->image_format);
- mSignalledError = true;
- work->workletsProcessed = 1u;
- work->result = C2_CORRUPTED;
- return false;
- }
const bool isMonochrome =
buffer->image_format == libgav1::kImageFormatMonochrome400;
@@ -851,40 +862,112 @@
uint8_t *dstY = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_Y]);
uint8_t *dstU = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_U]);
uint8_t *dstV = const_cast<uint8_t *>(wView.data()[C2PlanarLayout::PLANE_V]);
- size_t srcYStride = buffer->stride[0];
- size_t srcUStride = buffer->stride[1];
- size_t srcVStride = buffer->stride[2];
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
- size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstUStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstVStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
if (buffer->bitdepth == 10) {
const uint16_t *srcY = (const uint16_t *)buffer->plane[0];
const uint16_t *srcU = (const uint16_t *)buffer->plane[1];
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
+ size_t srcYStride = buffer->stride[0] / 2;
+ size_t srcUStride = buffer->stride[1] / 2;
+ size_t srcVStride = buffer->stride[2] / 2;
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
convertYUV420Planar16ToY410OrRGBA1010102(
- (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2,
+ (uint32_t *)dstY, srcY, srcU, srcV, srcYStride,
+ srcUStride, srcVStride,
dstYStride / sizeof(uint32_t), mWidth, mHeight,
std::static_pointer_cast<const C2ColorAspectsStruct>(codedColorAspects));
} else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
- convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
- srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
- dstUVStride / 2, mWidth, mHeight, isMonochrome);
+ dstYStride /= 2;
+ dstUStride /= 2;
+ dstVStride /= 2;
+ if (buffer->image_format == libgav1::kImageFormatYuv444 ||
+ buffer->image_format == libgav1::kImageFormatYuv422) {
+ // TODO(https://crbug.com/libyuv/952): replace this block with libyuv::I410ToP010 and
+ // libyuv::I210ToP010 when they are available.
+ // Note it may be safe to alias dstY in I010ToP010, but the libyuv API doesn't make any
+ // guarantees.
+ const size_t tmpSize = dstYStride * mHeight + dstUStride * align(mHeight, 2);
+ if (!allocTmpFrameBuffer(tmpSize)) {
+ ALOGE("Error allocating temp conversion buffer (%zu bytes)", tmpSize);
+ setError(work, C2_NO_MEMORY);
+ return false;
+ }
+ uint16_t *const tmpY = mTmpFrameBuffer.get();
+ uint16_t *const tmpU = tmpY + dstYStride * mHeight;
+ uint16_t *const tmpV = tmpU + dstUStride * align(mHeight, 2) / 2;
+ if (buffer->image_format == libgav1::kImageFormatYuv444) {
+ libyuv::I410ToI010(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ tmpY, dstYStride, tmpU, dstUStride, tmpV, dstUStride,
+ mWidth, mHeight);
+ } else {
+ libyuv::I210ToI010(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ tmpY, dstYStride, tmpU, dstUStride, tmpV, dstUStride,
+ mWidth, mHeight);
+ }
+ libyuv::I010ToP010(tmpY, dstYStride, tmpU, dstUStride, tmpV, dstVStride,
+ (uint16_t*)dstY, dstYStride, (uint16_t*)dstU, dstUStride,
+ mWidth, mHeight);
+ } else {
+ convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+ srcYStride, srcUStride, srcVStride, dstYStride,
+ dstUStride, mWidth, mHeight, isMonochrome);
+ }
} else {
- convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride, mWidth,
- mHeight, isMonochrome);
+ if (buffer->image_format == libgav1::kImageFormatYuv444) {
+ // TODO(https://crbug.com/libyuv/950): replace this block with libyuv::I410ToI420 when
+ // it's available.
+ const size_t tmpSize = dstYStride * mHeight + dstUStride * align(mHeight, 2);
+ if (!allocTmpFrameBuffer(tmpSize)) {
+ ALOGE("Error allocating temp conversion buffer (%zu bytes)", tmpSize);
+ setError(work, C2_NO_MEMORY);
+ return false;
+ }
+ uint16_t *const tmpY = mTmpFrameBuffer.get();
+ uint16_t *const tmpU = tmpY + dstYStride * mHeight;
+ uint16_t *const tmpV = tmpU + dstUStride * align(mHeight, 2) / 2;
+ libyuv::I410ToI010(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ tmpY, dstYStride, tmpU, dstUStride, tmpV, dstVStride,
+ mWidth, mHeight);
+ libyuv::I010ToI420(tmpY, dstYStride, tmpU, dstUStride, tmpV, dstUStride,
+ dstY, dstYStride, dstU, dstUStride, dstV, dstVStride,
+ mWidth, mHeight);
+ } else if (buffer->image_format == libgav1::kImageFormatYuv422) {
+ libyuv::I210ToI420(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ dstY, dstYStride, dstU, dstUStride, dstV, dstVStride,
+ mWidth, mHeight);
+ } else {
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUStride,
+ mWidth, mHeight, isMonochrome);
+ }
}
} else {
const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
- convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
- srcVStride, dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
+ size_t srcYStride = buffer->stride[0];
+ size_t srcUStride = buffer->stride[1];
+ size_t srcVStride = buffer->stride[2];
+
+ if (buffer->image_format == libgav1::kImageFormatYuv444) {
+ libyuv::I444ToI420(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ dstY, dstYStride, dstU, dstUStride, dstV, dstVStride,
+ mWidth, mHeight);
+ } else if (buffer->image_format == libgav1::kImageFormatYuv422) {
+ libyuv::I422ToI420(srcY, srcYStride, srcU, srcUStride, srcV, srcVStride,
+ dstY, dstYStride, dstU, dstUStride, dstV, dstVStride,
+ mWidth, mHeight);
+ } else {
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUStride, dstVStride, mWidth, mHeight,
+ isMonochrome);
+ }
}
finishWork(buffer->user_private_data, work, std::move(block));
block = nullptr;
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index f0e14d7..c3b27ea 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -19,6 +19,8 @@
#include <inttypes.h>
+#include <memory>
+
#include <media/stagefright/foundation/ColorUtils.h>
#include <SimpleC2Component.h>
@@ -60,6 +62,9 @@
uint32_t mHeight;
bool mSignalledOutputEos;
bool mSignalledError;
+ // Used during 10-bit I444/I422 to 10-bit P010 & 8-bit I420 conversions.
+ std::unique_ptr<uint16_t[]> mTmpFrameBuffer;
+ size_t mTmpFrameBufferSize = 0;
C2StreamHdrStaticMetadataInfo::output mHdrStaticMetadataInfo;
std::unique_ptr<C2StreamHdr10PlusInfo::output> mHdr10PlusInfo = nullptr;
@@ -97,6 +102,9 @@
void destroyDecoder();
void finishWork(uint64_t index, const std::unique_ptr<C2Work>& work,
const std::shared_ptr<C2GraphicBlock>& block);
+ // Sets |work->result| to |error| and raises mSignalledError.
+ void setError(const std::unique_ptr<C2Work> &work, c2_status_t error);
+ bool allocTmpFrameBuffer(size_t size);
bool outputBuffer(const std::shared_ptr<C2BlockPool>& pool,
const std::unique_ptr<C2Work>& work);
c2_status_t drainInternal(uint32_t drainMode,
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 3bf9c48..2137964 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -603,7 +603,8 @@
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
- size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstUStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstVStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
size_t srcYStride = align(mWidth, 16);
size_t srcUStride = srcYStride / 2;
size_t srcVStride = srcYStride / 2;
@@ -613,8 +614,8 @@
const uint8_t *srcV = (const uint8_t *)srcY + vStride * srcYStride * 5 / 4;
convertYUV420Planar8ToYV12(outputBufferY, outputBufferU, outputBufferV, srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride, dstYStride, dstUVStride,
- mWidth, mHeight);
+ srcYStride, srcUStride, srcVStride, dstYStride, dstUStride,
+ dstVStride, mWidth, mHeight);
inPos += inSize - (size_t)tmpInSize;
finishWork(workIndex, work);
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 18cd1bf..dab7b89 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -766,7 +766,8 @@
size_t srcVStride = img->stride[VPX_PLANE_V];
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
- size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstUStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
+ size_t dstVStride = layout.planes[C2PlanarLayout::PLANE_V].rowInc;
if (img->fmt == VPX_IMG_FMT_I42016) {
const uint16_t *srcY = (const uint16_t *)img->planes[VPX_PLANE_Y];
@@ -804,10 +805,10 @@
} else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
srcYStride / 2, srcUStride / 2, srcVStride / 2,
- dstYStride / 2, dstUVStride / 2, mWidth, mHeight);
+ dstYStride / 2, dstUStride / 2, mWidth, mHeight);
} else {
convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUStride,
mWidth, mHeight);
}
} else {
@@ -816,7 +817,7 @@
const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
- srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
+ srcVStride, dstYStride, dstUStride, dstVStride, mWidth, mHeight);
}
finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
return OK;
diff --git a/media/codec2/hal/client/client.cpp b/media/codec2/hal/client/client.cpp
index 09452c4..f5128ca 100644
--- a/media/codec2/hal/client/client.cpp
+++ b/media/codec2/hal/client/client.cpp
@@ -1722,6 +1722,8 @@
static_cast<uint64_t>(blockPoolId),
bqId == 0 ? nullHgbp : igbp);
+ mOutputBufferQueue->expireOldWaiters();
+
if (!transStatus.isOk()) {
LOG(ERROR) << "setOutputSurface -- transaction failed.";
return C2_TRANSACTION_FAILED;
@@ -1763,6 +1765,7 @@
<< status << ".";
}
}
+ mOutputBufferQueue->expireOldWaiters();
}
c2_status_t Codec2Client::Component::connectToInputSurface(
diff --git a/media/codec2/hal/client/include/codec2/hidl/output.h b/media/codec2/hal/client/include/codec2/hidl/output.h
index a13edf3..c208df0 100644
--- a/media/codec2/hal/client/include/codec2/hidl/output.h
+++ b/media/codec2/hal/client/include/codec2/hidl/output.h
@@ -50,6 +50,10 @@
int maxDequeueBufferCount,
std::shared_ptr<V1_2::SurfaceSyncObj> *syncObj);
+ // If there are waiters trying to allocate from the old surface, wake them up
+ // and expire them.
+ void expireOldWaiters();
+
 // Stop using the current output surface. Pending buffer operations will not
 // be performed anymore.
void stop();
@@ -86,6 +90,8 @@
std::weak_ptr<_C2BlockPoolData> mPoolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
std::shared_ptr<C2SurfaceSyncMemory> mSyncMem;
bool mStopped;
+ std::mutex mOldMutex;
+ std::shared_ptr<C2SurfaceSyncMemory> mOldMem;
bool registerBuffer(const C2ConstGraphicBlock& block);
};
diff --git a/media/codec2/hal/client/output.cpp b/media/codec2/hal/client/output.cpp
index f789030..6aaf9ab 100644
--- a/media/codec2/hal/client/output.cpp
+++ b/media/codec2/hal/client/output.cpp
@@ -217,6 +217,7 @@
sp<GraphicBuffer> buffers[BufferQueueDefs::NUM_BUFFER_SLOTS];
std::weak_ptr<_C2BlockPoolData>
poolDatas[BufferQueueDefs::NUM_BUFFER_SLOTS];
+ std::shared_ptr<C2SurfaceSyncMemory> oldMem;
{
std::scoped_lock<std::mutex> l(mMutex);
bool stopped = mStopped;
@@ -238,7 +239,7 @@
}
return false;
}
- std::shared_ptr<C2SurfaceSyncMemory> oldMem = mSyncMem;
+ oldMem = mSyncMem;
C2SyncVariables *oldSync = mSyncMem ? mSyncMem->mem() : nullptr;
if (oldSync) {
oldSync->lock();
@@ -314,11 +315,26 @@
newSync->unlock();
}
}
+ {
+ std::scoped_lock<std::mutex> l(mOldMutex);
+ mOldMem = oldMem;
+ }
ALOGD("remote graphic buffer migration %zu/%zu",
success, tryNum);
return true;
}
+void OutputBufferQueue::expireOldWaiters() {
+ std::scoped_lock<std::mutex> l(mOldMutex);
+ if (mOldMem) {
+ C2SyncVariables *oldSync = mOldMem->mem();
+ if (oldSync) {
+ oldSync->notifyAll();
+ }
+ mOldMem.reset();
+ }
+}
+
void OutputBufferQueue::stop() {
std::scoped_lock<std::mutex> l(mMutex);
mStopped = true;
diff --git a/media/codec2/hal/plugin/samples/Android.bp b/media/codec2/hal/plugin/samples/Android.bp
index 32b760d..e0f8280 100644
--- a/media/codec2/hal/plugin/samples/Android.bp
+++ b/media/codec2/hal/plugin/samples/Android.bp
@@ -28,6 +28,7 @@
"libGLESv1_CM",
"libGLESv2",
"libGLESv3",
+ "libvulkan",
"libbase",
"libcodec2",
"libcutils",
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index a008dc2..f258bff 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -1868,18 +1868,14 @@
}
state->set(STOPPING);
}
- {
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mPushBlankBuffersOnStop) {
- mChannel->pushBlankBufferToOutputSurface();
- }
- }
mChannel->reset();
- (new AMessage(kWhatStop, this))->post();
+ bool pushBlankBuffer = mConfig.lock().get()->mPushBlankBuffersOnStop;
+ sp<AMessage> stopMessage(new AMessage(kWhatStop, this));
+ stopMessage->setInt32("pushBlankBuffer", pushBlankBuffer);
+ stopMessage->post();
}
-void CCodec::stop() {
+void CCodec::stop(bool pushBlankBuffer) {
std::shared_ptr<Codec2Client::Component> comp;
{
Mutexed<State>::Locked state(mState);
@@ -1898,7 +1894,7 @@
comp = state->comp;
}
status_t err = comp->stop();
- mChannel->stopUseOutputSurface();
+ mChannel->stopUseOutputSurface(pushBlankBuffer);
if (err != C2_OK) {
// TODO: convert err into status_t
mCallback->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
@@ -1963,21 +1959,16 @@
config->mInputSurfaceDataspace = HAL_DATASPACE_UNKNOWN;
}
}
- {
- Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
- const std::unique_ptr<Config> &config = *configLocked;
- if (config->mPushBlankBuffersOnStop) {
- mChannel->pushBlankBufferToOutputSurface();
- }
- }
mChannel->reset();
+ bool pushBlankBuffer = mConfig.lock().get()->mPushBlankBuffersOnStop;
// thiz holds strong ref to this while the thread is running.
sp<CCodec> thiz(this);
- std::thread([thiz, sendCallback] { thiz->release(sendCallback); }).detach();
+ std::thread([thiz, sendCallback, pushBlankBuffer]
+ { thiz->release(sendCallback, pushBlankBuffer); }).detach();
}
-void CCodec::release(bool sendCallback) {
+void CCodec::release(bool sendCallback, bool pushBlankBuffer) {
std::shared_ptr<Codec2Client::Component> comp;
{
Mutexed<State>::Locked state(mState);
@@ -1992,7 +1983,7 @@
comp = state->comp;
}
comp->release();
- mChannel->stopUseOutputSurface();
+ mChannel->stopUseOutputSurface(pushBlankBuffer);
{
Mutexed<State>::Locked state(mState);
@@ -2006,6 +1997,7 @@
}
status_t CCodec::setSurface(const sp<Surface> &surface) {
+ bool pushBlankBuffer = false;
{
Mutexed<std::unique_ptr<Config>>::Locked configLocked(mConfig);
const std::unique_ptr<Config> &config = *configLocked;
@@ -2031,8 +2023,9 @@
return err;
}
}
+ pushBlankBuffer = config->mPushBlankBuffersOnStop;
}
- return mChannel->setSurface(surface);
+ return mChannel->setSurface(surface, pushBlankBuffer);
}
void CCodec::signalFlush() {
@@ -2331,7 +2324,11 @@
case kWhatStop: {
// C2Component::stop() should return within 500ms.
setDeadline(now, 1500ms, "stop");
- stop();
+ int32_t pushBlankBuffer;
+ if (!msg->findInt32("pushBlankBuffer", &pushBlankBuffer)) {
+ pushBlankBuffer = 0;
+ }
+ stop(static_cast<bool>(pushBlankBuffer));
break;
}
case kWhatFlush: {
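
Rather than pushing the blank buffer while the config lock is held, the stop and release paths now snapshot mPushBlankBuffersOnStop and hand the flag to the asynchronous step. A minimal sketch of that snapshot-then-dispatch pattern (generic std::mutex/std::thread, not the Mutexed<> wrapper used above):

    #include <mutex>
    #include <thread>

    struct Config {
        bool pushBlankBuffersOnStop = false;
    };

    class Codec {
    public:
        void initiateRelease() {
            bool pushBlankBuffer;
            {
                // Hold the config lock only long enough to copy the flag.
                std::scoped_lock lock(mConfigMutex);
                pushBlankBuffer = mConfig.pushBlankBuffersOnStop;
            }
            // The flag travels with the asynchronous work instead of the lock.
            std::thread([this, pushBlankBuffer] { release(pushBlankBuffer); }).detach();
        }

    private:
        void release(bool pushBlankBuffer) {
            // Tear down the component; push a blank frame first if requested.
            (void)pushBlankBuffer;
        }

        std::mutex mConfigMutex;
        Config mConfig;
    };
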
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index b54d35d..fff008b 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -1581,14 +1581,22 @@
mFirstValidFrameIndex = mFrameIndex.load(std::memory_order_relaxed);
}
-void CCodecBufferChannel::stopUseOutputSurface() {
- if (mOutputSurface.lock()->surface) {
+void CCodecBufferChannel::stopUseOutputSurface(bool pushBlankBuffer) {
+ sp<Surface> surface = mOutputSurface.lock()->surface;
+ if (surface) {
C2BlockPool::local_id_t outputPoolId;
{
Mutexed<BlockPools>::Locked pools(mBlockPools);
outputPoolId = pools->outputPoolId;
}
if (mComponent) mComponent->stopUsingOutputSurface(outputPoolId);
+
+ if (pushBlankBuffer) {
+ sp<ANativeWindow> anw = static_cast<ANativeWindow *>(surface.get());
+ if (anw) {
+ pushBlankBuffersToNativeWindow(anw.get());
+ }
+ }
}
}
@@ -2106,14 +2114,20 @@
}
}
-status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface) {
+status_t CCodecBufferChannel::setSurface(const sp<Surface> &newSurface, bool pushBlankBuffer) {
static std::atomic_uint32_t surfaceGeneration{0};
uint32_t generation = (getpid() << 10) |
((surfaceGeneration.fetch_add(1, std::memory_order_relaxed) + 1)
& ((1 << 10) - 1));
sp<IGraphicBufferProducer> producer;
- int maxDequeueCount = mOutputSurface.lock()->maxDequeueBuffers;
+ int maxDequeueCount;
+ sp<Surface> oldSurface;
+ {
+ Mutexed<OutputSurface>::Locked outputSurface(mOutputSurface);
+ maxDequeueCount = outputSurface->maxDequeueBuffers;
+ oldSurface = outputSurface->surface;
+ }
if (newSurface) {
newSurface->setScalingMode(NATIVE_WINDOW_SCALING_MODE_SCALE_TO_WINDOW);
newSurface->setDequeueTimeout(kDequeueTimeoutNs);
@@ -2150,6 +2164,15 @@
output->generation = generation;
}
+ if (oldSurface && pushBlankBuffer) {
+ // When the release surface was set from MediaCodec,
+ // pushing a blank buffer at the end may be necessary.
+ sp<ANativeWindow> anw = static_cast<ANativeWindow *>(oldSurface.get());
+ if (anw) {
+ pushBlankBuffersToNativeWindow(anw.get());
+ }
+ }
+
return OK;
}
@@ -2239,13 +2262,4 @@
}
}
-status_t CCodecBufferChannel::pushBlankBufferToOutputSurface() {
- Mutexed<OutputSurface>::Locked output(mOutputSurface);
- sp<ANativeWindow> nativeWindow = static_cast<ANativeWindow *>(output->surface.get());
- if (nativeWindow == nullptr) {
- return INVALID_OPERATION;
- }
- return pushBlankBuffersToNativeWindow(nativeWindow.get());
-}
-
} // namespace android
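
Both stopUseOutputSurface() and setSurface() now clear the outgoing window inline instead of going through the removed pushBlankBufferToOutputSurface() helper. A sketch of the guard they share (illustrative only; it assumes the includes already present in CCodecBufferChannel.cpp, and pushBlankBuffersToNativeWindow() is the existing stagefright helper):

    // Hypothetical helper: clear the previous window on request so it does not
    // keep showing the codec's last frame after the surface switch.
    status_t maybeClearOldSurface(const sp<Surface> &oldSurface, bool pushBlankBuffer) {
        if (oldSurface == nullptr || !pushBlankBuffer) {
            return OK;
        }
        sp<ANativeWindow> anw = static_cast<ANativeWindow *>(oldSurface.get());
        if (anw == nullptr) {
            return INVALID_OPERATION;
        }
        return pushBlankBuffersToNativeWindow(anw.get());
    }
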
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.h b/media/codec2/sfplugin/CCodecBufferChannel.h
index 61fb06f..a52d4dc 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.h
+++ b/media/codec2/sfplugin/CCodecBufferChannel.h
@@ -102,7 +102,7 @@
/**
* Set output graphic surface for rendering.
*/
- status_t setSurface(const sp<Surface> &surface);
+ status_t setSurface(const sp<Surface> &surface, bool pushBlankBuffer);
/**
* Set GraphicBufferSource object from which the component extracts input
@@ -151,8 +151,10 @@
/**
* Stop using buffers of the current output surface for other Codec
* instances to use the surface safely.
+ *
+ * \param[in] pushBlankBuffer push a blank buffer at the end if true
*/
- void stopUseOutputSurface();
+ void stopUseOutputSurface(bool pushBlankBuffer);
/**
* Stop queueing buffers to the component. This object should never queue
@@ -201,11 +203,6 @@
void setMetaMode(MetaMode mode);
- /**
- * Push a blank buffer to the configured native output surface.
- */
- status_t pushBlankBufferToOutputSurface();
-
private:
class QueueGuard;
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index b9270de..82c31a8 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -22,6 +22,7 @@
#include <aidl/android/hardware/graphics/common/Cta861_3.h>
#include <aidl/android/hardware/graphics/common/Smpte2086.h>
+#include <android-base/no_destructor.h>
#include <android-base/properties.h>
#include <android/hardware/cas/native/1.0/types.h>
#include <android/hardware/drm/1.0/types.h>
@@ -1018,8 +1019,8 @@
namespace {
sp<IMapper4> GetMapper4() {
- static sp<IMapper4> sMapper = IMapper4::getService();
- return sMapper;
+ static ::android::base::NoDestructor<sp<IMapper4>> sMapper(IMapper4::getService());
+ return *sMapper;
}
class Gralloc4Buffer {
@@ -1137,6 +1138,11 @@
err = C2_CORRUPTED;
}
}
+
+ if (err != C2_OK) {
+ staticInfo->reset();
+ }
+
if (dynamicInfo) {
ALOGV("Grabbing dynamic HDR info from gralloc4 metadata");
dynamicInfo->reset();
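
The GetMapper4() change avoids running the sp<IMapper4> destructor during static teardown; android::base::NoDestructor constructs the object in place and intentionally never destroys it. A minimal sketch of the same idiom with an ordinary type (function name and value are placeholders):

    #include <android-base/no_destructor.h>
    #include <string>

    const std::string &GetComponentStorePrefix() {
        // Constructed on first use, never destroyed, so its destructor can never
        // race with other static objects being torn down at process exit.
        static ::android::base::NoDestructor<std::string> sPrefix("c2.android.");
        return *sPrefix;
    }
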
diff --git a/media/codec2/sfplugin/include/media/stagefright/CCodec.h b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
index ec18128..13713bc 100644
--- a/media/codec2/sfplugin/include/media/stagefright/CCodec.h
+++ b/media/codec2/sfplugin/include/media/stagefright/CCodec.h
@@ -109,9 +109,9 @@
void allocate(const sp<MediaCodecInfo> &codecInfo);
void configure(const sp<AMessage> &msg);
void start();
- void stop();
+ void stop(bool pushBlankBuffer);
void flush();
- void release(bool sendCallback);
+ void release(bool sendCallback, bool pushBlankBuffer);
/**
* Creates an input surface for the current device configuration compatible with CCodec.
diff --git a/media/codec2/vndk/include/C2SurfaceSyncObj.h b/media/codec2/vndk/include/C2SurfaceSyncObj.h
index d858f27..b193b4a 100644
--- a/media/codec2/vndk/include/C2SurfaceSyncObj.h
+++ b/media/codec2/vndk/include/C2SurfaceSyncObj.h
@@ -112,6 +112,11 @@
*/
c2_status_t waitForChange(uint32_t waitId, c2_nsecs_t timeoutNs);
+ /**
+ * Wake up and expire all waiters.
+ */
+ void notifyAll();
+
C2SyncVariables() {}
private:
diff --git a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
index bf4ca32..d55a3d8 100644
--- a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
+++ b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
@@ -244,6 +244,12 @@
return C2_BAD_VALUE;
}
+void C2SyncVariables::notifyAll() {
+ this->lock();
+ this->broadcast();
+ this->unlock();
+}
+
int C2SyncVariables::signal() {
mCond++;
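
notifyAll() is the lock/broadcast/unlock form of the usual "wake every waiter so it re-checks its predicate" idiom; a condition-variable sketch of the same shape:

    #include <condition_variable>
    #include <mutex>

    struct Waitable {
        std::mutex m;
        std::condition_variable cv;
        bool expired = false;

        void waitUntilExpired() {
            std::unique_lock<std::mutex> lock(m);
            // Waiters always re-check the predicate after being woken.
            cv.wait(lock, [this] { return expired; });
        }

        void notifyAll() {
            {
                std::scoped_lock lock(m);
                expired = true;
            }
            cv.notify_all();
        }
    };
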
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 6c364c9..13e430a 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -758,7 +758,8 @@
*
* @return pointer to a text representation of an AAudio result code.
*/
-AAUDIO_API const char * AAudio_convertResultToText(aaudio_result_t returnCode) __INTRODUCED_IN(26);
+AAUDIO_API const char * _Nonnull AAudio_convertResultToText(aaudio_result_t returnCode)
+ __INTRODUCED_IN(26);
/**
* The text is the ASCII symbol corresponding to the stream state,
@@ -770,7 +771,7 @@
*
* @return pointer to a text representation of an AAudio state.
*/
-AAUDIO_API const char * AAudio_convertStreamStateToText(aaudio_stream_state_t state)
+AAUDIO_API const char * _Nonnull AAudio_convertStreamStateToText(aaudio_stream_state_t state)
__INTRODUCED_IN(26);
// ============================================================
@@ -791,8 +792,8 @@
*
* Available since API level 26.
*/
-AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder** builder)
- __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudio_createStreamBuilder(AAudioStreamBuilder* _Nullable* _Nonnull
+ builder) __INTRODUCED_IN(26);
/**
* Request an audio device identified by an ID.
@@ -814,7 +815,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param deviceId device identifier or {@link #AAUDIO_UNSPECIFIED}
*/
-AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setDeviceId(AAudioStreamBuilder* _Nonnull builder,
int32_t deviceId) __INTRODUCED_IN(26);
/**
@@ -834,8 +835,8 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param packageName packageName of the calling app.
*/
-AAUDIO_API void AAudioStreamBuilder_setPackageName(AAudioStreamBuilder* builder,
- const char * packageName) __INTRODUCED_IN(31);
+AAUDIO_API void AAudioStreamBuilder_setPackageName(AAudioStreamBuilder* _Nonnull builder,
+ const char * _Nonnull packageName) __INTRODUCED_IN(31);
/**
* Declare the attribution tag of the context creating the stream.
@@ -849,8 +850,8 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param attributionTag attributionTag of the calling context.
*/
-AAUDIO_API void AAudioStreamBuilder_setAttributionTag(AAudioStreamBuilder* builder,
- const char * attributionTag) __INTRODUCED_IN(31);
+AAUDIO_API void AAudioStreamBuilder_setAttributionTag(AAudioStreamBuilder* _Nonnull builder,
+ const char * _Nonnull attributionTag) __INTRODUCED_IN(31);
/**
* Request a sample rate in Hertz.
@@ -868,7 +869,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sampleRate frames per second. Common rates include 44100 and 48000 Hz.
*/
-AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setSampleRate(AAudioStreamBuilder* _Nonnull builder,
int32_t sampleRate) __INTRODUCED_IN(26);
/**
@@ -898,7 +899,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param channelCount Number of channels desired.
*/
-AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setChannelCount(AAudioStreamBuilder* _Nonnull builder,
int32_t channelCount) __INTRODUCED_IN(26);
/**
@@ -911,7 +912,7 @@
*
* @deprecated use {@link AAudioStreamBuilder_setChannelCount}
*/
-AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* _Nonnull builder,
int32_t samplesPerFrame) __INTRODUCED_IN(26);
/**
@@ -931,7 +932,7 @@
* @param format common formats are {@link #AAUDIO_FORMAT_PCM_FLOAT} and
* {@link #AAUDIO_FORMAT_PCM_I16}.
*/
-AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setFormat(AAudioStreamBuilder* _Nonnull builder,
aaudio_format_t format) __INTRODUCED_IN(26);
/**
@@ -947,7 +948,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sharingMode {@link #AAUDIO_SHARING_MODE_SHARED} or {@link #AAUDIO_SHARING_MODE_EXCLUSIVE}
*/
-AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setSharingMode(AAudioStreamBuilder* _Nonnull builder,
aaudio_sharing_mode_t sharingMode) __INTRODUCED_IN(26);
/**
@@ -960,7 +961,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param direction {@link #AAUDIO_DIRECTION_OUTPUT} or {@link #AAUDIO_DIRECTION_INPUT}
*/
-AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* _Nonnull builder,
aaudio_direction_t direction) __INTRODUCED_IN(26);
/**
@@ -974,8 +975,8 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param numFrames the desired buffer capacity in frames or {@link #AAUDIO_UNSPECIFIED}
*/
-AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(AAudioStreamBuilder* builder,
- int32_t numFrames) __INTRODUCED_IN(26);
+AAUDIO_API void AAudioStreamBuilder_setBufferCapacityInFrames(
+ AAudioStreamBuilder* _Nonnull builder, int32_t numFrames) __INTRODUCED_IN(26);
/**
* Set the requested performance mode.
@@ -994,7 +995,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param mode the desired performance mode, eg. {@link #AAUDIO_PERFORMANCE_MODE_LOW_LATENCY}
*/
-AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setPerformanceMode(AAudioStreamBuilder* _Nonnull builder,
aaudio_performance_mode_t mode) __INTRODUCED_IN(26);
/**
@@ -1011,7 +1012,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param usage the desired usage, eg. {@link #AAUDIO_USAGE_GAME}
*/
-AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setUsage(AAudioStreamBuilder* _Nonnull builder,
aaudio_usage_t usage) __INTRODUCED_IN(28);
/**
@@ -1028,7 +1029,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param contentType the type of audio data, eg. {@link #AAUDIO_CONTENT_TYPE_SPEECH}
*/
-AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setContentType(AAudioStreamBuilder* _Nonnull builder,
aaudio_content_type_t contentType) __INTRODUCED_IN(28);
/**
@@ -1043,7 +1044,8 @@
* @param spatializationBehavior the desired behavior with regards to spatialization, eg.
* {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
*/
-AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(
+ AAudioStreamBuilder* _Nonnull builder,
aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);
/**
@@ -1059,7 +1061,7 @@
* @param isSpatialized true if the content is already processed for binaural or transaural spatial
* rendering, false otherwise.
*/
-AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* _Nonnull builder,
bool isSpatialized) __INTRODUCED_IN(32);
/**
@@ -1079,7 +1081,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param inputPreset the desired configuration for recording
*/
-AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* _Nonnull builder,
aaudio_input_preset_t inputPreset) __INTRODUCED_IN(28);
/**
@@ -1097,7 +1099,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param capturePolicy the desired level of opt-out from being captured.
*/
-AAUDIO_API void AAudioStreamBuilder_setAllowedCapturePolicy(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setAllowedCapturePolicy(AAudioStreamBuilder* _Nonnull builder,
aaudio_allowed_capture_policy_t capturePolicy) __INTRODUCED_IN(29);
/** Set the requested session ID.
@@ -1127,7 +1129,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param sessionId an allocated sessionID or {@link #AAUDIO_SESSION_ID_ALLOCATE}
*/
-AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setSessionId(AAudioStreamBuilder* _Nonnull builder,
aaudio_session_id_t sessionId) __INTRODUCED_IN(28);
@@ -1149,7 +1151,7 @@
* @param privacySensitive true if capture from this stream must be marked as privacy sensitive,
* false otherwise.
*/
-AAUDIO_API void AAudioStreamBuilder_setPrivacySensitive(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setPrivacySensitive(AAudioStreamBuilder* _Nonnull builder,
bool privacySensitive) __INTRODUCED_IN(30);
/**
@@ -1223,9 +1225,9 @@
* @return AAUDIO_CALLBACK_RESULT_*
*/
typedef aaudio_data_callback_result_t (*AAudioStream_dataCallback)(
- AAudioStream *stream,
- void *userData,
- void *audioData,
+ AAudioStream* _Nonnull stream,
+ void* _Nullable userData,
+ void* _Nonnull audioData,
int32_t numFrames);
/**
@@ -1254,8 +1256,9 @@
* @param userData pointer to an application data structure that will be passed
* to the callback functions.
*/
-AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* builder,
- AAudioStream_dataCallback callback, void *userData) __INTRODUCED_IN(26);
+AAUDIO_API void AAudioStreamBuilder_setDataCallback(AAudioStreamBuilder* _Nonnull builder,
+ AAudioStream_dataCallback _Nullable callback, void* _Nullable userData)
+ __INTRODUCED_IN(26);
/**
* Set the requested data callback buffer size in frames.
@@ -1282,8 +1285,8 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param numFrames the desired buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
*/
-AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* builder,
- int32_t numFrames) __INTRODUCED_IN(26);
+AAUDIO_API void AAudioStreamBuilder_setFramesPerDataCallback(AAudioStreamBuilder* _Nonnull builder,
+ int32_t numFrames) __INTRODUCED_IN(26);
/**
* Prototype for the callback function that is passed to
@@ -1310,8 +1313,8 @@
* @param error an AAUDIO_ERROR_* value.
*/
typedef void (*AAudioStream_errorCallback)(
- AAudioStream *stream,
- void *userData,
+ AAudioStream* _Nonnull stream,
+ void* _Nullable userData,
aaudio_result_t error);
/**
@@ -1337,8 +1340,9 @@
* @param userData pointer to an application data structure that will be passed
* to the callback functions.
*/
-AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* builder,
- AAudioStream_errorCallback callback, void *userData) __INTRODUCED_IN(26);
+AAUDIO_API void AAudioStreamBuilder_setErrorCallback(AAudioStreamBuilder* _Nonnull builder,
+ AAudioStream_errorCallback _Nullable callback, void* _Nullable userData)
+ __INTRODUCED_IN(26);
/**
* Open a stream based on the options in the StreamBuilder.
@@ -1352,8 +1356,8 @@
* @param stream pointer to a variable to receive the new stream reference
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
- AAudioStream** stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* _Nonnull builder,
+ AAudioStream* _Nullable* _Nonnull stream) __INTRODUCED_IN(26);
/**
* Delete the resources associated with the StreamBuilder.
@@ -1363,7 +1367,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
+AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* _Nonnull builder)
__INTRODUCED_IN(26);
/**
@@ -1389,7 +1393,7 @@
* @param builder reference provided by AAudio_createStreamBuilder()
* @param channelMask Audio channel mask desired.
*/
-AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* _Nonnull builder,
aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);
// ============================================================
@@ -1418,7 +1422,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_release(AAudioStream* stream) __INTRODUCED_IN(30);
+AAUDIO_API aaudio_result_t AAudioStream_release(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(30);
/**
* Delete the internal data structures associated with the stream created
@@ -1431,7 +1436,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_close(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* Asynchronously request to start playing the stream. For output streams, one should
@@ -1445,7 +1450,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_requestStart(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Asynchronous request for the stream to pause.
@@ -1462,7 +1468,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_requestPause(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Asynchronous request for the stream to flush.
@@ -1482,7 +1489,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_requestFlush(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Asynchronous request for the stream to stop.
@@ -1495,7 +1503,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_requestStop(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Query the current state of the client, eg. {@link #AAUDIO_STREAM_STATE_PAUSING}
@@ -1509,7 +1518,8 @@
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
*/
-AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_stream_state_t AAudioStream_getState(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Wait until the current state no longer matches the input state.
@@ -1535,8 +1545,8 @@
* @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
* @return {@link #AAUDIO_OK} or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* stream,
- aaudio_stream_state_t inputState, aaudio_stream_state_t *nextState,
+AAUDIO_API aaudio_result_t AAudioStream_waitForStateChange(AAudioStream* _Nonnull stream,
+ aaudio_stream_state_t inputState, aaudio_stream_state_t* _Nullable nextState,
int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
// ============================================================
@@ -1565,8 +1575,8 @@
* @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
* @return The number of frames actually read or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* stream,
- void *buffer, int32_t numFrames, int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_read(AAudioStream* _Nonnull stream,
+ void* _Nonnull buffer, int32_t numFrames, int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
/**
* Write data to the stream.
@@ -1590,8 +1600,9 @@
* @param timeoutNanoseconds Maximum number of nanoseconds to wait for completion.
* @return The number of frames actually written or a negative error.
*/
-AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* stream,
- const void *buffer, int32_t numFrames, int64_t timeoutNanoseconds) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_write(AAudioStream* _Nonnull stream,
+ const void* _Nonnull buffer, int32_t numFrames, int64_t timeoutNanoseconds)
+ __INTRODUCED_IN(26);
// ============================================================
// Stream - queries
@@ -1615,7 +1626,7 @@
* @param numFrames requested number of frames that can be filled without blocking
* @return actual buffer size in frames or a negative error
*/
-AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* stream,
+AAUDIO_API aaudio_result_t AAudioStream_setBufferSizeInFrames(AAudioStream* _Nonnull stream,
int32_t numFrames) __INTRODUCED_IN(26);
/**
@@ -1626,7 +1637,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return buffer size in frames.
*/
-AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getBufferSizeInFrames(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Query the number of frames that the application should read or write at
@@ -1643,7 +1655,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return burst size
*/
-AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getFramesPerBurst(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Query maximum buffer capacity in frames.
@@ -1653,7 +1666,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return buffer capacity in frames
*/
-AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getBufferCapacityInFrames(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Query the size of the buffer that will be passed to the dataProc callback
@@ -1676,7 +1690,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return callback buffer size in frames or {@link #AAUDIO_UNSPECIFIED}
*/
-AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getFramesPerDataCallback(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* An XRun is an Underrun or an Overrun.
@@ -1695,7 +1710,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return the underrun or overrun count
*/
-AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getXRunCount(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* Available since API level 26.
@@ -1703,13 +1718,13 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual sample rate of the stream
*/
-AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getSampleRate(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* There may be sample rate conversions in the Audio framework.
* The sample rate set in the stream builder may not be the actual sample rate used in the hardware.
*
- * This returns the sample rate used by the hardware.
+ * This returns the sample rate used by the hardware in Hertz.
*
* If AAudioStreamBuilder_openStream() returned AAUDIO_OK, the result should always be valid.
*
@@ -1718,7 +1733,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual sample rate of the underlying hardware
*/
-AAUDIO_API int32_t AAudioStream_getHardwareSampleRate(AAudioStream* stream)
+AAUDIO_API int32_t AAudioStream_getHardwareSampleRate(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(__ANDROID_API_U__);
/**
@@ -1730,7 +1745,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual number of channels of the stream
*/
-AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getChannelCount(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* There may be channel conversions in the Audio framework.
@@ -1746,7 +1761,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual number of channels of the underlying hardware
*/
-AAUDIO_API int32_t AAudioStream_getHardwareChannelCount(AAudioStream* stream)
+AAUDIO_API int32_t AAudioStream_getHardwareChannelCount(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(__ANDROID_API_U__);
/**
@@ -1757,7 +1772,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual number of samples per frame
*/
-AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getSamplesPerFrame(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Available since API level 26.
@@ -1765,7 +1781,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual device ID
*/
-AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int32_t AAudioStream_getDeviceId(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* Available since API level 26.
@@ -1773,22 +1789,30 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual data format of the stream
*/
-AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_format_t AAudioStream_getFormat(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* There may be data format conversions in the Audio framework.
* The data format set in the stream builder may not be the actual format used in the hardware.
*
* This returns the audio format used by the hardware.
+ *
+ * If AAudioStreamBuilder_openStream() returned AAUDIO_OK, this should always return an
+ * aaudio_format_t.
+ *
* AUDIO_FORMAT_PCM_8_24_BIT is currently not supported in AAudio, but the hardware may use it.
* If AUDIO_FORMAT_PCM_8_24_BIT is used by the hardware, return AAUDIO_FORMAT_PCM_I24_PACKED.
*
+ * If any other format used by the hardware is not supported by AAudio, this will return
+ * AAUDIO_FORMAT_INVALID.
+ *
* Available since API level 34.
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual data format of the underlying hardware.
*/
-AAUDIO_API aaudio_format_t AAudioStream_getHardwareFormat(AAudioStream* stream)
+AAUDIO_API aaudio_format_t AAudioStream_getHardwareFormat(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(__ANDROID_API_U__);
/**
@@ -1799,7 +1823,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual sharing mode
*/
-AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* stream)
+AAUDIO_API aaudio_sharing_mode_t AAudioStream_getSharingMode(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(26);
/**
@@ -1809,7 +1833,7 @@
*
* @param stream reference provided by AAudioStreamBuilder_openStream()
*/
-AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* stream)
+AAUDIO_API aaudio_performance_mode_t AAudioStream_getPerformanceMode(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(26);
/**
@@ -1818,7 +1842,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return direction
*/
-AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_direction_t AAudioStream_getDirection(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Passes back the number of frames that have been written since the stream was created.
@@ -1833,7 +1858,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames written
*/
-AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int64_t AAudioStream_getFramesWritten(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(26);
/**
* Passes back the number of frames that have been read since the stream was created.
@@ -1848,7 +1874,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return frames read
*/
-AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* stream) __INTRODUCED_IN(26);
+AAUDIO_API int64_t AAudioStream_getFramesRead(AAudioStream* _Nonnull stream) __INTRODUCED_IN(26);
/**
* Passes back the session ID associated with this stream.
@@ -1873,7 +1899,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return session ID or {@link #AAUDIO_SESSION_ID_NONE}
*/
-AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* stream) __INTRODUCED_IN(28);
+AAUDIO_API aaudio_session_id_t AAudioStream_getSessionId(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(28);
/**
* Passes back the time at which a particular frame was presented.
@@ -1899,8 +1926,9 @@
* @param timeNanoseconds pointer to a variable to receive the time
* @return {@link #AAUDIO_OK} or a negative error
*/
-AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* stream,
- clockid_t clockid, int64_t *framePosition, int64_t *timeNanoseconds) __INTRODUCED_IN(26);
+AAUDIO_API aaudio_result_t AAudioStream_getTimestamp(AAudioStream* _Nonnull stream,
+ clockid_t clockid, int64_t* _Nonnull framePosition, int64_t* _Nonnull timeNanoseconds)
+ __INTRODUCED_IN(26);
/**
* Return the use case for the stream.
@@ -1910,7 +1938,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return the stream's usage, for example {@link #AAUDIO_USAGE_MEDIA}
*/
-AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* stream) __INTRODUCED_IN(28);
+AAUDIO_API aaudio_usage_t AAudioStream_getUsage(AAudioStream* _Nonnull stream) __INTRODUCED_IN(28);
/**
* Return the content type for the stream.
@@ -1920,7 +1948,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return content type, for example {@link #AAUDIO_CONTENT_TYPE_MUSIC}
*/
-AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* stream)
+AAUDIO_API aaudio_content_type_t AAudioStream_getContentType(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(28);
/**
@@ -1935,7 +1963,7 @@
* @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
*/
AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
- AAudioStream* stream) __INTRODUCED_IN(32);
+ AAudioStream* _Nonnull stream) __INTRODUCED_IN(32);
/**
* Return whether the content of the stream is spatialized.
@@ -1945,7 +1973,8 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return true if the content is spatialized
*/
-AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* _Nonnull stream)
+ __INTRODUCED_IN(32);
/**
@@ -1956,7 +1985,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return input preset, for example {@link #AAUDIO_INPUT_PRESET_CAMCORDER}
*/
-AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
+AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(28);
/**
@@ -1969,7 +1998,7 @@
* @return the allowed capture policy, for example {@link #AAUDIO_ALLOW_CAPTURE_BY_ALL}
*/
AAUDIO_API aaudio_allowed_capture_policy_t AAudioStream_getAllowedCapturePolicy(
- AAudioStream* stream) __INTRODUCED_IN(29);
+ AAudioStream* _Nonnull stream) __INTRODUCED_IN(29);
/**
@@ -1982,7 +2011,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return true if privacy sensitive, false otherwise
*/
-AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
+AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(30);
/**
@@ -1994,7 +2023,7 @@
* @param stream reference provided by AAudioStreamBuilder_openStream()
* @return actual channel mask
*/
-AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* _Nonnull stream)
__INTRODUCED_IN(32);
#ifdef __cplusplus
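
With the nullability annotations, only parameters marked _Nullable may legally be null; every other pointer argument now triggers a warning when annotations are enabled. A minimal open/start/write/close sequence against the annotated entry points (illustrative only, error handling reduced to return-code checks):

    #include <aaudio/AAudio.h>
    #include <vector>

    aaudio_result_t playSilence() {
        AAudioStreamBuilder *builder = nullptr;  // the out-param itself is _Nonnull
        aaudio_result_t result = AAudio_createStreamBuilder(&builder);
        if (result != AAUDIO_OK) return result;

        AAudioStreamBuilder_setFormat(builder, AAUDIO_FORMAT_PCM_FLOAT);
        AAudioStreamBuilder_setChannelCount(builder, 2);

        AAudioStream *stream = nullptr;
        result = AAudioStreamBuilder_openStream(builder, &stream);
        AAudioStreamBuilder_delete(builder);
        if (result != AAUDIO_OK) return result;

        const int32_t numFrames = AAudioStream_getFramesPerBurst(stream);
        std::vector<float> buffer(numFrames * AAudioStream_getChannelCount(stream), 0.0f);

        result = AAudioStream_requestStart(stream);
        if (result == AAUDIO_OK) {
            // Write one burst of silence, waiting up to 100 ms.
            AAudioStream_write(stream, buffer.data(), numFrames, 100 * 1000 * 1000);
            AAudioStream_requestStop(stream);
        }
        AAudioStream_close(stream);
        return result;
    }
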
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index c31947f..56ef1e6 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -124,13 +124,17 @@
android::mediametrics::LogItem item(mMetricsId);
item.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_OPEN)
.set(AMEDIAMETRICS_PROP_PERFORMANCEMODEACTUAL,
- AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
+ AudioGlobal_convertPerformanceModeToText(getPerformanceMode()))
.set(AMEDIAMETRICS_PROP_SHARINGMODEACTUAL,
- AudioGlobal_convertSharingModeToText(getSharingMode()))
+ AudioGlobal_convertSharingModeToText(getSharingMode()))
.set(AMEDIAMETRICS_PROP_BUFFERCAPACITYFRAMES, getBufferCapacity())
.set(AMEDIAMETRICS_PROP_BURSTFRAMES, getFramesPerBurst())
.set(AMEDIAMETRICS_PROP_DIRECTION,
- AudioGlobal_convertDirectionToText(getDirection()));
+ AudioGlobal_convertDirectionToText(getDirection()))
+ .set(AMEDIAMETRICS_PROP_ENCODINGHARDWARE,
+ android::toString(getHardwareFormat()).c_str())
+ .set(AMEDIAMETRICS_PROP_CHANNELCOUNTHARDWARE, (int32_t)getHardwareSamplesPerFrame())
+ .set(AMEDIAMETRICS_PROP_SAMPLERATEHARDWARE, (int32_t)getHardwareSampleRate());
if (getDirection() == AAUDIO_DIRECTION_OUTPUT) {
item.set(AMEDIAMETRICS_PROP_PLAYERIID, mPlayerBase->getPlayerIId());
diff --git a/media/libaaudio/tests/test_attributes.cpp b/media/libaaudio/tests/test_attributes.cpp
index b88d562..e5676a7 100644
--- a/media/libaaudio/tests/test_attributes.cpp
+++ b/media/libaaudio/tests/test_attributes.cpp
@@ -20,6 +20,7 @@
// "test_aaudio_attributes.cpp". That other file is more current.
// So these tests could be deleted.
+#include <memory>
#include <stdio.h>
#include <unistd.h>
@@ -40,7 +41,7 @@
int privacyMode = DONT_SET,
aaudio_direction_t direction = AAUDIO_DIRECTION_OUTPUT) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
AAudioStream *aaudioStream = nullptr;
@@ -109,16 +110,15 @@
if (direction == AAUDIO_DIRECTION_INPUT) {
EXPECT_EQ(kNumFrames,
- AAudioStream_read(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ AAudioStream_read(aaudioStream, buffer.get(), kNumFrames, kNanosPerSecond));
} else {
EXPECT_EQ(kNumFrames,
- AAudioStream_write(aaudioStream, buffer, kNumFrames, kNanosPerSecond));
+ AAudioStream_write(aaudioStream, buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream));
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream));
- delete[] buffer;
}
static const aaudio_usage_t sUsages[] = {
diff --git a/media/libaaudio/tests/test_recovery.cpp b/media/libaaudio/tests/test_recovery.cpp
index 6e89f83..11331af 100644
--- a/media/libaaudio/tests/test_recovery.cpp
+++ b/media/libaaudio/tests/test_recovery.cpp
@@ -16,6 +16,7 @@
// Play silence and recover from dead servers or disconnected devices.
+#include <memory>
#include <stdio.h>
#include <aaudio/AAudio.h>
@@ -32,7 +33,6 @@
int32_t triesLeft = 3;
int32_t bufferCapacity;
int32_t framesPerBurst = 0;
- float *buffer = nullptr;
int32_t actualChannelCount = 0;
int32_t actualSampleRate = 0;
@@ -83,7 +83,7 @@
bufferCapacity, framesPerBurst);
int samplesPerBurst = framesPerBurst * actualChannelCount;
- buffer = new float[samplesPerBurst];
+ std::unique_ptr<float[]> buffer(new float[samplesPerBurst]);
result = AAudioStream_requestStart(aaudioStream);
if (result != AAUDIO_OK) {
@@ -98,7 +98,7 @@
int64_t printAt = actualSampleRate;
while (result == AAUDIO_OK && framesTotal < framesMax) {
int32_t framesWritten = AAudioStream_write(aaudioStream,
- buffer, framesPerBurst,
+ buffer.get(), framesPerBurst,
DEFAULT_TIMEOUT_NANOS);
if (framesWritten < 0) {
result = framesWritten;
@@ -134,6 +134,5 @@
AAudioStream_close(aaudioStream);
}
AAudioStreamBuilder_delete(aaudioBuilder);
- delete[] buffer;
printf(" result = %d = %s\n", result, AAudio_convertResultToText(result));
}
diff --git a/media/libaaudio/tests/test_session_id.cpp b/media/libaaudio/tests/test_session_id.cpp
index 3f7d4fc..5968b5d 100644
--- a/media/libaaudio/tests/test_session_id.cpp
+++ b/media/libaaudio/tests/test_session_id.cpp
@@ -16,6 +16,7 @@
// Test AAudio SessionId, which is used to associate Effects with a stream
+#include <memory>
#include <stdio.h>
#include <unistd.h>
@@ -29,7 +30,7 @@
// Test AAUDIO_SESSION_ID_NONE default
static void checkSessionIdNone(aaudio_performance_mode_t perfMode) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -51,12 +52,12 @@
ASSERT_EQ(AAUDIO_OK, AAudioStream_requestStart(aaudioStream1));
- ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1, buffer, kNumFrames, kNanosPerSecond));
+ ASSERT_EQ(kNumFrames,
+ AAudioStream_write(aaudioStream1, buffer.get(), kNumFrames, kNanosPerSecond));
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
- delete[] buffer;
AAudioStreamBuilder_delete(aaudioBuilder);
}
@@ -72,7 +73,7 @@
static void checkSessionIdAllocate(aaudio_performance_mode_t perfMode,
aaudio_direction_t direction) {
- float *buffer = new float[kNumFrames * kChannelCount];
+ std::unique_ptr<float[]> buffer(new float[kNumFrames * kChannelCount]);
AAudioStreamBuilder *aaudioBuilder = nullptr;
@@ -106,10 +107,10 @@
if (direction == AAUDIO_DIRECTION_INPUT) {
ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream1,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
} else {
ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream1,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream1));
@@ -135,10 +136,10 @@
if (otherDirection == AAUDIO_DIRECTION_INPUT) {
ASSERT_EQ(kNumFrames, AAudioStream_read(aaudioStream2,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
} else {
ASSERT_EQ(kNumFrames, AAudioStream_write(aaudioStream2,
- buffer, kNumFrames, kNanosPerSecond));
+ buffer.get(), kNumFrames, kNanosPerSecond));
}
EXPECT_EQ(AAUDIO_OK, AAudioStream_requestStop(aaudioStream2));
@@ -147,7 +148,6 @@
EXPECT_EQ(AAUDIO_OK, AAudioStream_close(aaudioStream1));
- delete[] buffer;
AAudioStreamBuilder_delete(aaudioBuilder);
}
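
The test changes replace raw new[]/delete[] with std::unique_ptr<float[]> so the buffer is released even when an ASSERT_* macro returns early. The pattern in isolation:

    #include <memory>

    static void exerciseStream(int numFrames, int channelCount) {
        // Freed automatically on every exit path, including the early returns
        // generated by ASSERT_*.
        std::unique_ptr<float[]> buffer(new float[numFrames * channelCount]);
        for (int i = 0; i < numFrames * channelCount; ++i) {
            buffer[i] = 0.0f;
        }
        // ... read from / write to the stream using buffer.get() ...
    }
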
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 68a8590..b3c8643 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -66,125 +66,6 @@
using media::audio::common::Int;
using media::audio::common::PcmType;
-namespace {
-
-enum class Direction {
- INPUT, OUTPUT
-};
-
-ConversionResult<Direction> direction(media::AudioPortRole role, media::AudioPortType type) {
- switch (type) {
- case media::AudioPortType::NONE:
- case media::AudioPortType::SESSION:
- break; // must be listed -Werror,-Wswitch
- case media::AudioPortType::DEVICE:
- switch (role) {
- case media::AudioPortRole::NONE:
- break; // must be listed -Werror,-Wswitch
- case media::AudioPortRole::SOURCE:
- return Direction::INPUT;
- case media::AudioPortRole::SINK:
- return Direction::OUTPUT;
- }
- break;
- case media::AudioPortType::MIX:
- switch (role) {
- case media::AudioPortRole::NONE:
- break; // must be listed -Werror,-Wswitch
- case media::AudioPortRole::SOURCE:
- return Direction::OUTPUT;
- case media::AudioPortRole::SINK:
- return Direction::INPUT;
- }
- break;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<Direction> direction(audio_port_role_t role, audio_port_type_t type) {
- switch (type) {
- case AUDIO_PORT_TYPE_NONE:
- case AUDIO_PORT_TYPE_SESSION:
- break; // must be listed -Werror,-Wswitch
- case AUDIO_PORT_TYPE_DEVICE:
- switch (role) {
- case AUDIO_PORT_ROLE_NONE:
- break; // must be listed -Werror,-Wswitch
- case AUDIO_PORT_ROLE_SOURCE:
- return Direction::INPUT;
- case AUDIO_PORT_ROLE_SINK:
- return Direction::OUTPUT;
- }
- break;
- case AUDIO_PORT_TYPE_MIX:
- switch (role) {
- case AUDIO_PORT_ROLE_NONE:
- break; // must be listed -Werror,-Wswitch
- case AUDIO_PORT_ROLE_SOURCE:
- return Direction::OUTPUT;
- case AUDIO_PORT_ROLE_SINK:
- return Direction::INPUT;
- }
- break;
- }
- return unexpected(BAD_VALUE);
-}
-
-} // namespace
-
-////////////////////////////////////////////////////////////////////////////////////////////////////
-// Converters
-
-ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
- media::AudioIoConfigEvent aidl) {
- switch (aidl) {
- case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
- return AUDIO_OUTPUT_REGISTERED;
- case media::AudioIoConfigEvent::OUTPUT_OPENED:
- return AUDIO_OUTPUT_OPENED;
- case media::AudioIoConfigEvent::OUTPUT_CLOSED:
- return AUDIO_OUTPUT_CLOSED;
- case media::AudioIoConfigEvent::OUTPUT_CONFIG_CHANGED:
- return AUDIO_OUTPUT_CONFIG_CHANGED;
- case media::AudioIoConfigEvent::INPUT_REGISTERED:
- return AUDIO_INPUT_REGISTERED;
- case media::AudioIoConfigEvent::INPUT_OPENED:
- return AUDIO_INPUT_OPENED;
- case media::AudioIoConfigEvent::INPUT_CLOSED:
- return AUDIO_INPUT_CLOSED;
- case media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED:
- return AUDIO_INPUT_CONFIG_CHANGED;
- case media::AudioIoConfigEvent::CLIENT_STARTED:
- return AUDIO_CLIENT_STARTED;
- }
- return unexpected(BAD_VALUE);
-}
-
-ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
- audio_io_config_event_t legacy) {
- switch (legacy) {
- case AUDIO_OUTPUT_REGISTERED:
- return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
- case AUDIO_OUTPUT_OPENED:
- return media::AudioIoConfigEvent::OUTPUT_OPENED;
- case AUDIO_OUTPUT_CLOSED:
- return media::AudioIoConfigEvent::OUTPUT_CLOSED;
- case AUDIO_OUTPUT_CONFIG_CHANGED:
- return media::AudioIoConfigEvent::OUTPUT_CONFIG_CHANGED;
- case AUDIO_INPUT_REGISTERED:
- return media::AudioIoConfigEvent::INPUT_REGISTERED;
- case AUDIO_INPUT_OPENED:
- return media::AudioIoConfigEvent::INPUT_OPENED;
- case AUDIO_INPUT_CLOSED:
- return media::AudioIoConfigEvent::INPUT_CLOSED;
- case AUDIO_INPUT_CONFIG_CHANGED:
- return media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED;
- case AUDIO_CLIENT_STARTED:
- return media::AudioIoConfigEvent::CLIENT_STARTED;
- }
- return unexpected(BAD_VALUE);
-}
-
ConversionResult<audio_port_role_t> aidl2legacy_AudioPortRole_audio_port_role_t(
media::AudioPortRole aidl) {
switch (aidl) {
@@ -241,52 +122,97 @@
return unexpected(BAD_VALUE);
}
-// This type is unnamed in the original definition, thus we name it here.
-using audio_port_config_mix_ext_usecase = decltype(audio_port_config_mix_ext::usecase);
+ConversionResult<AudioPortDirection> portDirection(
+ media::AudioPortRole role, media::AudioPortType type) {
+ audio_port_role_t legacyRole = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortRole_audio_port_role_t(role));
+ audio_port_type_t legacyType = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortType_audio_port_type_t(type));
+ return portDirection(legacyRole, legacyType);
+}
+ConversionResult<audio_io_config_event_t> aidl2legacy_AudioIoConfigEvent_audio_io_config_event_t(
+ media::AudioIoConfigEvent aidl) {
+ switch (aidl) {
+ case media::AudioIoConfigEvent::OUTPUT_REGISTERED:
+ return AUDIO_OUTPUT_REGISTERED;
+ case media::AudioIoConfigEvent::OUTPUT_OPENED:
+ return AUDIO_OUTPUT_OPENED;
+ case media::AudioIoConfigEvent::OUTPUT_CLOSED:
+ return AUDIO_OUTPUT_CLOSED;
+ case media::AudioIoConfigEvent::OUTPUT_CONFIG_CHANGED:
+ return AUDIO_OUTPUT_CONFIG_CHANGED;
+ case media::AudioIoConfigEvent::INPUT_REGISTERED:
+ return AUDIO_INPUT_REGISTERED;
+ case media::AudioIoConfigEvent::INPUT_OPENED:
+ return AUDIO_INPUT_OPENED;
+ case media::AudioIoConfigEvent::INPUT_CLOSED:
+ return AUDIO_INPUT_CLOSED;
+ case media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED:
+ return AUDIO_INPUT_CONFIG_CHANGED;
+ case media::AudioIoConfigEvent::CLIENT_STARTED:
+ return AUDIO_CLIENT_STARTED;
+ }
+ return unexpected(BAD_VALUE);
+}
+
+ConversionResult<media::AudioIoConfigEvent> legacy2aidl_audio_io_config_event_t_AudioIoConfigEvent(
+ audio_io_config_event_t legacy) {
+ switch (legacy) {
+ case AUDIO_OUTPUT_REGISTERED:
+ return media::AudioIoConfigEvent::OUTPUT_REGISTERED;
+ case AUDIO_OUTPUT_OPENED:
+ return media::AudioIoConfigEvent::OUTPUT_OPENED;
+ case AUDIO_OUTPUT_CLOSED:
+ return media::AudioIoConfigEvent::OUTPUT_CLOSED;
+ case AUDIO_OUTPUT_CONFIG_CHANGED:
+ return media::AudioIoConfigEvent::OUTPUT_CONFIG_CHANGED;
+ case AUDIO_INPUT_REGISTERED:
+ return media::AudioIoConfigEvent::INPUT_REGISTERED;
+ case AUDIO_INPUT_OPENED:
+ return media::AudioIoConfigEvent::INPUT_OPENED;
+ case AUDIO_INPUT_CLOSED:
+ return media::AudioIoConfigEvent::INPUT_CLOSED;
+ case AUDIO_INPUT_CONFIG_CHANGED:
+ return media::AudioIoConfigEvent::INPUT_CONFIG_CHANGED;
+ case AUDIO_CLIENT_STARTED:
+ return media::AudioIoConfigEvent::CLIENT_STARTED;
+ }
+ return unexpected(BAD_VALUE);
+}
ConversionResult<audio_port_config_mix_ext_usecase> aidl2legacy_AudioPortMixExtUseCase(
const AudioPortMixExtUseCase& aidl, media::AudioPortRole role) {
- audio_port_config_mix_ext_usecase legacy;
-
switch (role) {
- case media::AudioPortRole::NONE:
+ case media::AudioPortRole::NONE: {
+ audio_port_config_mix_ext_usecase legacy;
// Just verify that the union is empty.
VALUE_OR_RETURN(UNION_GET(aidl, unspecified));
return legacy;
-
+ }
case media::AudioPortRole::SOURCE:
- // This is not a bug. A SOURCE role corresponds to the stream field.
- legacy.stream = VALUE_OR_RETURN(aidl2legacy_AudioStreamType_audio_stream_type_t(
- VALUE_OR_RETURN(UNION_GET(aidl, stream))));
- return legacy;
-
+ return aidl2legacy_AudioPortMixExtUseCase_audio_port_config_mix_ext_usecase(
+ aidl, false /*isInput*/);
case media::AudioPortRole::SINK:
- // This is not a bug. A SINK role corresponds to the source field.
- legacy.source = VALUE_OR_RETURN(aidl2legacy_AudioSource_audio_source_t(
- VALUE_OR_RETURN(UNION_GET(aidl, source))));
- return legacy;
+ return aidl2legacy_AudioPortMixExtUseCase_audio_port_config_mix_ext_usecase(
+ aidl, true /*isInput*/);
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
ConversionResult<AudioPortMixExtUseCase> legacy2aidl_AudioPortMixExtUseCase(
const audio_port_config_mix_ext_usecase& legacy, audio_port_role_t role) {
- AudioPortMixExtUseCase aidl;
-
switch (role) {
- case AUDIO_PORT_ROLE_NONE:
+ case AUDIO_PORT_ROLE_NONE: {
+ AudioPortMixExtUseCase aidl;
UNION_SET(aidl, unspecified, false);
return aidl;
+ }
case AUDIO_PORT_ROLE_SOURCE:
- // This is not a bug. A SOURCE role corresponds to the stream field.
- UNION_SET(aidl, stream, VALUE_OR_RETURN(
- legacy2aidl_audio_stream_type_t_AudioStreamType(legacy.stream)));
- return aidl;
+ return legacy2aidl_audio_port_config_mix_ext_usecase_AudioPortMixExtUseCase(
+ legacy, false /*isInput*/);
case AUDIO_PORT_ROLE_SINK:
- // This is not a bug. A SINK role corresponds to the source field.
- UNION_SET(aidl, source,
- VALUE_OR_RETURN(legacy2aidl_audio_source_t_AudioSource(legacy.source)));
- return aidl;
+ return legacy2aidl_audio_port_config_mix_ext_usecase_AudioPortMixExtUseCase(
+ legacy, true /*isInput*/);
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
@@ -294,6 +220,8 @@
ConversionResult<audio_port_config_mix_ext> aidl2legacy_AudioPortMixExt(
const AudioPortMixExt& aidl, media::AudioPortRole role,
const media::AudioPortMixExtSys& aidlMixExt) {
+ // Not using HAL-level 'aidl2legacy_AudioPortMixExt' as it does not support
+ // 'media::AudioPortRole::NONE'.
audio_port_config_mix_ext legacy;
legacy.hw_module = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_module_handle_t(aidlMixExt.hwModule));
@@ -305,6 +233,8 @@
status_t legacy2aidl_AudioPortMixExt(
const audio_port_config_mix_ext& legacy, audio_port_role_t role,
AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+ // Not using HAL-level 'legacy2aidl_AudioPortMixExt' as it does not support
+ // 'AUDIO_PORT_ROLE_NONE'.
aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
@@ -329,21 +259,20 @@
ConversionResult<audio_port_config_device_ext>
aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(
const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlDeviceExt) {
- audio_port_config_device_ext legacy;
+ audio_port_config_device_ext legacy = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortDeviceExt_audio_port_config_device_ext(aidl));
legacy.hw_module = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_module_handle_t(aidlDeviceExt.hwModule));
- RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
- aidl.device, &legacy.type, legacy.address));
return legacy;
}
status_t legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(
const audio_port_config_device_ext& legacy,
AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+ *aidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_config_device_ext_AudioPortDeviceExt(legacy));
aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl->device = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
return OK;
}
@@ -353,6 +282,8 @@
ConversionResult<audio_port_config_ext> aidl2legacy_AudioPortExt_audio_port_config_ext(
const AudioPortExt& aidl, media::AudioPortType type,
media::AudioPortRole role, const media::AudioPortExtSys& aidlSys) {
+ // Not using HAL-level 'aidl2legacy_AudioPortExt_audio_port_config_ext' as it does not support
+ // 'media::AudioPortType::SESSION'.
audio_port_config_ext legacy;
switch (type) {
case media::AudioPortType::NONE:
@@ -384,6 +315,8 @@
status_t legacy2aidl_AudioPortExt(
const audio_port_config_ext& legacy, audio_port_type_t type, audio_port_role_t role,
AudioPortExt* aidl, media::AudioPortExtSys* aidlSys) {
+ // Not using the HAL-level 'legacy2aidl' counterpart as it does not support
+ // 'AUDIO_PORT_TYPE_SESSION'.
switch (type) {
case AUDIO_PORT_TYPE_NONE:
UNION_SET(*aidl, unspecified, false);
@@ -416,82 +349,38 @@
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
}
-ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
- const media::AudioPortConfigFw& aidl) {
- audio_port_config legacy{};
- legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+ConversionResult<audio_port_config> aidl2legacy_AudioPortConfigFw_audio_port_config(
+ const media::AudioPortConfigFw& aidl, int32_t* aidlPortId) {
+ const bool isInput = VALUE_OR_RETURN(
+ portDirection(aidl.sys.role, aidl.sys.type)) == AudioPortDirection::INPUT;
+ audio_port_config legacy;
+ int32_t aidlPortIdHolder;
+ RETURN_IF_ERROR(aidl2legacy_AudioPortConfig_audio_port_config(
+ aidl.hal, isInput, &legacy, &aidlPortIdHolder));
+ if (aidlPortId != nullptr) *aidlPortId = aidlPortIdHolder;
legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
- const bool isInput =
- VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
- if (aidl.hal.sampleRate.has_value()) {
- legacy.sample_rate = VALUE_OR_RETURN(
- convertIntegral<unsigned int>(aidl.hal.sampleRate.value().value));
- legacy.config_mask |= AUDIO_PORT_CONFIG_SAMPLE_RATE;
- }
- if (aidl.hal.channelMask.has_value()) {
- legacy.channel_mask =
- VALUE_OR_RETURN(
- aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
- aidl.hal.channelMask.value(), isInput));
- legacy.config_mask |= AUDIO_PORT_CONFIG_CHANNEL_MASK;
- }
- if (aidl.hal.format.has_value()) {
- legacy.format = VALUE_OR_RETURN(
- aidl2legacy_AudioFormatDescription_audio_format_t(aidl.hal.format.value()));
- legacy.config_mask |= AUDIO_PORT_CONFIG_FORMAT;
- }
- if (aidl.hal.gain.has_value()) {
- legacy.gain = VALUE_OR_RETURN(aidl2legacy_AudioGainConfig_audio_gain_config(
- aidl.hal.gain.value(), isInput));
- legacy.config_mask |= AUDIO_PORT_CONFIG_GAIN;
- }
- if (aidl.hal.flags.has_value()) {
- legacy.flags = VALUE_OR_RETURN(
- aidl2legacy_AudioIoFlags_audio_io_flags(aidl.hal.flags.value(), isInput));
- legacy.config_mask |= AUDIO_PORT_CONFIG_FLAGS;
- }
legacy.ext = VALUE_OR_RETURN(
aidl2legacy_AudioPortExt_audio_port_config_ext(
aidl.hal.ext, aidl.sys.type, aidl.sys.role, aidl.sys.ext));
return legacy;
}
-ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfig(
- const audio_port_config& legacy) {
+ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfigFw(
+ const audio_port_config& legacy, int32_t portId) {
+ const bool isInput = VALUE_OR_RETURN(
+ portDirection(legacy.role, legacy.type)) == AudioPortDirection::INPUT;
media::AudioPortConfigFw aidl;
- aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.hal = VALUE_OR_RETURN(
+ legacy2aidl_audio_port_config_AudioPortConfig(legacy, isInput, portId));
aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
- const bool isInput = VALUE_OR_RETURN(
- direction(legacy.role, legacy.type)) == Direction::INPUT;
- if (legacy.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
- Int aidl_sampleRate;
- aidl_sampleRate.value = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy.sample_rate));
- aidl.hal.sampleRate = aidl_sampleRate;
- }
- if (legacy.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
- aidl.hal.channelMask = VALUE_OR_RETURN(
- legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy.channel_mask, isInput));
- }
- if (legacy.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
- aidl.hal.format = VALUE_OR_RETURN(
- legacy2aidl_audio_format_t_AudioFormatDescription(legacy.format));
- }
- if (legacy.config_mask & AUDIO_PORT_CONFIG_GAIN) {
- aidl.hal.gain = VALUE_OR_RETURN(
- legacy2aidl_audio_gain_config_AudioGainConfig(legacy.gain, isInput));
- }
- if (legacy.config_mask & AUDIO_PORT_CONFIG_FLAGS) {
- aidl.hal.flags = VALUE_OR_RETURN(
- legacy2aidl_audio_io_flags_AudioIoFlags(legacy.flags, isInput));
- }
RETURN_IF_ERROR(legacy2aidl_AudioPortExt(legacy.ext, legacy.type, legacy.role,
&aidl.hal.ext, &aidl.sys.ext));
return aidl;
}
-ConversionResult<struct audio_patch> aidl2legacy_AudioPatch_audio_patch(
+ConversionResult<struct audio_patch> aidl2legacy_AudioPatchFw_audio_patch(
const media::AudioPatchFw& aidl) {
struct audio_patch legacy;
legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_patch_handle_t(aidl.id));
@@ -501,7 +390,7 @@
}
for (size_t i = 0; i < legacy.num_sinks; ++i) {
legacy.sinks[i] =
- VALUE_OR_RETURN(aidl2legacy_AudioPortConfig_audio_port_config(aidl.sinks[i]));
+ VALUE_OR_RETURN(aidl2legacy_AudioPortConfigFw_audio_port_config(aidl.sinks[i]));
}
legacy.num_sources = VALUE_OR_RETURN(convertIntegral<unsigned int>(aidl.sources.size()));
if (legacy.num_sources > AUDIO_PATCH_PORTS_MAX) {
@@ -509,12 +398,12 @@
}
for (size_t i = 0; i < legacy.num_sources; ++i) {
legacy.sources[i] =
- VALUE_OR_RETURN(aidl2legacy_AudioPortConfig_audio_port_config(aidl.sources[i]));
+ VALUE_OR_RETURN(aidl2legacy_AudioPortConfigFw_audio_port_config(aidl.sources[i]));
}
return legacy;
}
-ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatch(
+ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatchFw(
const struct audio_patch& legacy) {
media::AudioPatchFw aidl;
aidl.id = VALUE_OR_RETURN(legacy2aidl_audio_patch_handle_t_int32_t(legacy.id));
@@ -524,14 +413,14 @@
}
for (unsigned int i = 0; i < legacy.num_sinks; ++i) {
aidl.sinks.push_back(
- VALUE_OR_RETURN(legacy2aidl_audio_port_config_AudioPortConfig(legacy.sinks[i])));
+ VALUE_OR_RETURN(legacy2aidl_audio_port_config_AudioPortConfigFw(legacy.sinks[i])));
}
if (legacy.num_sources > AUDIO_PATCH_PORTS_MAX) {
return unexpected(BAD_VALUE);
}
for (unsigned int i = 0; i < legacy.num_sources; ++i) {
aidl.sources.push_back(
- VALUE_OR_RETURN(legacy2aidl_audio_port_config_AudioPortConfig(legacy.sources[i])));
+ VALUE_OR_RETURN(legacy2aidl_audio_port_config_AudioPortConfigFw(legacy.sources[i])));
}
return aidl;
}
@@ -541,7 +430,7 @@
const audio_io_handle_t io_handle = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_io_handle_t(aidl.ioHandle));
const struct audio_patch patch = VALUE_OR_RETURN(
- aidl2legacy_AudioPatch_audio_patch(aidl.patch));
+ aidl2legacy_AudioPatchFw_audio_patch(aidl.patch));
const bool isInput = aidl.isInput;
const uint32_t sampling_rate = VALUE_OR_RETURN(convertIntegral<uint32_t>(aidl.samplingRate));
const audio_format_t format = VALUE_OR_RETURN(
@@ -561,7 +450,7 @@
const sp<AudioIoDescriptor>& legacy) {
media::AudioIoDescriptor aidl;
aidl.ioHandle = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(legacy->getIoHandle()));
- aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatch(legacy->getPatch()));
+ aidl.patch = VALUE_OR_RETURN(legacy2aidl_audio_patch_AudioPatchFw(legacy->getPatch()));
aidl.isInput = legacy->getIsInput();
aidl.samplingRate = VALUE_OR_RETURN(convertIntegral<int32_t>(legacy->getSamplingRate()));
aidl.format = VALUE_OR_RETURN(
@@ -801,18 +690,18 @@
ConversionResult<audio_port_mix_ext>
aidl2legacy_AudioPortMixExt_audio_port_mix_ext(
const AudioPortMixExt& aidl, const media::AudioPortMixExtSys& aidlSys) {
- audio_port_mix_ext legacy{};
+ audio_port_mix_ext legacy = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortMixExt_audio_port_mix_ext(aidl));
legacy.hw_module = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
- legacy.handle = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_io_handle_t(aidl.handle));
return legacy;
}
status_t
legacy2aidl_audio_port_mix_ext_AudioPortMixExt(const audio_port_mix_ext& legacy,
AudioPortMixExt* aidl, media::AudioPortMixExtSys* aidlMixExt) {
+ *aidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_mix_ext_AudioPortMixExt(legacy));
aidlMixExt->hwModule = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl->handle = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(legacy.handle));
return OK;
}
@@ -831,11 +720,10 @@
ConversionResult<audio_port_device_ext>
aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(
const AudioPortDeviceExt& aidl, const media::AudioPortDeviceExtSys& aidlSys) {
- audio_port_device_ext legacy;
+ audio_port_device_ext legacy = VALUE_OR_RETURN(
+ aidl2legacy_AudioPortDeviceExt_audio_port_device_ext(aidl));
legacy.hw_module = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_module_handle_t(aidlSys.hwModule));
- RETURN_IF_ERROR(aidl2legacy_AudioDevice_audio_device(
- aidl.device, &legacy.type, legacy.address));
legacy.encapsulation_modes = VALUE_OR_RETURN(
aidl2legacy_AudioEncapsulationMode_mask(aidlSys.encapsulationModes));
legacy.encapsulation_metadata_types = VALUE_OR_RETURN(
@@ -847,10 +735,9 @@
status_t legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(
const audio_port_device_ext& legacy,
AudioPortDeviceExt* aidl, media::AudioPortDeviceExtSys* aidlDeviceExt) {
+ *aidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_port_device_ext_AudioPortDeviceExt(legacy));
aidlDeviceExt->hwModule = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_module_handle_t_int32_t(legacy.hw_module));
- aidl->device = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_device_AudioDevice(legacy.type, legacy.address));
aidlDeviceExt->encapsulationModes = VALUE_OR_RETURN_STATUS(
legacy2aidl_AudioEncapsulationMode_mask(legacy.encapsulation_modes));
aidlDeviceExt->encapsulationMetadataTypes = VALUE_OR_RETURN_STATUS(
@@ -930,93 +817,33 @@
}
ConversionResult<audio_port_v7>
-aidl2legacy_AudioPort_audio_port_v7(const media::AudioPortFw& aidl) {
- audio_port_v7 legacy;
- legacy.id = VALUE_OR_RETURN(aidl2legacy_int32_t_audio_port_handle_t(aidl.hal.id));
+aidl2legacy_AudioPortFw_audio_port_v7(const media::AudioPortFw& aidl) {
+ const bool isInput = VALUE_OR_RETURN(
+ portDirection(aidl.sys.role, aidl.sys.type)) == AudioPortDirection::INPUT;
+ audio_port_v7 legacy = VALUE_OR_RETURN(aidl2legacy_AudioPort_audio_port_v7(aidl.hal, isInput));
legacy.role = VALUE_OR_RETURN(aidl2legacy_AudioPortRole_audio_port_role_t(aidl.sys.role));
legacy.type = VALUE_OR_RETURN(aidl2legacy_AudioPortType_audio_port_type_t(aidl.sys.type));
- RETURN_IF_ERROR(aidl2legacy_string(aidl.hal.name, legacy.name, sizeof(legacy.name)));
-
- if (aidl.hal.profiles.size() > std::size(legacy.audio_profiles)) {
- return unexpected(BAD_VALUE);
- }
- const bool isInput =
- VALUE_OR_RETURN(direction(aidl.sys.role, aidl.sys.type)) == Direction::INPUT;
- RETURN_IF_ERROR(convertRange(
- aidl.hal.profiles.begin(), aidl.hal.profiles.end(), legacy.audio_profiles,
- [isInput](const AudioProfile& p) {
- return aidl2legacy_AudioProfile_audio_profile(p, isInput);
- }));
- legacy.num_audio_profiles = aidl.hal.profiles.size();
-
- if (aidl.hal.extraAudioDescriptors.size() > std::size(legacy.extra_audio_descriptors)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(
- aidl.hal.extraAudioDescriptors.begin(), aidl.hal.extraAudioDescriptors.end(),
- legacy.extra_audio_descriptors,
- aidl2legacy_ExtraAudioDescriptor_audio_extra_audio_descriptor));
- legacy.num_extra_audio_descriptors = aidl.hal.extraAudioDescriptors.size();
-
- if (aidl.hal.gains.size() > std::size(legacy.gains)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(convertRange(aidl.hal.gains.begin(), aidl.hal.gains.end(), legacy.gains,
- [isInput](const AudioGain& g) {
- return aidl2legacy_AudioGain_audio_gain(g, isInput);
- }));
- legacy.num_gains = aidl.hal.gains.size();
legacy.active_config = VALUE_OR_RETURN(
- aidl2legacy_AudioPortConfig_audio_port_config(aidl.sys.activeConfig));
+ aidl2legacy_AudioPortConfigFw_audio_port_config(aidl.sys.activeConfig));
legacy.ext = VALUE_OR_RETURN(
aidl2legacy_AudioPortExt_audio_port_v7_ext(aidl.hal.ext, aidl.sys.type, aidl.sys.ext));
return legacy;
}
ConversionResult<media::AudioPortFw>
-legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy) {
+legacy2aidl_audio_port_v7_AudioPortFw(const audio_port_v7& legacy) {
+ const bool isInput = VALUE_OR_RETURN(
+ portDirection(legacy.role, legacy.type)) == AudioPortDirection::INPUT;
media::AudioPortFw aidl;
- aidl.hal.id = VALUE_OR_RETURN(legacy2aidl_audio_port_handle_t_int32_t(legacy.id));
+ aidl.hal = VALUE_OR_RETURN(legacy2aidl_audio_port_v7_AudioPort(legacy, isInput));
aidl.sys.role = VALUE_OR_RETURN(legacy2aidl_audio_port_role_t_AudioPortRole(legacy.role));
aidl.sys.type = VALUE_OR_RETURN(legacy2aidl_audio_port_type_t_AudioPortType(legacy.type));
- aidl.hal.name = VALUE_OR_RETURN(legacy2aidl_string(legacy.name, sizeof(legacy.name)));
-
- if (legacy.num_audio_profiles > std::size(legacy.audio_profiles)) {
- return unexpected(BAD_VALUE);
- }
- const bool isInput = VALUE_OR_RETURN(direction(legacy.role, legacy.type)) == Direction::INPUT;
- RETURN_IF_ERROR(
- convertRange(legacy.audio_profiles, legacy.audio_profiles + legacy.num_audio_profiles,
- std::back_inserter(aidl.hal.profiles),
- [isInput](const audio_profile& p) {
- return legacy2aidl_audio_profile_AudioProfile(p, isInput);
- }));
-
- if (legacy.num_extra_audio_descriptors > std::size(legacy.extra_audio_descriptors)) {
- return unexpected(BAD_VALUE);
- }
+ // These get filled by the call to 'legacy2aidl_AudioPortExt' below.
aidl.sys.profiles.resize(legacy.num_audio_profiles);
- RETURN_IF_ERROR(
- convertRange(legacy.extra_audio_descriptors,
- legacy.extra_audio_descriptors + legacy.num_extra_audio_descriptors,
- std::back_inserter(aidl.hal.extraAudioDescriptors),
- legacy2aidl_audio_extra_audio_descriptor_ExtraAudioDescriptor));
-
- if (legacy.num_gains > std::size(legacy.gains)) {
- return unexpected(BAD_VALUE);
- }
- RETURN_IF_ERROR(
- convertRange(legacy.gains, legacy.gains + legacy.num_gains,
- std::back_inserter(aidl.hal.gains),
- [isInput](const audio_gain& g) {
- return legacy2aidl_audio_gain_AudioGain(g, isInput);
- }));
aidl.sys.gains.resize(legacy.num_gains);
-
aidl.sys.activeConfig = VALUE_OR_RETURN(
- legacy2aidl_audio_port_config_AudioPortConfig(legacy.active_config));
+ legacy2aidl_audio_port_config_AudioPortConfigFw(legacy.active_config, legacy.id));
aidl.sys.activeConfig.hal.portId = aidl.hal.id;
RETURN_IF_ERROR(
legacy2aidl_AudioPortExt(legacy.ext, legacy.type, &aidl.hal.ext, &aidl.sys.ext));
diff --git a/media/libaudioclient/AudioPolicy.cpp b/media/libaudioclient/AudioPolicy.cpp
index 6bb0cbe..1b9936f 100644
--- a/media/libaudioclient/AudioPolicy.cpp
+++ b/media/libaudioclient/AudioPolicy.cpp
@@ -203,9 +203,10 @@
return false;
}
-bool AudioMix::hasMatchUserIdRule() const {
+bool AudioMix::hasUserIdRule(bool match) const {
+ const uint32_t rule = match ? RULE_MATCH_USERID : RULE_EXCLUDE_USERID;
for (size_t i = 0; i < mCriteria.size(); i++) {
- if (mCriteria[i].mRule == RULE_MATCH_USERID) {
+ if (mCriteria[i].mRule == rule) {
return true;
}
}
@@ -214,7 +215,7 @@
bool AudioMix::isDeviceAffinityCompatible() const {
return ((mMixType == MIX_TYPE_PLAYERS)
- && (mRouteFlags == MIX_ROUTE_FLAG_RENDER));
+ && ((mRouteFlags & MIX_ROUTE_FLAG_RENDER) == MIX_ROUTE_FLAG_RENDER));
}
} // namespace android
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index f7e15fc..59016ad 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -82,7 +82,7 @@
// Binder for the AudioFlinger service that's passed to this client process from the system server.
// This allows specific isolated processes to access the audio system. Currently used only for the
// HotwordDetectionService.
-sp<IBinder> gAudioFlingerBinder = nullptr;
+static sp<IBinder> gAudioFlingerBinder = nullptr;
void AudioSystem::setAudioFlingerBinder(const sp<IBinder>& audioFlinger) {
if (audioFlinger->getInterfaceDescriptor() != media::IAudioFlingerService::descriptor) {
@@ -98,6 +98,15 @@
gAudioFlingerBinder = audioFlinger;
}
+static sp<IAudioFlinger> gLocalAudioFlinger; // set if we are local.
+
+status_t AudioSystem::setLocalAudioFlinger(const sp<IAudioFlinger>& af) {
+ Mutex::Autolock _l(gLock);
+ if (gAudioFlinger != nullptr) return INVALID_OPERATION;
+ gLocalAudioFlinger = af;
+ return OK;
+}
+
// establish binder interface to AudioFlinger service
const sp<IAudioFlinger> AudioSystem::get_audio_flinger() {
sp<IAudioFlinger> af;
@@ -105,7 +114,19 @@
bool reportNoError = false;
{
Mutex::Autolock _l(gLock);
- if (gAudioFlinger == 0) {
+ if (gAudioFlinger != nullptr) {
+ return gAudioFlinger;
+ }
+
+ if (gAudioFlingerClient == nullptr) {
+ gAudioFlingerClient = sp<AudioFlingerClient>::make();
+ } else {
+ reportNoError = true;
+ }
+
+ if (gLocalAudioFlinger != nullptr) {
+ gAudioFlinger = gLocalAudioFlinger;
+ } else {
sp<IBinder> binder;
if (gAudioFlingerBinder != nullptr) {
binder = gAudioFlingerBinder;
@@ -113,32 +134,24 @@
sp<IServiceManager> sm = defaultServiceManager();
do {
binder = sm->getService(String16(IAudioFlinger::DEFAULT_SERVICE_NAME));
- if (binder != 0)
- break;
+ if (binder != nullptr) break;
ALOGW("AudioFlinger not published, waiting...");
usleep(500000); // 0.5 s
} while (true);
}
- if (gAudioFlingerClient == NULL) {
- gAudioFlingerClient = new AudioFlingerClient();
- } else {
- reportNoError = true;
- }
binder->linkToDeath(gAudioFlingerClient);
- gAudioFlinger = new AudioFlingerClientAdapter(
- interface_cast<media::IAudioFlingerService>(binder));
- LOG_ALWAYS_FATAL_IF(gAudioFlinger == 0);
- afc = gAudioFlingerClient;
- // Make sure callbacks can be received by gAudioFlingerClient
- ProcessState::self()->startThreadPool();
+ const auto afs = interface_cast<media::IAudioFlingerService>(binder);
+ LOG_ALWAYS_FATAL_IF(afs == nullptr);
+ gAudioFlinger = sp<AudioFlingerClientAdapter>::make(afs);
}
+ afc = gAudioFlingerClient;
af = gAudioFlinger;
+ // Make sure callbacks can be received by gAudioFlingerClient
+ ProcessState::self()->startThreadPool();
}
- if (afc != 0) {
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
- af->registerClient(afc);
- IPCThreadState::self()->restoreCallingIdentity(token);
- }
+ const int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ af->registerClient(afc);
+ IPCThreadState::self()->restoreCallingIdentity(token);
if (reportNoError) reportError(NO_ERROR);
return af;
}
@@ -1559,7 +1572,7 @@
*num_ports = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(numPortsAidl.value));
*generation = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(generationAidl));
RETURN_STATUS_IF_ERROR(convertRange(portsAidl.begin(), portsAidl.end(), ports,
- aidl2legacy_AudioPort_audio_port_v7));
+ aidl2legacy_AudioPortFw_audio_port_v7));
return OK;
}
@@ -1573,7 +1586,7 @@
media::AudioPortFw portAidl;
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(aps->getAudioPort(port->id, &portAidl)));
- *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(portAidl));
+ *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortFw_audio_port_v7(portAidl));
return OK;
}
@@ -1587,7 +1600,7 @@
if (aps == 0) return PERMISSION_DENIED;
media::AudioPatchFw patchAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_patch_AudioPatch(*patch));
+ legacy2aidl_audio_patch_AudioPatchFw(*patch));
int32_t handleAidl = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(*handle));
RETURN_STATUS_IF_ERROR(
statusTFromBinderStatus(aps->createAudioPatch(patchAidl, handleAidl, &handleAidl)));
@@ -1625,7 +1638,7 @@
*num_patches = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(numPatchesAidl.value));
*generation = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(generationAidl));
RETURN_STATUS_IF_ERROR(convertRange(patchesAidl.begin(), patchesAidl.end(), patches,
- aidl2legacy_AudioPatch_audio_patch));
+ aidl2legacy_AudioPatchFw_audio_patch));
return OK;
}
@@ -1638,7 +1651,7 @@
if (aps == 0) return PERMISSION_DENIED;
media::AudioPortConfigFw configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_port_config_AudioPortConfig(*config));
+ legacy2aidl_audio_port_config_AudioPortConfigFw(*config));
return statusTFromBinderStatus(aps->setAudioPortConfig(configAidl));
}
@@ -1859,7 +1872,7 @@
if (aps == 0) return PERMISSION_DENIED;
media::AudioPortConfigFw sourceAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_port_config_AudioPortConfig(*source));
+ legacy2aidl_audio_port_config_AudioPortConfigFw(*source));
media::AudioAttributesInternal attributesAidl = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_attributes_t_AudioAttributesInternal(*attributes));
int32_t portIdAidl;
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index 30ccefe..1c634e7 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -674,18 +674,18 @@
status_t AudioFlingerClientAdapter::getAudioPort(struct audio_port_v7* port) {
media::AudioPortFw portAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_port_v7_AudioPort(*port));
+ legacy2aidl_audio_port_v7_AudioPortFw(*port));
media::AudioPortFw aidlRet;
RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
mDelegate->getAudioPort(portAidl, &aidlRet)));
- *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPort_audio_port_v7(aidlRet));
+ *port = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioPortFw_audio_port_v7(aidlRet));
return OK;
}
status_t AudioFlingerClientAdapter::createAudioPatch(const struct audio_patch* patch,
audio_patch_handle_t* handle) {
media::AudioPatchFw patchAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_patch_AudioPatch(*patch));
+ legacy2aidl_audio_patch_AudioPatchFw(*patch));
int32_t aidlRet = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_patch_handle_t_int32_t(
AUDIO_PATCH_HANDLE_NONE));
if (handle != nullptr) {
@@ -712,12 +712,12 @@
mDelegate->listAudioPatches(maxPatches, &aidlRet)));
*num_patches = VALUE_OR_RETURN_STATUS(convertIntegral<unsigned int>(aidlRet.size()));
return convertRange(aidlRet.begin(), aidlRet.end(), patches,
- aidl2legacy_AudioPatch_audio_patch);
+ aidl2legacy_AudioPatchFw_audio_patch);
}
status_t AudioFlingerClientAdapter::setAudioPortConfig(const struct audio_port_config* config) {
media::AudioPortConfigFw configAidl = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_port_config_AudioPortConfig(*config));
+ legacy2aidl_audio_port_config_AudioPortConfigFw(*config));
return statusTFromBinderStatus(mDelegate->setAudioPortConfig(configAidl));
}
@@ -815,7 +815,7 @@
status_t AudioFlingerClientAdapter::setDeviceConnectedState(
const struct audio_port_v7 *port, bool connected) {
media::AudioPortFw aidlPort = VALUE_OR_RETURN_STATUS(
- legacy2aidl_audio_port_v7_AudioPort(*port));
+ legacy2aidl_audio_port_v7_AudioPortFw(*port));
return statusTFromBinderStatus(mDelegate->setDeviceConnectedState(aidlPort, connected));
}
@@ -1256,15 +1256,15 @@
Status AudioFlingerServerAdapter::getAudioPort(const media::AudioPortFw& port,
media::AudioPortFw* _aidl_return) {
- audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
+ audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPortFw_audio_port_v7(port));
RETURN_BINDER_IF_ERROR(mDelegate->getAudioPort(&portLegacy));
- *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_port_v7_AudioPort(portLegacy));
+ *_aidl_return = VALUE_OR_RETURN_BINDER(legacy2aidl_audio_port_v7_AudioPortFw(portLegacy));
return Status::ok();
}
Status AudioFlingerServerAdapter::createAudioPatch(const media::AudioPatchFw& patch,
int32_t* _aidl_return) {
- audio_patch patchLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPatch_audio_patch(patch));
+ audio_patch patchLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPatchFw_audio_patch(patch));
audio_patch_handle_t handleLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_patch_handle_t(*_aidl_return));
RETURN_BINDER_IF_ERROR(mDelegate->createAudioPatch(&patchLegacy, &handleLegacy));
@@ -1287,13 +1287,13 @@
RETURN_BINDER_IF_ERROR(convertRange(&patchesLegacy[0],
&patchesLegacy[count],
std::back_inserter(*_aidl_return),
- legacy2aidl_audio_patch_AudioPatch));
+ legacy2aidl_audio_patch_AudioPatchFw));
return Status::ok();
}
Status AudioFlingerServerAdapter::setAudioPortConfig(const media::AudioPortConfigFw& config) {
audio_port_config configLegacy = VALUE_OR_RETURN_BINDER(
- aidl2legacy_AudioPortConfig_audio_port_config(config));
+ aidl2legacy_AudioPortConfigFw_audio_port_config(config));
return Status::fromStatusT(mDelegate->setAudioPortConfig(&configLegacy));
}
@@ -1372,7 +1372,7 @@
Status AudioFlingerServerAdapter::setDeviceConnectedState(
const media::AudioPortFw& port, bool connected) {
- audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
+ audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPortFw_audio_port_v7(port));
return Status::fromStatusT(mDelegate->setDeviceConnectedState(&portLegacy, connected));
}
diff --git a/media/libaudioclient/ToneGenerator.cpp b/media/libaudioclient/ToneGenerator.cpp
index f968a4b..f0b4d11 100644
--- a/media/libaudioclient/ToneGenerator.cpp
+++ b/media/libaudioclient/ToneGenerator.cpp
@@ -1027,7 +1027,7 @@
if (property_get("gsm.operator.iso-country", value, "") == 0) {
property_get("gsm.sim.operator.iso-country", value, "");
}
- // If dual sim device has two SIM cards inserted and is not registerd to any network,
+ // If dual sim device has two SIM cards inserted and is not registered to any network,
// "," is set to "gsm.operator.iso-country" prop.
// In this case, "gsm.sim.operator.iso-country" prop should be used.
if (strlen(value) == 1 && strstr(value, ",") != NULL) {
diff --git a/media/libaudioclient/aidl/android/media/ICaptureStateListener.aidl b/media/libaudioclient/aidl/android/media/ICaptureStateListener.aidl
index 3b2206a..3ec6e7a 100644
--- a/media/libaudioclient/aidl/android/media/ICaptureStateListener.aidl
+++ b/media/libaudioclient/aidl/android/media/ICaptureStateListener.aidl
@@ -19,6 +19,6 @@
/**
* {@hide}
*/
-interface ICaptureStateListener {
+oneway interface ICaptureStateListener {
void setCaptureState(boolean active);
}
diff --git a/media/libaudioclient/aidl/android/media/SoundDoseRecord.aidl b/media/libaudioclient/aidl/android/media/SoundDoseRecord.aidl
index 94b8ce2..1397e23 100644
--- a/media/libaudioclient/aidl/android/media/SoundDoseRecord.aidl
+++ b/media/libaudioclient/aidl/android/media/SoundDoseRecord.aidl
@@ -17,6 +17,7 @@
package android.media;
/** Record containing information about the computed sound dose. */
+@JavaDerive(toString = true)
parcelable SoundDoseRecord {
/**
* Corresponds to the time in seconds when the CSD value is calculated from.
diff --git a/media/libaudioclient/include/media/AidlConversion.h b/media/libaudioclient/include/media/AidlConversion.h
index a1012a1..f0e58ae 100644
--- a/media/libaudioclient/include/media/AidlConversion.h
+++ b/media/libaudioclient/include/media/AidlConversion.h
@@ -88,14 +88,15 @@
ConversionResult<int32_t> legacy2aidl_audio_port_config_session_ext_AudioPortConfigSessionExt(
const audio_port_config_session_ext& legacy);
-ConversionResult<audio_port_config> aidl2legacy_AudioPortConfig_audio_port_config(
- const media::AudioPortConfigFw& aidl);
-ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfig(
- const audio_port_config& legacy);
+// portId needs to be set when dealing with the HAL.
+ConversionResult<audio_port_config> aidl2legacy_AudioPortConfigFw_audio_port_config(
+ const media::AudioPortConfigFw& aidl, int32_t* aidlPortId = nullptr);
+ConversionResult<media::AudioPortConfigFw> legacy2aidl_audio_port_config_AudioPortConfigFw(
+ const audio_port_config& legacy, int32_t portId = 0);
-ConversionResult<struct audio_patch> aidl2legacy_AudioPatch_audio_patch(
+ConversionResult<struct audio_patch> aidl2legacy_AudioPatchFw_audio_patch(
const media::AudioPatchFw& aidl);
-ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatch(
+ConversionResult<media::AudioPatchFw> legacy2aidl_audio_patch_AudioPatchFw(
const struct audio_patch& legacy);
ConversionResult<sp<AudioIoDescriptor>> aidl2legacy_AudioIoDescriptor_AudioIoDescriptor(
@@ -167,9 +168,9 @@
legacy2aidl_audio_port_session_ext_int32_t(const audio_port_session_ext& legacy);
ConversionResult<audio_port_v7>
-aidl2legacy_AudioPort_audio_port_v7(const media::AudioPortFw& aidl);
+aidl2legacy_AudioPortFw_audio_port_v7(const media::AudioPortFw& aidl);
ConversionResult<media::AudioPortFw>
-legacy2aidl_audio_port_v7_AudioPort(const audio_port_v7& legacy);
+legacy2aidl_audio_port_v7_AudioPortFw(const audio_port_v7& legacy);
ConversionResult<audio_unique_id_use_t>
aidl2legacy_AudioUniqueIdUse_audio_unique_id_use_t(media::AudioUniqueIdUse aidl);
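The declarations above note that the port id needs to be passed explicitly when dealing with the HAL, since the legacy audio_port_config has no separate portId field. A minimal sketch of the intended round trip, mirroring the conversion test added further below (the 'aidlConfig' variable is an assumption):

    int32_t aidlPortId = 0;
    // Framework AIDL -> legacy: the converter reports the id of the owning port.
    audio_port_config legacy = VALUE_OR_RETURN(
            aidl2legacy_AudioPortConfigFw_audio_port_config(aidlConfig, &aidlPortId));
    // Legacy -> framework AIDL: pass the port id back in so it can be restored.
    media::AudioPortConfigFw restored = VALUE_OR_RETURN(
            legacy2aidl_audio_port_config_AudioPortConfigFw(legacy, aidlPortId));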
diff --git a/media/libaudioclient/include/media/AudioPolicy.h b/media/libaudioclient/include/media/AudioPolicy.h
index 61f2069..3fd438e 100644
--- a/media/libaudioclient/include/media/AudioPolicy.h
+++ b/media/libaudioclient/include/media/AudioPolicy.h
@@ -61,7 +61,10 @@
#define MIX_ROUTE_FLAG_LOOP_BACK (0x1 << 1)
/** Loop back some audio while it is rendered */
#define MIX_ROUTE_FLAG_LOOP_BACK_AND_RENDER (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
-#define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK)
+/** Indicates that the mix disallows routing to the preferred device */
+#define MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE (0x1 << 2)
+#define MIX_ROUTE_FLAG_ALL (MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_LOOP_BACK | \
+ MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE)
#define MAX_MIXES_PER_POLICY 10
#define MAX_CRITERIA_PER_MIX 20
@@ -112,9 +115,9 @@
void setMatchUserId(int userId);
/** returns true if this mix has a rule to match or exclude the given userId */
bool hasUserIdRule(bool match, int userId) const;
- /** returns true if this mix has a rule for userId match (any userId) */
- bool hasMatchUserIdRule() const;
- /** returns true if this mix can be used for uid-device affinity routing */
+ /** returns true if this mix has a rule to match or exclude (any userId) */
+ bool hasUserIdRule(bool match) const;
+ /** returns true if this mix can be used for uid-device affinity routing */
bool isDeviceAffinityCompatible() const;
std::vector<AudioMixMatchCriterion> mCriteria;
@@ -147,6 +150,11 @@
== MIX_ROUTE_FLAG_LOOP_BACK;
}
+static inline bool is_mix_disallows_preferred_device(uint32_t routeFlags) {
+ return (routeFlags & MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE)
+ == MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE;
+}
+
}; // namespace android
#endif // ANDROID_AUDIO_POLICY_H
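To see why isDeviceAffinityCompatible() (changed in the AudioPolicy.cpp hunk above) now masks the flags instead of comparing for equality, consider a render mix that also carries the new flag; this is only an illustrative sketch:

    const uint32_t routeFlags =
            MIX_ROUTE_FLAG_RENDER | MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE;
    const bool oldCheck = (routeFlags == MIX_ROUTE_FLAG_RENDER);              // false
    const bool newCheck = ((routeFlags & MIX_ROUTE_FLAG_RENDER)
                           == MIX_ROUTE_FLAG_RENDER);                         // true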
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index 7bb00df..23b0340 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -167,6 +167,10 @@
// HotwordDetectionService.
static void setAudioFlingerBinder(const sp<IBinder>& audioFlinger);
+ // Sets a local AudioFlinger interface to be used by AudioSystem.
+ // This is used by audioserver main() to avoid binder AIDL translation.
+ static status_t setLocalAudioFlinger(const sp<IAudioFlinger>& af);
+
// helper function to obtain AudioFlinger service handle
static const sp<IAudioFlinger> get_audio_flinger();
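A rough sketch of how setLocalAudioFlinger() is intended to be used, based on the comment above and the get_audio_flinger() changes in AudioSystem.cpp; the helper below and its call site are assumptions, not part of this change:

    // Called from the audioserver process before the first get_audio_flinger().
    void useLocalAudioFlinger(const sp<IAudioFlinger>& localAf) {
        // Returns INVALID_OPERATION if a (binder-based) instance is already cached.
        (void)AudioSystem::setLocalAudioFlinger(localAf);
        // Subsequent calls return 'localAf' directly, skipping the
        // AudioFlingerClientAdapter / binder path.
        sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();  // == localAf
    }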
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index 75d576e..5cd17e1 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -14,6 +14,8 @@
* limitations under the License.
*/
+#include <iostream>
+
#include <gtest/gtest.h>
#include <media/AidlConversion.h>
@@ -22,19 +24,52 @@
using namespace android;
using namespace android::aidl_utils;
-using android::media::AudioDirectMode;
+using media::AudioDirectMode;
+using media::AudioPortConfigFw;
+using media::AudioPortDeviceExtSys;
+using media::AudioPortFw;
+using media::AudioPortRole;
+using media::AudioPortType;
using media::audio::common::AudioChannelLayout;
+using media::audio::common::AudioDevice;
using media::audio::common::AudioDeviceDescription;
using media::audio::common::AudioDeviceType;
using media::audio::common::AudioEncapsulationMetadataType;
using media::audio::common::AudioEncapsulationType;
using media::audio::common::AudioFormatDescription;
using media::audio::common::AudioFormatType;
+using media::audio::common::AudioGain;
+using media::audio::common::AudioGainConfig;
using media::audio::common::AudioGainMode;
+using media::audio::common::AudioIoFlags;
+using media::audio::common::AudioPortDeviceExt;
+using media::audio::common::AudioProfile;
using media::audio::common::AudioStandard;
using media::audio::common::ExtraAudioDescriptor;
+using media::audio::common::Int;
using media::audio::common::PcmType;
+// Provide value printers for types generated from AIDL
+// They need to be in the same namespace as the types we intend to print
+namespace android::media {
+#define DEFINE_PRINTING_TEMPLATES() \
+ template <typename P> \
+ std::enable_if_t<std::is_base_of_v<::android::Parcelable, P>, std::ostream&> operator<<( \
+ std::ostream& os, const P& p) { \
+ return os << p.toString(); \
+ } \
+ template <typename E> \
+ std::enable_if_t<std::is_enum_v<E>, std::ostream&> operator<<(std::ostream& os, const E& e) { \
+ return os << toString(e); \
+ }
+DEFINE_PRINTING_TEMPLATES();
+
+namespace audio::common {
+DEFINE_PRINTING_TEMPLATES();
+} // namespace audio::common
+#undef DEFINE_PRINTING_TEMPLATES
+} // namespace android::media
+
namespace {
template <typename T>
@@ -367,6 +402,134 @@
testing::Values(make_AFD_Invalid(), AudioFormatDescription{},
make_AFD_Pcm16Bit()));
+AudioPortConfigFw createAudioPortConfigFw(const AudioChannelLayout& layout,
+ const AudioFormatDescription& format,
+ const AudioDeviceDescription& device) {
+ const bool isInput = device.type < AudioDeviceType::OUT_DEFAULT;
+ AudioPortConfigFw result;
+ result.hal.id = 43;
+ result.hal.portId = 42;
+ Int sr44100;
+ sr44100.value = 44100;
+ result.hal.sampleRate = sr44100;
+ result.hal.channelMask = layout;
+ result.hal.format = format;
+ AudioGainConfig gain;
+ gain.mode = 1 << static_cast<int>(AudioGainMode::JOINT);
+ gain.values = std::vector<int32_t>({100});
+ result.hal.gain = gain;
+ AudioPortDeviceExt ext;
+ AudioDevice audioDevice;
+ audioDevice.type = device;
+ ext.device = audioDevice;
+ result.hal.ext = ext;
+ result.sys.role = isInput ? AudioPortRole::SOURCE : AudioPortRole::SINK;
+ result.sys.type = AudioPortType::DEVICE;
+ AudioPortDeviceExtSys sysDevice;
+ sysDevice.hwModule = 1;
+ result.sys.ext = sysDevice;
+ return result;
+}
+
+using AudioPortConfigParam =
+ std::tuple<AudioChannelLayout, AudioFormatDescription, AudioDeviceDescription>;
+class AudioPortConfigRoundTripTest : public testing::TestWithParam<AudioPortConfigParam> {};
+TEST_P(AudioPortConfigRoundTripTest, Aidl2Legacy2Aidl) {
+ const AudioChannelLayout layout = std::get<0>(GetParam());
+ const AudioFormatDescription format = std::get<1>(GetParam());
+ const AudioDeviceDescription device = std::get<2>(GetParam());
+ const bool isInput = device.type < AudioDeviceType::OUT_DEFAULT;
+ AudioPortConfigFw initial = createAudioPortConfigFw(layout, format, device);
+ {
+ audio_port_config conv{};
+ int32_t portId = -1;
+ status_t status =
+ aidl2legacy_AudioPortConfig_audio_port_config(initial.hal, isInput, &conv, &portId);
+ ASSERT_EQ(OK, status);
+ EXPECT_NE(-1, portId);
+ auto convBack = legacy2aidl_audio_port_config_AudioPortConfig(conv, isInput, portId);
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial.hal, convBack.value());
+ }
+ {
+ int32_t portId = -1;
+ auto conv = aidl2legacy_AudioPortConfigFw_audio_port_config(initial, &portId);
+ ASSERT_TRUE(conv.ok());
+ EXPECT_NE(-1, portId);
+ auto convBack = legacy2aidl_audio_port_config_AudioPortConfigFw(conv.value(), portId);
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+ }
+}
+INSTANTIATE_TEST_SUITE_P(
+ AudioPortConfig, AudioPortConfigRoundTripTest,
+ testing::Combine(testing::Values(make_ACL_Stereo(), make_ACL_ChannelIndex2()),
+ testing::Values(make_AFD_Pcm16Bit()),
+ testing::Values(make_ADD_DefaultIn(), make_ADD_DefaultOut(),
+ make_ADD_WiredHeadset())));
+
+class AudioPortFwRoundTripTest : public testing::TestWithParam<AudioDeviceDescription> {
+ public:
+ AudioProfile createProfile(const AudioFormatDescription& format,
+ const std::vector<AudioChannelLayout>& channelMasks,
+ const std::vector<int32_t>& sampleRates) {
+ AudioProfile profile;
+ profile.format = format;
+ profile.channelMasks = channelMasks;
+ profile.sampleRates = sampleRates;
+ return profile;
+ }
+};
+TEST_P(AudioPortFwRoundTripTest, Aidl2Legacy2Aidl) {
+ const AudioDeviceDescription device = GetParam();
+ const bool isInput = device.type < AudioDeviceType::OUT_DEFAULT;
+ AudioPortFw initial;
+ initial.hal.id = 42;
+ initial.hal.profiles.push_back(createProfile(
+ make_AFD_Pcm16Bit(), {make_ACL_Stereo(), make_ACL_ChannelIndex2()}, {44100, 48000}));
+ if (isInput) {
+ initial.hal.flags = AudioIoFlags::make<AudioIoFlags::Tag::input>(0);
+ } else {
+ initial.hal.flags = AudioIoFlags::make<AudioIoFlags::Tag::output>(0);
+ }
+ AudioGain initialGain;
+ initialGain.mode = 1 << static_cast<int>(AudioGainMode::JOINT);
+ initialGain.channelMask = make_ACL_Stereo();
+ initial.hal.gains.push_back(initialGain);
+ AudioPortDeviceExt initialExt;
+ AudioDevice initialDevice;
+ initialDevice.type = device;
+ initialExt.device = initialDevice;
+ initial.hal.ext = initialExt;
+ {
+ auto conv = aidl2legacy_AudioPort_audio_port_v7(initial.hal, isInput);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_port_v7_AudioPort(conv.value(), isInput);
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial.hal, convBack.value());
+ }
+ initial.sys.role = isInput ? AudioPortRole::SOURCE : AudioPortRole::SINK;
+ initial.sys.type = AudioPortType::DEVICE;
+ initial.sys.profiles.resize(initial.hal.profiles.size());
+ initial.sys.gains.resize(initial.hal.gains.size());
+ initial.sys.activeConfig =
+ createAudioPortConfigFw(make_ACL_Stereo(), make_AFD_Pcm16Bit(), device);
+ initial.sys.activeConfig.hal.flags = initial.hal.flags;
+ AudioPortDeviceExtSys initialSysDevice;
+ initialSysDevice.hwModule = 1;
+ initial.sys.ext = initialSysDevice;
+ {
+ auto conv = aidl2legacy_AudioPortFw_audio_port_v7(initial);
+ ASSERT_TRUE(conv.ok());
+ auto convBack = legacy2aidl_audio_port_v7_AudioPortFw(conv.value());
+ ASSERT_TRUE(convBack.ok());
+ EXPECT_EQ(initial, convBack.value());
+ }
+}
+INSTANTIATE_TEST_SUITE_P(AudioPortFw, AudioPortFwRoundTripTest,
+ testing::Values(make_ADD_DefaultIn(), make_ADD_DefaultOut(),
+ make_ADD_WiredHeadset()));
+
class AudioDirectModeRoundTripTest : public testing::TestWithParam<AudioDirectMode> {};
TEST_P(AudioDirectModeRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = GetParam();
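A note on the DEFINE_PRINTING_TEMPLATES block above: the operator<< overloads are declared in the same namespaces as the AIDL-generated types so that argument-dependent lookup finds them, which is what makes EXPECT_EQ failures in these tests print readable values. A tiny illustration (assumes <sstream> is included):

    std::ostringstream oss;
    oss << media::AudioPortRole::SOURCE;                    // enum printer, found via ADL
    oss << media::audio::common::AudioFormatDescription{};  // Parcelable printer, found via ADL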
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
index 2c5fcd7..19d1abc 100644
--- a/media/libaudioclient/tests/audiorouting_tests.cpp
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -73,7 +73,6 @@
}
}
ap->stop();
- ap->getAudioTrackHandle()->removeAudioDeviceCallback(cb);
}
}
diff --git a/media/libaudioclient/tests/audiosystem_tests.cpp b/media/libaudioclient/tests/audiosystem_tests.cpp
index a947105..2e6915a 100644
--- a/media/libaudioclient/tests/audiosystem_tests.cpp
+++ b/media/libaudioclient/tests/audiosystem_tests.cpp
@@ -49,12 +49,12 @@
void TearDown() override {
if (mPlayback) {
mPlayback->stop();
- mPlayback->getAudioTrackHandle()->removeAudioDeviceCallback(mCbPlayback);
+ mCbPlayback.clear();
mPlayback.clear();
}
if (mCapture) {
mCapture->stop();
- mCapture->getAudioRecordHandle()->removeAudioDeviceCallback(mCbRecord);
+ mCbRecord.clear();
mCapture.clear();
}
}
diff --git a/media/libaudiofoundation/AudioContainers.cpp b/media/libaudiofoundation/AudioContainers.cpp
index 0a8188f..202a400 100644
--- a/media/libaudiofoundation/AudioContainers.cpp
+++ b/media/libaudiofoundation/AudioContainers.cpp
@@ -77,6 +77,13 @@
return audioDeviceOutLeAudioUnicastSet;
}
+const DeviceTypeSet& getAudioDeviceOutLeAudioBroadcastSet() {
+ static const DeviceTypeSet audioDeviceOutLeAudioBroadcastSet = DeviceTypeSet(
+ std::begin(AUDIO_DEVICE_OUT_BLE_BROADCAST_ARRAY),
+ std::end(AUDIO_DEVICE_OUT_BLE_BROADCAST_ARRAY));
+ return audioDeviceOutLeAudioBroadcastSet;
+}
+
std::string deviceTypesToString(const DeviceTypeSet &deviceTypes) {
if (deviceTypes.empty()) {
return "Empty device types";
diff --git a/media/libaudiofoundation/include/media/AudioContainers.h b/media/libaudiofoundation/include/media/AudioContainers.h
index 2a14504..88dcee9 100644
--- a/media/libaudiofoundation/include/media/AudioContainers.h
+++ b/media/libaudiofoundation/include/media/AudioContainers.h
@@ -43,6 +43,7 @@
const DeviceTypeSet& getAudioDeviceInAllUsbSet();
const DeviceTypeSet& getAudioDeviceOutAllBleSet();
const DeviceTypeSet& getAudioDeviceOutLeAudioUnicastSet();
+const DeviceTypeSet& getAudioDeviceOutLeAudioBroadcastSet();
template<typename T>
static std::vector<T> Intersection(const std::set<T>& a, const std::set<T>& b) {
diff --git a/media/libaudiohal/FactoryHal.cpp b/media/libaudiohal/FactoryHal.cpp
index 84ac64c..f88915d 100644
--- a/media/libaudiohal/FactoryHal.cpp
+++ b/media/libaudiohal/FactoryHal.cpp
@@ -105,12 +105,12 @@
bool hasAidlHalService(const InterfaceName& interface, const AudioHalVersionInfo& version) {
const std::string name = interface.first + "." + interface.second + "/default";
- AIBinder* binder = AServiceManager_checkService(name.c_str());
- if (binder == nullptr) {
- ALOGW("%s Service %s doesn't exist", __func__, name.c_str());
+ const bool isDeclared = AServiceManager_isDeclared(name.c_str());
+ if (!isDeclared) {
+ ALOGW("%s %s: false", __func__, name.c_str());
return false;
}
- ALOGI("%s AIDL Service %s exist: %s", __func__, name.c_str(), version.toString().c_str());
+ ALOGI("%s %s: true, version %s", __func__, name.c_str(), version.toString().c_str());
return true;
}
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index d151817..17993f5 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -207,6 +207,8 @@
cc_library_shared {
name: "libaudiohal@7.1",
defaults: [
+ "latest_android_hardware_audio_core_sounddose_ndk_shared",
+ "latest_android_hardware_audio_sounddose_ndk_shared",
"libaudiohal_default",
"libaudiohal_hidl_default"
],
@@ -226,6 +228,9 @@
"android.hardware.audio@7.1-util",
"libaudiohal.effect@7.0",
],
+ shared_libs: [
+ "libbinder_ndk",
+ ],
cflags: [
"-DMAJOR_VERSION=7",
"-DMINOR_VERSION=1",
@@ -241,16 +246,35 @@
"libaudiohal_default",
"latest_android_hardware_audio_common_ndk_shared",
"latest_android_hardware_audio_core_ndk_shared",
+ "latest_android_hardware_audio_core_sounddose_ndk_shared",
"latest_android_hardware_audio_effect_ndk_static",
"latest_android_media_audio_common_types_ndk_shared",
],
srcs: [
+ "DeviceHalAidl.cpp",
"DevicesFactoryHalEntry.cpp",
"DevicesFactoryHalAidl.cpp",
+ "EffectConversionHelperAidl.cpp",
"EffectBufferHalAidl.cpp",
"EffectHalAidl.cpp",
+ "effectsAidlConversion/AidlConversionAec.cpp",
+ "effectsAidlConversion/AidlConversionAgc2.cpp",
+ "effectsAidlConversion/AidlConversionBassBoost.cpp",
+ "effectsAidlConversion/AidlConversionDownmix.cpp",
+ "effectsAidlConversion/AidlConversionDynamicsProcessing.cpp",
+ "effectsAidlConversion/AidlConversionEnvReverb.cpp",
+ "effectsAidlConversion/AidlConversionEq.cpp",
+ "effectsAidlConversion/AidlConversionHapticGenerator.cpp",
+ "effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp",
+ "effectsAidlConversion/AidlConversionNoiseSuppression.cpp",
+ "effectsAidlConversion/AidlConversionPresetReverb.cpp",
+ "effectsAidlConversion/AidlConversionSpatializer.cpp",
+ "effectsAidlConversion/AidlConversionVendorExtension.cpp",
+ "effectsAidlConversion/AidlConversionVirtualizer.cpp",
+ "effectsAidlConversion/AidlConversionVisualizer.cpp",
"EffectsFactoryHalAidl.cpp",
"EffectsFactoryHalEntry.cpp",
+ "StreamHalAidl.cpp",
],
static_libs: [
"android.hardware.common-V2-ndk",
@@ -259,6 +283,8 @@
shared_libs: [
"libbinder_ndk",
"libaudio_aidl_conversion_common_ndk",
+ "libaudio_aidl_conversion_effect_ndk",
+ "libaudioaidlcommon",
],
header_libs: [
"libaudio_aidl_conversion_common_util_ndk",
@@ -271,4 +297,4 @@
"-Wthread-safety",
"-DBACKEND_NDK",
],
-}
\ No newline at end of file
+}
diff --git a/media/libaudiohal/impl/ConversionHelperAidl.h b/media/libaudiohal/impl/ConversionHelperAidl.h
new file mode 100644
index 0000000..db6b6cf
--- /dev/null
+++ b/media/libaudiohal/impl/ConversionHelperAidl.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <string>
+#include <string_view>
+#include <vector>
+
+#include <utils/String16.h>
+#include <utils/Vector.h>
+
+namespace android {
+
+class Args {
+ public:
+ explicit Args(const Vector<String16>& args)
+ : mValues(args.size()), mPtrs(args.size()) {
+ for (size_t i = 0; i < args.size(); ++i) {
+ mValues[i] = std::string(String8(args[i]));
+ mPtrs[i] = mValues[i].c_str();
+ }
+ }
+ const char** args() { return mPtrs.data(); }
+ private:
+ std::vector<std::string> mValues;
+ std::vector<const char*> mPtrs;
+};
+
+class ConversionHelperAidl {
+ protected:
+ ConversionHelperAidl(std::string_view className) : mClassName(className) {}
+
+ const std::string& getClassName() const {
+ return mClassName;
+ }
+
+ const std::string mClassName;
+};
+
+} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalAidl.cpp b/media/libaudiohal/impl/DeviceHalAidl.cpp
index d85d960..2951752a 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalAidl.cpp
@@ -15,95 +15,521 @@
*/
#define LOG_TAG "DeviceHalAidl"
+// #define LOG_NDEBUG 0
+
+#include <algorithm>
+#include <forward_list>
+
+#include <aidl/android/hardware/audio/core/BnStreamCallback.h>
+#include <aidl/android/hardware/audio/core/BnStreamOutEventCallback.h>
+#include <aidl/android/hardware/audio/core/StreamDescriptor.h>
+#include <error/expected_utils.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionUtil.h>
+#include <mediautils/TimeCheck.h>
+#include <Utils.h>
+#include <utils/Log.h>
#include "DeviceHalAidl.h"
+#include "StreamHalAidl.h"
-status_t DeviceHalAidl::getSupportedDevices(uint32_t* devices) {
- ALOGE("%s not implemented yet devices %p", __func__, devices);
- return OK;
+using aidl::android::aidl_utils::statusTFromBinderStatus;
+using aidl::android::media::audio::common::AudioConfig;
+using aidl::android::media::audio::common::AudioDevice;
+using aidl::android::media::audio::common::AudioDeviceType;
+using aidl::android::media::audio::common::AudioInputFlags;
+using aidl::android::media::audio::common::AudioIoFlags;
+using aidl::android::media::audio::common::AudioLatencyMode;
+using aidl::android::media::audio::common::AudioMode;
+using aidl::android::media::audio::common::AudioOutputFlags;
+using aidl::android::media::audio::common::AudioPort;
+using aidl::android::media::audio::common::AudioPortConfig;
+using aidl::android::media::audio::common::AudioPortDeviceExt;
+using aidl::android::media::audio::common::AudioPortExt;
+using aidl::android::media::audio::common::AudioSource;
+using aidl::android::media::audio::common::Int;
+using aidl::android::media::audio::common::Float;
+using aidl::android::hardware::audio::common::RecordTrackMetadata;
+using aidl::android::hardware::audio::core::AudioPatch;
+using aidl::android::hardware::audio::core::IModule;
+using aidl::android::hardware::audio::core::ITelephony;
+using aidl::android::hardware::audio::core::StreamDescriptor;
+using aidl::android::hardware::audio::core::sounddose::ISoundDose;
+using android::hardware::audio::common::getFrameSizeInBytes;
+using android::hardware::audio::common::isBitPositionFlagSet;
+using android::hardware::audio::common::makeBitPositionFlagMask;
+
+namespace android {
+
+namespace {
+
+bool isConfigEqualToPortConfig(const AudioConfig& config, const AudioPortConfig& portConfig) {
+ return portConfig.sampleRate.value().value == config.base.sampleRate &&
+ portConfig.channelMask.value() == config.base.channelMask &&
+ portConfig.format.value() == config.base.format;
+}
+
+void setConfigFromPortConfig(AudioConfig* config, const AudioPortConfig& portConfig) {
+ config->base.sampleRate = portConfig.sampleRate.value().value;
+ config->base.channelMask = portConfig.channelMask.value();
+ config->base.format = portConfig.format.value();
+}
+
+void setPortConfigFromConfig(AudioPortConfig* portConfig, const AudioConfig& config) {
+ portConfig->sampleRate = Int{ .value = config.base.sampleRate };
+ portConfig->channelMask = config.base.channelMask;
+ portConfig->format = config.base.format;
+}
+
+} // namespace
+
+status_t DeviceHalAidl::getSupportedDevices(uint32_t*) {
+ // Obsolete.
+ return INVALID_OPERATION;
}
status_t DeviceHalAidl::initCheck() {
- ALOGE("%s not implemented yet", __func__);
+ TIME_CHECK();
+ if (mModule == nullptr) return NO_INIT;
+ std::vector<AudioPort> ports;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(mModule->getAudioPorts(&ports)));
+ ALOGW_IF(ports.empty(), "%s: module %s returned an empty list of audio ports",
+ __func__, mInstance.c_str());
+ std::transform(ports.begin(), ports.end(), std::inserter(mPorts, mPorts.end()),
+ [](const auto& p) { return std::make_pair(p.id, p); });
+ mDefaultInputPortId = mDefaultOutputPortId = -1;
+ const int defaultDeviceFlag = 1 << AudioPortDeviceExt::FLAG_INDEX_DEFAULT_DEVICE;
+ for (const auto& pair : mPorts) {
+ const auto& p = pair.second;
+ if (p.ext.getTag() == AudioPortExt::Tag::device &&
+ (p.ext.get<AudioPortExt::Tag::device>().flags & defaultDeviceFlag) != 0) {
+ if (p.flags.getTag() == AudioIoFlags::Tag::input) {
+ mDefaultInputPortId = p.id;
+ } else if (p.flags.getTag() == AudioIoFlags::Tag::output) {
+ mDefaultOutputPortId = p.id;
+ }
+ }
+ }
+ ALOGI("%s: module %s default port ids: input %d, output %d",
+ __func__, mInstance.c_str(), mDefaultInputPortId, mDefaultOutputPortId);
+ std::vector<AudioPortConfig> portConfigs;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(mModule->getAudioPortConfigs(&portConfigs))); // OK if empty
+ std::transform(portConfigs.begin(), portConfigs.end(),
+ std::inserter(mPortConfigs, mPortConfigs.end()),
+ [](const auto& p) { return std::make_pair(p.id, p); });
+ std::vector<AudioPatch> patches;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(mModule->getAudioPatches(&patches))); // OK if empty
+ std::transform(patches.begin(), patches.end(),
+ std::inserter(mPatches, mPatches.end()),
+ [](const auto& p) { return std::make_pair(p.id, p); });
return OK;
}
status_t DeviceHalAidl::setVoiceVolume(float volume) {
- mVoiceVolume = volume;
- ALOGE("%s not implemented yet %f", __func__, volume);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ std::shared_ptr<ITelephony> telephony;
+ if (ndk::ScopedAStatus status = mModule->getTelephony(&telephony);
+ status.isOk() && telephony != nullptr) {
+ ITelephony::TelecomConfig inConfig{ .voiceVolume = Float{volume} }, outConfig;
+ RETURN_STATUS_IF_ERROR(
+ statusTFromBinderStatus(telephony->setTelecomConfig(inConfig, &outConfig)));
+ ALOGW_IF(outConfig.voiceVolume.has_value() && volume != outConfig.voiceVolume.value().value,
+ "%s: the resulting voice volume %f is not the same as requested %f",
+ __func__, outConfig.voiceVolume.value().value, volume);
+ }
+ return INVALID_OPERATION;
}
status_t DeviceHalAidl::setMasterVolume(float volume) {
- mMasterVolume = volume;
- ALOGE("%s not implemented yet %f", __func__, volume);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->setMasterVolume(volume));
}
status_t DeviceHalAidl::getMasterVolume(float *volume) {
- *volume = mMasterVolume;
- ALOGE("%s not implemented yet %f", __func__, *volume);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->getMasterVolume(volume));
}
status_t DeviceHalAidl::setMode(audio_mode_t mode) {
- ALOGE("%s not implemented yet %u", __func__, mode);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ AudioMode audioMode = VALUE_OR_FATAL(::aidl::android::legacy2aidl_audio_mode_t_AudioMode(mode));
+ std::shared_ptr<ITelephony> telephony;
+ if (ndk::ScopedAStatus status = mModule->getTelephony(&telephony);
+ status.isOk() && telephony != nullptr) {
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(telephony->switchAudioMode(audioMode)));
+ }
+ return statusTFromBinderStatus(mModule->updateAudioMode(audioMode));
}
status_t DeviceHalAidl::setMicMute(bool state) {
- mMicMute = state;
- ALOGE("%s not implemented yet %d", __func__, state);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->setMicMute(state));
}
+
status_t DeviceHalAidl::getMicMute(bool *state) {
- *state = mMicMute;
- ALOGE("%s not implemented yet %d", __func__, *state);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->getMicMute(state));
}
+
status_t DeviceHalAidl::setMasterMute(bool state) {
- mMasterMute = state;
- ALOGE("%s not implemented yet %d", __func__, state);
- return OK;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->setMasterMute(state));
}
+
status_t DeviceHalAidl::getMasterMute(bool *state) {
- *state = mMasterMute;
- ALOGE("%s not implemented yet %d", __func__, *state);
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return statusTFromBinderStatus(mModule->getMasterMute(state));
+}
+
+status_t DeviceHalAidl::setParameters(const String8& kvPairs __unused) {
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
-status_t DeviceHalAidl::setParameters(const String8& kvPairs) {
- ALOGE("%s not implemented yet %s", __func__, kvPairs.c_str());
+status_t DeviceHalAidl::getParameters(const String8& keys __unused, String8 *values) {
+ TIME_CHECK();
+ values->clear();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
-status_t DeviceHalAidl::getParameters(const String8& keys, String8 *values) {
- ALOGE("%s not implemented yet %s %s", __func__, keys.c_str(), values->c_str());
- return OK;
-}
+namespace {
+
+class Cleanup {
+ public:
+ typedef void (DeviceHalAidl::*Cleaner)(int32_t);
+
+ Cleanup(DeviceHalAidl* device, Cleaner cleaner, int32_t id) :
+ mDevice(device), mCleaner(cleaner), mId(id) {}
+ ~Cleanup() { clean(); }
+ void clean() {
+ if (mDevice != nullptr) (mDevice->*mCleaner)(mId);
+ disarm();
+ }
+ void disarm() { mDevice = nullptr; }
+
+ private:
+ DeviceHalAidl* mDevice;
+ const Cleaner mCleaner;
+ const int32_t mId;
+};
+
+} // namespace
+
+// Since the order of container elements destruction is unspecified,
+// ensure that cleanups are performed from the most recent one and upwards.
+// This is the same as if there were individual Cleanup instances on the stack,
+// however the bonus is that we can disarm all of them with just one statement.
+class DeviceHalAidl::Cleanups : public std::forward_list<Cleanup> {
+ public:
+ ~Cleanups() { for (auto& c : *this) c.clean(); }
+ void disarmAll() { for (auto& c : *this) c.disarm(); }
+};
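+// Usage sketch (illustration only; mirrors 'prepareToOpenStream' below): register
+// a cleanup right after each temporary object gets created, then disarm all of
+// them once the whole operation has succeeded, e.g.
+//
+//   Cleanups cleanups;
+//   cleanups.emplace_front(this, &DeviceHalAidl::resetPortConfig, portConfig.id);
+//   ...
+//   cleanups.disarmAll();   // success: keep the objects created along the way
+//   // otherwise ~Cleanups() undoes them, most recent first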
status_t DeviceHalAidl::getInputBufferSize(const struct audio_config* config, size_t* size) {
- ALOGE("%s not implemented yet %p %zu", __func__, config, *size);
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (size == nullptr) return BAD_VALUE;
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ AudioConfig aidlConfig = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
+ AudioDevice aidlDevice;
+ aidlDevice.type.type = AudioDeviceType::IN_DEFAULT;
+ AudioIoFlags aidlFlags = AudioIoFlags::make<AudioIoFlags::Tag::input>(0);
+ AudioPortConfig mixPortConfig;
+ Cleanups cleanups;
+ audio_config writableConfig = *config;
+ int32_t nominalLatency;
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(0 /*handle*/, aidlDevice, aidlFlags, &writableConfig,
+ &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ *size = aidlConfig.frameCount *
+ getFrameSizeInBytes(aidlConfig.base.format, aidlConfig.base.channelMask);
+ // Do not disarm cleanups to release temporary port configs.
return OK;
}
-status_t DeviceHalAidl::openOutputStream(audio_io_handle_t handle, audio_devices_t devices,
- audio_output_flags_t flags, struct audio_config* config,
- const char* address,
- sp<StreamOutHalInterface>* outStream) {
- ALOGE("%s not implemented yet %d %u %u %p %s %p", __func__, handle, devices, flags, config,
- address, outStream);
+status_t DeviceHalAidl::prepareToOpenStream(
+ int32_t aidlHandle, const AudioDevice& aidlDevice, const AudioIoFlags& aidlFlags,
+ struct audio_config* config,
+ Cleanups* cleanups, AudioConfig* aidlConfig, AudioPortConfig* mixPortConfig,
+ int32_t* nominalLatency) {
+ const bool isInput = aidlFlags.getTag() == AudioIoFlags::Tag::input;
+ // Find / create AudioPortConfigs for the device port and the mix port,
+ // then find / create a patch between them, and open a stream on the mix port.
+ AudioPortConfig devicePortConfig;
+ bool created = false;
+ RETURN_STATUS_IF_ERROR(findOrCreatePortConfig(aidlDevice, &devicePortConfig, &created));
+ if (created) {
+ cleanups->emplace_front(this, &DeviceHalAidl::resetPortConfig, devicePortConfig.id);
+ }
+ RETURN_STATUS_IF_ERROR(findOrCreatePortConfig(*aidlConfig, aidlFlags, aidlHandle,
+ mixPortConfig, &created));
+ if (created) {
+ cleanups->emplace_front(this, &DeviceHalAidl::resetPortConfig, mixPortConfig->id);
+ }
+ setConfigFromPortConfig(aidlConfig, *mixPortConfig);
+ AudioPatch patch;
+ if (isInput) {
+ RETURN_STATUS_IF_ERROR(findOrCreatePatch(
+ {devicePortConfig.id}, {mixPortConfig->id}, &patch, &created));
+ } else {
+ RETURN_STATUS_IF_ERROR(findOrCreatePatch(
+ {mixPortConfig->id}, {devicePortConfig.id}, &patch, &created));
+ }
+ if (created) {
+ cleanups->emplace_front(this, &DeviceHalAidl::resetPatch, patch.id);
+ }
+ *nominalLatency = patch.latenciesMs[0];
+ if (aidlConfig->frameCount <= 0) {
+ aidlConfig->frameCount = patch.minimumStreamBufferSizeFrames;
+ }
+ *config = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_AudioConfig_audio_config_t(*aidlConfig, isInput));
return OK;
}
-status_t DeviceHalAidl::openInputStream(audio_io_handle_t handle, audio_devices_t devices,
- struct audio_config* config, audio_input_flags_t flags,
- const char* address, audio_source_t source,
- audio_devices_t outputDevice,
- const char* outputDeviceAddress,
- sp<StreamInHalInterface>* inStream) {
- ALOGE("%s not implemented yet %d %u %u %u %p %s %s %p %d", __func__, handle, devices,
- outputDevice, flags, config, address, outputDeviceAddress, inStream, source);
+namespace {
+
+class StreamCallbackBase {
+ protected:
+ explicit StreamCallbackBase(const sp<CallbackBroker>& broker) : mBroker(broker) {}
+ public:
+ void* getCookie() const { return mCookie; }
+ void setCookie(void* cookie) { mCookie = cookie; }
+ sp<CallbackBroker> getBroker() const {
+ if (void* cookie = mCookie; cookie != nullptr) return mBroker.promote();
+ return nullptr;
+ }
+ private:
+ const wp<CallbackBroker> mBroker;
+ std::atomic<void*> mCookie;
+};
+
+template<class C>
+class StreamCallbackBaseHelper {
+ protected:
+ explicit StreamCallbackBaseHelper(const StreamCallbackBase& base) : mBase(base) {}
+ sp<C> getCb(const sp<CallbackBroker>& broker, void* cookie);
+ using CbRef = const sp<C>&;
+ ndk::ScopedAStatus runCb(const std::function<void(CbRef cb)>& f) {
+ if (auto cb = getCb(mBase.getBroker(), mBase.getCookie()); cb != nullptr) f(cb);
+ return ndk::ScopedAStatus::ok();
+ }
+ private:
+ const StreamCallbackBase& mBase;
+};
+
+template<>
+sp<StreamOutHalInterfaceCallback> StreamCallbackBaseHelper<StreamOutHalInterfaceCallback>::getCb(
+ const sp<CallbackBroker>& broker, void* cookie) {
+ if (broker != nullptr) return broker->getStreamOutCallback(cookie);
+ return nullptr;
+}
+
+template<>
+sp<StreamOutHalInterfaceEventCallback>
+StreamCallbackBaseHelper<StreamOutHalInterfaceEventCallback>::getCb(
+ const sp<CallbackBroker>& broker, void* cookie) {
+ if (broker != nullptr) return broker->getStreamOutEventCallback(cookie);
+ return nullptr;
+}
+
+template<>
+sp<StreamOutHalInterfaceLatencyModeCallback>
+StreamCallbackBaseHelper<StreamOutHalInterfaceLatencyModeCallback>::getCb(
+ const sp<CallbackBroker>& broker, void* cookie) {
+ if (broker != nullptr) return broker->getStreamOutLatencyModeCallback(cookie);
+ return nullptr;
+}
+
+/*
+Note on the callback ownership.
+
+In the Binder ownership model, the server-side implementation is kept alive
+as long as any client (proxy object) is alive. The Binder framework increments
+the refcount of the server-side object for each client and decrements it back
+when it detects that the last client is gone.
+
+Thus, since we are the server side here, there is no need to keep any references
+to StreamCallback on our side after an instance has been sent to the client.
+The callback object is kept alive for as long as the HAL server holds a strong
+reference to the IStreamCallback proxy.
+*/
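+
+/*
+Minimal sketch of the pattern described above, mirroring openOutputStream()
+below (illustrative only, not part of this change):
+
+  auto cb = ndk::SharedRefBase::make<OutputStreamCallbackAidl>(this);
+  args.callback = cb;
+
+No member field retains 'cb'; it stays alive for as long as the HAL side
+holds its IStreamCallback proxy.
+*/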
+
+class OutputStreamCallbackAidl : public StreamCallbackBase,
+ public StreamCallbackBaseHelper<StreamOutHalInterfaceCallback>,
+ public ::aidl::android::hardware::audio::core::BnStreamCallback {
+ public:
+ explicit OutputStreamCallbackAidl(const sp<CallbackBroker>& broker)
+ : StreamCallbackBase(broker),
+ StreamCallbackBaseHelper<StreamOutHalInterfaceCallback>(
+ *static_cast<StreamCallbackBase*>(this)) {}
+ ndk::ScopedAStatus onTransferReady() override {
+ return runCb([](CbRef cb) { cb->onWriteReady(); });
+ }
+ ndk::ScopedAStatus onError() override {
+ return runCb([](CbRef cb) { cb->onError(); });
+ }
+ ndk::ScopedAStatus onDrainReady() override {
+ return runCb([](CbRef cb) { cb->onDrainReady(); });
+ }
+};
+
+class OutputStreamEventCallbackAidl :
+ public StreamCallbackBase,
+ public StreamCallbackBaseHelper<StreamOutHalInterfaceEventCallback>,
+ public StreamCallbackBaseHelper<StreamOutHalInterfaceLatencyModeCallback>,
+ public ::aidl::android::hardware::audio::core::BnStreamOutEventCallback {
+ public:
+ explicit OutputStreamEventCallbackAidl(const sp<CallbackBroker>& broker)
+ : StreamCallbackBase(broker),
+ StreamCallbackBaseHelper<StreamOutHalInterfaceEventCallback>(
+ *static_cast<StreamCallbackBase*>(this)),
+ StreamCallbackBaseHelper<StreamOutHalInterfaceLatencyModeCallback>(
+ *static_cast<StreamCallbackBase*>(this)) {}
+ ndk::ScopedAStatus onCodecFormatChanged(const std::vector<uint8_t>& in_audioMetadata) override {
+ std::basic_string<uint8_t> halMetadata(in_audioMetadata.begin(), in_audioMetadata.end());
+ return StreamCallbackBaseHelper<StreamOutHalInterfaceEventCallback>::runCb(
+ [&halMetadata](auto cb) { cb->onCodecFormatChanged(halMetadata); });
+ }
+ ndk::ScopedAStatus onRecommendedLatencyModeChanged(
+ const std::vector<AudioLatencyMode>& in_modes) override {
+ auto halModes = VALUE_OR_FATAL(
+ ::aidl::android::convertContainer<std::vector<audio_latency_mode_t>>(
+ in_modes,
+ ::aidl::android::aidl2legacy_AudioLatencyMode_audio_latency_mode_t));
+ return StreamCallbackBaseHelper<StreamOutHalInterfaceLatencyModeCallback>::runCb(
+ [&halModes](auto cb) { cb->onRecommendedLatencyModeChanged(halModes); });
+ }
+};
+
+} // namespace
+
+status_t DeviceHalAidl::openOutputStream(
+ audio_io_handle_t handle, audio_devices_t devices,
+ audio_output_flags_t flags, struct audio_config* config,
+ const char* address,
+ sp<StreamOutHalInterface>* outStream) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (!outStream || !config) {
+ return BAD_VALUE;
+ }
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ int32_t aidlHandle = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_io_handle_t_int32_t(handle));
+ AudioConfig aidlConfig = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_config_t_AudioConfig(*config, false /*isInput*/));
+ AudioDevice aidlDevice = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_device_AudioDevice(devices, address));
+ int32_t aidlOutputFlags = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
+ AudioIoFlags aidlFlags = AudioIoFlags::make<AudioIoFlags::Tag::output>(aidlOutputFlags);
+ AudioPortConfig mixPortConfig;
+ Cleanups cleanups;
+ int32_t nominalLatency;
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags, config,
+ &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ ::aidl::android::hardware::audio::core::IModule::OpenOutputStreamArguments args;
+ args.portConfigId = mixPortConfig.id;
+ const bool isOffload = isBitPositionFlagSet(
+ aidlOutputFlags, AudioOutputFlags::COMPRESS_OFFLOAD);
+ std::shared_ptr<OutputStreamCallbackAidl> streamCb;
+ if (isOffload) {
+ streamCb = ndk::SharedRefBase::make<OutputStreamCallbackAidl>(this);
+ }
+ auto eventCb = ndk::SharedRefBase::make<OutputStreamEventCallbackAidl>(this);
+ if (isOffload) {
+ args.offloadInfo = aidlConfig.offloadInfo;
+ args.callback = streamCb;
+ }
+ args.bufferSizeFrames = aidlConfig.frameCount;
+ args.eventCallback = eventCb;
+ ::aidl::android::hardware::audio::core::IModule::OpenOutputStreamReturn ret;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openOutputStream(args, &ret)));
+ StreamContextAidl context(ret.desc);
+ if (!context.isValid()) {
+ ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
+ __func__, ret.desc.toString().c_str());
+ return NO_INIT;
+ }
+ *outStream = sp<StreamOutHalAidl>::make(*config, std::move(context), nominalLatency,
+ std::move(ret.stream), this /*callbackBroker*/);
+ void* cbCookie = (*outStream).get();
+ {
+ std::lock_guard l(mLock);
+ mCallbacks.emplace(cbCookie, Callbacks{});
+ }
+ if (streamCb) streamCb->setCookie(cbCookie);
+ eventCb->setCookie(cbCookie);
+ cleanups.disarmAll();
+ return OK;
+}
+
+status_t DeviceHalAidl::openInputStream(
+ audio_io_handle_t handle, audio_devices_t devices,
+ struct audio_config* config, audio_input_flags_t flags,
+ const char* address, audio_source_t source,
+ audio_devices_t outputDevice, const char* outputDeviceAddress,
+ sp<StreamInHalInterface>* inStream) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (!inStream || !config) {
+ return BAD_VALUE;
+ }
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ int32_t aidlHandle = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_io_handle_t_int32_t(handle));
+ AudioConfig aidlConfig = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_config_t_AudioConfig(*config, true /*isInput*/));
+ AudioDevice aidlDevice = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_device_AudioDevice(devices, address));
+ int32_t aidlInputFlags = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_input_flags_t_int32_t_mask(flags));
+ AudioIoFlags aidlFlags = AudioIoFlags::make<AudioIoFlags::Tag::input>(aidlInputFlags);
+ AudioSource aidlSource = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_source_t_AudioSource(source));
+ AudioPortConfig mixPortConfig;
+ Cleanups cleanups;
+ int32_t nominalLatency;
+ RETURN_STATUS_IF_ERROR(prepareToOpenStream(aidlHandle, aidlDevice, aidlFlags, config,
+ &cleanups, &aidlConfig, &mixPortConfig, &nominalLatency));
+ ::aidl::android::hardware::audio::core::IModule::OpenInputStreamArguments args;
+ args.portConfigId = mixPortConfig.id;
+ RecordTrackMetadata aidlTrackMetadata{
+ .source = aidlSource, .gain = 1, .channelMask = aidlConfig.base.channelMask };
+ if (outputDevice != AUDIO_DEVICE_NONE) {
+ aidlTrackMetadata.destinationDevice = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_device_AudioDevice(
+ outputDevice, outputDeviceAddress));
+ }
+ args.sinkMetadata.tracks.push_back(std::move(aidlTrackMetadata));
+ args.bufferSizeFrames = aidlConfig.frameCount;
+ ::aidl::android::hardware::audio::core::IModule::OpenInputStreamReturn ret;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->openInputStream(args, &ret)));
+ StreamContextAidl context(ret.desc);
+ if (!context.isValid()) {
+ ALOGE("%s: Failed to created a valid stream context from the descriptor: %s",
+ __func__, ret.desc.toString().c_str());
+ return NO_INIT;
+ }
+ *inStream = sp<StreamInHalAidl>::make(*config, std::move(context), nominalLatency,
+ std::move(ret.stream));
+ cleanups.disarmAll();
return OK;
}
@@ -117,71 +543,533 @@
unsigned int num_sinks,
const struct audio_port_config* sinks,
audio_patch_handle_t* patch) {
- ALOGE("%s not implemented yet %d %p %d %p %p", __func__, num_sources, sources, num_sinks,
- sinks, patch);
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ if (num_sinks > AUDIO_PATCH_PORTS_MAX || num_sources > AUDIO_PATCH_PORTS_MAX ||
+ sources == nullptr || sinks == nullptr || patch == nullptr) {
+ return BAD_VALUE;
+ }
+ // Note that the patch handle (*patch) is provided by the framework.
+ // In tests it's possible that its value is AUDIO_PATCH_HANDLE_NONE.
+
+ // Upon conversion, mix port configs contain audio configuration, while
+ // device port configs contain device address. This data is used to find
+ // or create HAL configs.
+ std::vector<AudioPortConfig> aidlSources, aidlSinks;
+ for (unsigned int i = 0; i < num_sources; ++i) {
+ bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(
+ sources[i].role, sources[i].type)) ==
+ ::aidl::android::AudioPortDirection::INPUT;
+ aidlSources.push_back(VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_port_config_AudioPortConfig(
+ sources[i], isInput, 0)));
+ }
+ for (unsigned int i = 0; i < num_sinks; ++i) {
+ bool isInput = VALUE_OR_RETURN_STATUS(::aidl::android::portDirection(
+ sinks[i].role, sinks[i].type)) ==
+ ::aidl::android::AudioPortDirection::INPUT;
+ aidlSinks.push_back(VALUE_OR_RETURN_STATUS(
+ ::aidl::android::legacy2aidl_audio_port_config_AudioPortConfig(
+ sinks[i], isInput, 0)));
+ }
+ Cleanups cleanups;
+ auto existingPatchIt = mPatches.end();
+ auto fwkHandlesIt = *patch != AUDIO_PATCH_HANDLE_NONE ?
+ mFwkHandles.find(*patch) : mFwkHandles.end();
+ AudioPatch aidlPatch;
+ if (fwkHandlesIt != mFwkHandles.end()) {
+ existingPatchIt = mPatches.find(fwkHandlesIt->second);
+ if (existingPatchIt != mPatches.end()) {
+ aidlPatch = existingPatchIt->second;
+ aidlPatch.sourcePortConfigIds.clear();
+ aidlPatch.sinkPortConfigIds.clear();
+ }
+ }
+ ALOGD("%s: sources: %s, sinks: %s",
+ __func__, ::android::internal::ToString(aidlSources).c_str(),
+ ::android::internal::ToString(aidlSinks).c_str());
+ auto fillPortConfigs = [&](
+ const std::vector<AudioPortConfig>& configs, std::vector<int32_t>* ids) -> status_t {
+ for (const auto& s : configs) {
+ AudioPortConfig portConfig;
+ bool created = false;
+ RETURN_STATUS_IF_ERROR(findOrCreatePortConfig(s, &portConfig, &created));
+ if (created) {
+ cleanups.emplace_front(this, &DeviceHalAidl::resetPortConfig, portConfig.id);
+ }
+ ids->push_back(portConfig.id);
+ }
+ return OK;
+ };
+ RETURN_STATUS_IF_ERROR(fillPortConfigs(aidlSources, &aidlPatch.sourcePortConfigIds));
+ RETURN_STATUS_IF_ERROR(fillPortConfigs(aidlSinks, &aidlPatch.sinkPortConfigIds));
+ if (existingPatchIt != mPatches.end()) {
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mModule->setAudioPatch(aidlPatch, &aidlPatch)));
+ existingPatchIt->second = aidlPatch;
+ } else {
+ bool created = false;
+ RETURN_STATUS_IF_ERROR(findOrCreatePatch(aidlPatch, &aidlPatch, &created));
+ // Since no cleanup of the patch is needed, 'created' is ignored.
+ if (fwkHandlesIt != mFwkHandles.end()) {
+ fwkHandlesIt->second = aidlPatch.id;
+ // Patch handle (*patch) stays the same.
+ } else {
+ if (*patch == AUDIO_PATCH_HANDLE_NONE) {
+ // This is not ideal, as the module can not provide a handle which is guaranteed
+ // to be unique among framework handles. However, this should only happen in tests.
+ *patch = aidlPatch.id;
+ LOG_ALWAYS_FATAL_IF(mFwkHandles.count(*patch) > 0,
+ "%s: patch id %d clashes with another framework patch handle",
+ __func__, *patch);
+ }
+ mFwkHandles.emplace(*patch, aidlPatch.id);
+ }
+ }
+ cleanups.disarmAll();
return OK;
}
status_t DeviceHalAidl::releaseAudioPatch(audio_patch_handle_t patch) {
- ALOGE("%s not implemented yet patch %d", __func__, patch);
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ auto idMapIt = mFwkHandles.find(patch);
+ if (idMapIt == mFwkHandles.end()) {
+ return BAD_VALUE;
+ }
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->resetAudioPatch(idMapIt->second)));
+ mFwkHandles.erase(idMapIt);
return OK;
}
-status_t DeviceHalAidl::setAudioPortConfig(const struct audio_port_config* config) {
- ALOGE("%s not implemented yet config %p", __func__, config);
+status_t DeviceHalAidl::getAudioPort(struct audio_port* port __unused) {
+ TIME_CHECK();
+ ALOGE("%s not implemented yet", __func__);
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalAidl::getAudioPort(struct audio_port_v7 *port __unused) {
+ TIME_CHECK();
+ ALOGE("%s not implemented yet", __func__);
+ return INVALID_OPERATION;
+}
+
+status_t DeviceHalAidl::setAudioPortConfig(const struct audio_port_config* config __unused) {
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
status_t DeviceHalAidl::getMicrophones(
- std::vector<audio_microphone_characteristic_t>* microphones) {
- ALOGE("%s not implemented yet microphones %p", __func__, microphones);
+ std::vector<audio_microphone_characteristic_t>* microphones __unused) {
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
-status_t DeviceHalAidl::addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) {
+status_t DeviceHalAidl::addDeviceEffect(audio_port_handle_t device __unused,
+ sp<EffectHalInterface> effect) {
if (!effect) {
return BAD_VALUE;
}
- ALOGE("%s not implemented yet device %d", __func__, device);
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
-status_t DeviceHalAidl::removeDeviceEffect(audio_port_handle_t device,
+status_t DeviceHalAidl::removeDeviceEffect(audio_port_handle_t device __unused,
sp<EffectHalInterface> effect) {
if (!effect) {
return BAD_VALUE;
}
- ALOGE("%s not implemented yet device %d", __func__, device);
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
return OK;
}
status_t DeviceHalAidl::getMmapPolicyInfos(
media::audio::common::AudioMMapPolicyType policyType __unused,
std::vector<media::audio::common::AudioMMapPolicyInfo>* policyInfos __unused) {
+ TIME_CHECK();
ALOGE("%s not implemented yet", __func__);
return OK;
}
int32_t DeviceHalAidl::getAAudioMixerBurstCount() {
+ TIME_CHECK();
ALOGE("%s not implemented yet", __func__);
return OK;
}
int32_t DeviceHalAidl::getAAudioHardwareBurstMinUsec() {
+ TIME_CHECK();
ALOGE("%s not implemented yet", __func__);
return OK;
}
error::Result<audio_hw_sync_t> DeviceHalAidl::getHwAvSync() {
+ TIME_CHECK();
ALOGE("%s not implemented yet", __func__);
return base::unexpected(INVALID_OPERATION);
}
-status_t DeviceHalAidl::dump(int __unused, const Vector<String16>& __unused) {
- ALOGE("%s not implemented yet", __func__);
- return OK;
+status_t DeviceHalAidl::dump(int fd, const Vector<String16>& args) {
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ return mModule->dump(fd, Args(args).args(), args.size());
};
-int32_t DeviceHalAidl::supportsBluetoothVariableLatency(bool* supports __unused) override {
+int32_t DeviceHalAidl::supportsBluetoothVariableLatency(bool* supports __unused) {
+ TIME_CHECK();
ALOGE("%s not implemented yet", __func__);
return INVALID_OPERATION;
}
+
+status_t DeviceHalAidl::getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) {
+ TIME_CHECK();
+ if (!mModule) return NO_INIT;
+ if (mSoundDose == nullptr) {
+ ndk::ScopedAStatus status = mModule->getSoundDose(&mSoundDose);
+ if (!status.isOk()) {
+ ALOGE("%s failed to return the sound dose interface for module %s: %s",
+ __func__,
+ module.c_str(),
+ status.getDescription().c_str());
+ return BAD_VALUE;
+ }
+ }
+ *soundDoseBinder = mSoundDose->asBinder();
+ ALOGI("%s using audio AIDL HAL sound dose interface", __func__);
+
+ return OK;
+}
+
+bool DeviceHalAidl::audioDeviceMatches(const AudioDevice& device, const AudioPort& p) {
+ if (p.ext.getTag() != AudioPortExt::Tag::device) return false;
+ return p.ext.get<AudioPortExt::Tag::device>().device == device;
+}
+
+bool DeviceHalAidl::audioDeviceMatches(const AudioDevice& device, const AudioPortConfig& p) {
+ if (p.ext.getTag() != AudioPortExt::Tag::device) return false;
+ if (device.type.type == AudioDeviceType::IN_DEFAULT) {
+ return p.portId == mDefaultInputPortId;
+ } else if (device.type.type == AudioDeviceType::OUT_DEFAULT) {
+ return p.portId == mDefaultOutputPortId;
+ }
+ return p.ext.get<AudioPortExt::Tag::device>().device == device;
+}
+
+status_t DeviceHalAidl::createPortConfig(const AudioPortConfig& requestedPortConfig,
+ AudioPortConfig* appliedPortConfig) {
+ TIME_CHECK();
+ bool applied = false;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->setAudioPortConfig(
+ requestedPortConfig, appliedPortConfig, &applied)));
+ if (!applied) {
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->setAudioPortConfig(
+ *appliedPortConfig, appliedPortConfig, &applied)));
+ if (!applied) {
+ ALOGE("%s: module %s did not apply suggested config %s",
+ __func__, mInstance.c_str(), appliedPortConfig->toString().c_str());
+ return NO_INIT;
+ }
+ }
+ mPortConfigs.emplace(appliedPortConfig->id, *appliedPortConfig);
+ return OK;
+}
+
+status_t DeviceHalAidl::findOrCreatePatch(
+ const AudioPatch& requestedPatch, AudioPatch* patch, bool* created) {
+ std::set<int32_t> sourcePortConfigIds(requestedPatch.sourcePortConfigIds.begin(),
+ requestedPatch.sourcePortConfigIds.end());
+ std::set<int32_t> sinkPortConfigIds(requestedPatch.sinkPortConfigIds.begin(),
+ requestedPatch.sinkPortConfigIds.end());
+ return findOrCreatePatch(sourcePortConfigIds, sinkPortConfigIds, patch, created);
+}
+
+status_t DeviceHalAidl::findOrCreatePatch(
+ const std::set<int32_t>& sourcePortConfigIds, const std::set<int32_t>& sinkPortConfigIds,
+ AudioPatch* patch, bool* created) {
+ auto patchIt = findPatch(sourcePortConfigIds, sinkPortConfigIds);
+ if (patchIt == mPatches.end()) {
+ TIME_CHECK();
+ AudioPatch requestedPatch, appliedPatch;
+ requestedPatch.sourcePortConfigIds.insert(requestedPatch.sourcePortConfigIds.end(),
+ sourcePortConfigIds.begin(), sourcePortConfigIds.end());
+ requestedPatch.sinkPortConfigIds.insert(requestedPatch.sinkPortConfigIds.end(),
+ sinkPortConfigIds.begin(), sinkPortConfigIds.end());
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mModule->setAudioPatch(
+ requestedPatch, &appliedPatch)));
+ patchIt = mPatches.insert(mPatches.end(), std::make_pair(appliedPatch.id, appliedPatch));
+ *created = true;
+ } else {
+ *created = false;
+ }
+ *patch = patchIt->second;
+ return OK;
+}
+
+status_t DeviceHalAidl::findOrCreatePortConfig(const AudioDevice& device,
+ AudioPortConfig* portConfig, bool* created) {
+ auto portConfigIt = findPortConfig(device);
+ if (portConfigIt == mPortConfigs.end()) {
+ auto portsIt = findPort(device);
+ if (portsIt == mPorts.end()) {
+ ALOGE("%s: device port for device %s is not found in the module %s",
+ __func__, device.toString().c_str(), mInstance.c_str());
+ return BAD_VALUE;
+ }
+ AudioPortConfig requestedPortConfig;
+ requestedPortConfig.portId = portsIt->first;
+ AudioPortConfig appliedPortConfig;
+ RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &appliedPortConfig));
+ portConfigIt = mPortConfigs.insert(
+ mPortConfigs.end(), std::make_pair(appliedPortConfig.id, appliedPortConfig));
+ *created = true;
+ } else {
+ *created = false;
+ }
+ *portConfig = portConfigIt->second;
+ return OK;
+}
+
+status_t DeviceHalAidl::findOrCreatePortConfig(
+ const AudioConfig& config, const std::optional<AudioIoFlags>& flags, int32_t ioHandle,
+ AudioPortConfig* portConfig, bool* created) {
+ auto portConfigIt = findPortConfig(config, flags, ioHandle);
+ if (portConfigIt == mPortConfigs.end() && flags.has_value()) {
+ auto portsIt = findPort(config, flags.value());
+ if (portsIt == mPorts.end()) {
+ ALOGE("%s: mix port for config %s, flags %s is not found in the module %s",
+ __func__, config.toString().c_str(), flags.value().toString().c_str(),
+ mInstance.c_str());
+ return BAD_VALUE;
+ }
+ AudioPortConfig requestedPortConfig;
+ requestedPortConfig.portId = portsIt->first;
+ setPortConfigFromConfig(&requestedPortConfig, config);
+ AudioPortConfig appliedPortConfig;
+ RETURN_STATUS_IF_ERROR(createPortConfig(requestedPortConfig, &appliedPortConfig));
+ appliedPortConfig.ext.get<AudioPortExt::Tag::mix>().handle = ioHandle;
+ portConfigIt = mPortConfigs.insert(
+ mPortConfigs.end(), std::make_pair(appliedPortConfig.id, appliedPortConfig));
+ *created = true;
+ } else if (!flags.has_value()) {
+ ALOGW("%s: mix port config for %s, handle %d not found in the module %s, "
+ "and was not created as flags are not specified",
+ __func__, config.toString().c_str(), ioHandle, mInstance.c_str());
+ return BAD_VALUE;
+ } else {
+ *created = false;
+ }
+ *portConfig = portConfigIt->second;
+ return OK;
+}
+
+status_t DeviceHalAidl::findOrCreatePortConfig(
+ const AudioPortConfig& requestedPortConfig, AudioPortConfig* portConfig, bool* created) {
+ using Tag = AudioPortExt::Tag;
+ if (requestedPortConfig.ext.getTag() == Tag::mix) {
+ if (const auto& p = requestedPortConfig;
+ !p.sampleRate.has_value() || !p.channelMask.has_value() ||
+ !p.format.has_value()) {
+ ALOGW("%s: provided mix port config is not fully specified: %s",
+ __func__, p.toString().c_str());
+ return BAD_VALUE;
+ }
+ AudioConfig config;
+ setConfigFromPortConfig(&config, requestedPortConfig);
+ return findOrCreatePortConfig(config, requestedPortConfig.flags,
+ requestedPortConfig.ext.get<Tag::mix>().handle, portConfig, created);
+ } else if (requestedPortConfig.ext.getTag() == Tag::device) {
+ return findOrCreatePortConfig(
+ requestedPortConfig.ext.get<Tag::device>().device, portConfig, created);
+ }
+ ALOGW("%s: unsupported audio port config: %s",
+ __func__, requestedPortConfig.toString().c_str());
+ return BAD_VALUE;
+}
+
+DeviceHalAidl::Patches::iterator DeviceHalAidl::findPatch(
+ const std::set<int32_t>& sourcePortConfigIds, const std::set<int32_t>& sinkPortConfigIds) {
+ return std::find_if(mPatches.begin(), mPatches.end(),
+ [&](const auto& pair) {
+ const auto& p = pair.second;
+ std::set<int32_t> patchSrcs(
+ p.sourcePortConfigIds.begin(), p.sourcePortConfigIds.end());
+ std::set<int32_t> patchSinks(
+ p.sinkPortConfigIds.begin(), p.sinkPortConfigIds.end());
+ return sourcePortConfigIds == patchSrcs && sinkPortConfigIds == patchSinks; });
+}
+
+DeviceHalAidl::Ports::iterator DeviceHalAidl::findPort(const AudioDevice& device) {
+ if (device.type.type == AudioDeviceType::IN_DEFAULT) {
+ return mPorts.find(mDefaultInputPortId);
+ } else if (device.type.type == AudioDeviceType::OUT_DEFAULT) {
+ return mPorts.find(mDefaultOutputPortId);
+ }
+ return std::find_if(mPorts.begin(), mPorts.end(),
+ [&](const auto& pair) { return audioDeviceMatches(device, pair.second); });
+}
+
+DeviceHalAidl::Ports::iterator DeviceHalAidl::findPort(
+ const AudioConfig& config, const AudioIoFlags& flags) {
+ using Tag = AudioPortExt::Tag;
+ AudioIoFlags matchFlags = flags;
+ auto matcher = [&](const auto& pair) {
+ const auto& p = pair.second;
+ return p.ext.getTag() == Tag::mix &&
+ p.flags == matchFlags &&
+ std::find_if(p.profiles.begin(), p.profiles.end(),
+ [&](const auto& prof) {
+ return prof.format == config.base.format &&
+ std::find(prof.channelMasks.begin(), prof.channelMasks.end(),
+ config.base.channelMask) != prof.channelMasks.end() &&
+ std::find(prof.sampleRates.begin(), prof.sampleRates.end(),
+ config.base.sampleRate) != prof.sampleRates.end();
+ }) != p.profiles.end(); };
+ auto it = std::find_if(mPorts.begin(), mPorts.end(), matcher);
+ if (it == mPorts.end() && flags.getTag() == AudioIoFlags::Tag::input &&
+ isBitPositionFlagSet(flags.get<AudioIoFlags::Tag::input>(), AudioInputFlags::FAST)) {
+ // "Fast" input is not a mandatory flag, try without it.
+ matchFlags.set<AudioIoFlags::Tag::input>(flags.get<AudioIoFlags::Tag::input>() &
+ ~makeBitPositionFlagMask(AudioInputFlags::FAST));
+ it = std::find_if(mPorts.begin(), mPorts.end(), matcher);
+ }
+ return it;
+}
+
+DeviceHalAidl::PortConfigs::iterator DeviceHalAidl::findPortConfig(const AudioDevice& device) {
+ return std::find_if(mPortConfigs.begin(), mPortConfigs.end(),
+ [&](const auto& pair) { return audioDeviceMatches(device, pair.second); });
+}
+
+DeviceHalAidl::PortConfigs::iterator DeviceHalAidl::findPortConfig(
+ const AudioConfig& config, const std::optional<AudioIoFlags>& flags, int32_t ioHandle) {
+ using Tag = AudioPortExt::Tag;
+ return std::find_if(mPortConfigs.begin(), mPortConfigs.end(),
+ [&](const auto& pair) {
+ const auto& p = pair.second;
+ LOG_ALWAYS_FATAL_IF(p.ext.getTag() == Tag::mix &&
+ (!p.sampleRate.has_value() || !p.channelMask.has_value() ||
+ !p.format.has_value() || !p.flags.has_value()),
+ "%s: stored mix port config is not fully specified: %s",
+ __func__, p.toString().c_str());
+ return p.ext.getTag() == Tag::mix &&
+ isConfigEqualToPortConfig(config, p) &&
+ (!flags.has_value() || p.flags.value() == flags.value()) &&
+ p.ext.template get<Tag::mix>().handle == ioHandle; });
+}
+/*
+DeviceHalAidl::PortConfigs::iterator DeviceHalAidl::findPortConfig(
+ const AudioPortConfig& portConfig) {
+ using Tag = AudioPortExt::Tag;
+ if (portConfig.ext.getTag() == Tag::mix) {
+ return std::find_if(mPortConfigs.begin(), mPortConfigs.end(),
+ [&](const auto& pair) {
+ const auto& p = pair.second;
+ LOG_ALWAYS_FATAL_IF(p.ext.getTag() == Tag::mix &&
+ (!p.sampleRate.has_value() || !p.channelMask.has_value() ||
+ !p.format.has_value() || !p.flags.has_value()),
+ "%s: stored mix port config is not fully specified: %s",
+ __func__, p.toString().c_str());
+ return p.ext.getTag() == Tag::mix &&
+ (!portConfig.sampleRate.has_value() ||
+ p.sampleRate == portConfig.sampleRate) &&
+ (!portConfig.channelMask.has_value() ||
+ p.channelMask == portConfig.channelMask) &&
+ (!portConfig.format.has_value() || p.format == portConfig.format) &&
+ (!portConfig.flags.has_value() || p.flags == portConfig.flags) &&
+ p.ext.template get<Tag::mix>().handle ==
+ portConfig.ext.template get<Tag::mix>().handle; });
+ } else if (portConfig.ext.getTag() == Tag::device) {
+ return findPortConfig(portConfig.ext.get<Tag::device>().device);
+ }
+ return mPortConfigs.end();
+}
+*/
+void DeviceHalAidl::resetPatch(int32_t patchId) {
+ if (auto it = mPatches.find(patchId); it != mPatches.end()) {
+ mPatches.erase(it);
+ TIME_CHECK();
+ if (ndk::ScopedAStatus status = mModule->resetAudioPatch(patchId); !status.isOk()) {
+ ALOGE("%s: error while resetting patch %d: %s",
+ __func__, patchId, status.getDescription().c_str());
+ }
+ return;
+ }
+ ALOGE("%s: patch id %d not found", __func__, patchId);
+}
+
+void DeviceHalAidl::resetPortConfig(int32_t portConfigId) {
+ if (auto it = mPortConfigs.find(portConfigId); it != mPortConfigs.end()) {
+ mPortConfigs.erase(it);
+ TIME_CHECK();
+ if (ndk::ScopedAStatus status = mModule->resetAudioPortConfig(portConfigId);
+ !status.isOk()) {
+ ALOGE("%s: error while resetting port config %d: %s",
+ __func__, portConfigId, status.getDescription().c_str());
+ }
+ return;
+ }
+ ALOGE("%s: port config id %d not found", __func__, portConfigId);
+}
+
+void DeviceHalAidl::clearCallbacks(void* cookie) {
+ std::lock_guard l(mLock);
+ mCallbacks.erase(cookie);
+}
+
+sp<StreamOutHalInterfaceCallback> DeviceHalAidl::getStreamOutCallback(void* cookie) {
+ return getCallbackImpl(cookie, &Callbacks::out);
+}
+
+void DeviceHalAidl::setStreamOutCallback(
+ void* cookie, const sp<StreamOutHalInterfaceCallback>& cb) {
+ setCallbackImpl(cookie, &Callbacks::out, cb);
+}
+
+sp<StreamOutHalInterfaceEventCallback> DeviceHalAidl::getStreamOutEventCallback(
+ void* cookie) {
+ return getCallbackImpl(cookie, &Callbacks::event);
+}
+
+void DeviceHalAidl::setStreamOutEventCallback(
+ void* cookie, const sp<StreamOutHalInterfaceEventCallback>& cb) {
+ setCallbackImpl(cookie, &Callbacks::event, cb);
+}
+
+sp<StreamOutHalInterfaceLatencyModeCallback> DeviceHalAidl::getStreamOutLatencyModeCallback(
+ void* cookie) {
+ return getCallbackImpl(cookie, &Callbacks::latency);
+}
+
+void DeviceHalAidl::setStreamOutLatencyModeCallback(
+ void* cookie, const sp<StreamOutHalInterfaceLatencyModeCallback>& cb) {
+ setCallbackImpl(cookie, &Callbacks::latency, cb);
+}
+
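+// Helpers shared by the getters and setters above: 'field' is a pointer to the
+// Callbacks member (out / event / latency) to access for the stream identified
+// by 'cookie'; access to the callbacks map is guarded by mLock.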
+template<class C>
+sp<C> DeviceHalAidl::getCallbackImpl(void* cookie, wp<C> DeviceHalAidl::Callbacks::* field) {
+ std::lock_guard l(mLock);
+ if (auto it = mCallbacks.find(cookie); it != mCallbacks.end()) {
+ return ((it->second).*field).promote();
+ }
+ return nullptr;
+}
+template<class C>
+void DeviceHalAidl::setCallbackImpl(
+ void* cookie, wp<C> DeviceHalAidl::Callbacks::* field, const sp<C>& cb) {
+ std::lock_guard l(mLock);
+ if (auto it = mCallbacks.find(cookie); it != mCallbacks.end()) {
+ (it->second).*field = cb;
+ }
+}
+
+} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalAidl.h b/media/libaudiohal/impl/DeviceHalAidl.h
index 5e8a8dd..76e832d 100644
--- a/media/libaudiohal/impl/DeviceHalAidl.h
+++ b/media/libaudiohal/impl/DeviceHalAidl.h
@@ -16,14 +16,49 @@
#pragma once
+#include <map>
+#include <set>
+#include <vector>
+
+#include <aidl/android/hardware/audio/core/BpModule.h>
+#include <aidl/android/hardware/audio/core/sounddose/BpSoundDose.h>
+#include <android-base/thread_annotations.h>
#include <media/audiohal/DeviceHalInterface.h>
#include <media/audiohal/EffectHalInterface.h>
-#include <aidl/android/hardware/audio/core/BpModule.h>
+#include "ConversionHelperAidl.h"
namespace android {
-class DeviceHalAidl : public DeviceHalInterface {
+class StreamOutHalInterfaceCallback;
+class StreamOutHalInterfaceEventCallback;
+class StreamOutHalInterfaceLatencyModeCallback;
+
+// The role of the broker is to connect AIDL callback interface implementations
+// with StreamOut callback implementations. Since AIDL requires all callbacks
+// to be provided upfront, while libaudiohal interfaces allow late registration,
+// there is a need to coordinate the matching process.
+class CallbackBroker : public virtual RefBase {
+ public:
+ virtual ~CallbackBroker() = default;
+ // The cookie is always the stream instance pointer. Weak pointers are not used here in order
+ // to avoid the extra cost of reference counting; instead, the stream cleans up its entries
+ // on destruction. Since access to the callbacks map is synchronized, pointer aliasing caused
+ // by allocating a new stream at the address of a previously deleted stream is avoided.
+ virtual void clearCallbacks(void* cookie) = 0;
+ virtual sp<StreamOutHalInterfaceCallback> getStreamOutCallback(void* cookie) = 0;
+ virtual void setStreamOutCallback(void* cookie, const sp<StreamOutHalInterfaceCallback>&) = 0;
+ virtual sp<StreamOutHalInterfaceEventCallback> getStreamOutEventCallback(void* cookie) = 0;
+ virtual void setStreamOutEventCallback(void* cookie,
+ const sp<StreamOutHalInterfaceEventCallback>&) = 0;
+ virtual sp<StreamOutHalInterfaceLatencyModeCallback> getStreamOutLatencyModeCallback(
+ void* cookie) = 0;
+ virtual void setStreamOutLatencyModeCallback(
+ void* cookie, const sp<StreamOutHalInterfaceLatencyModeCallback>&) = 0;
+};
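+
+// Typical flow, sketched for illustration (assumed usage, not part of this change):
+// the device stores an empty callbacks entry keyed by the stream pointer (the cookie)
+// when a stream is opened; the stream registers its callback later through the broker,
+// and the AIDL callback object looks it up with the same cookie when an event arrives:
+//
+//   broker->setStreamOutCallback(cookie, streamCallback);  // from the stream
+//   sp<StreamOutHalInterfaceCallback> cb = broker->getStreamOutCallback(cookie);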
+
+class DeviceHalAidl : public DeviceHalInterface, public ConversionHelperAidl,
+ public CallbackBroker {
public:
// Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
status_t getSupportedDevices(uint32_t *devices) override;
@@ -86,6 +121,12 @@
// Releases an audio patch.
status_t releaseAudioPatch(audio_patch_handle_t patch) override;
+ // Fills the list of supported attributes for a given audio port.
+ status_t getAudioPort(struct audio_port* port) override;
+
+ // Fills the list of supported attributes for a given audio port.
+ status_t getAudioPort(struct audio_port_v7 *port) override;
+
// Set audio port configuration.
status_t setAudioPortConfig(const struct audio_port_config* config) override;
@@ -110,21 +151,112 @@
int32_t supportsBluetoothVariableLatency(bool* supports __unused) override;
+ status_t getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) override;
+
private:
- friend class DevicesFactoryHalAidl;
- const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mCore;
- float mMasterVolume = 0.0f;
- float mVoiceVolume = 0.0f;
- bool mMasterMute = false;
- bool mMicMute = false;
+ friend class sp<DeviceHalAidl>;
- // Can not be constructed directly by clients.
- explicit DeviceHalAidl(
- const std::shared_ptr<::aidl::android::hardware::audio::core::IModule>& core)
- : mCore(core) {}
+ struct Callbacks { // No need to use `atomic_wp` because access is serialized.
+ wp<StreamOutHalInterfaceCallback> out;
+ wp<StreamOutHalInterfaceEventCallback> event;
+ wp<StreamOutHalInterfaceLatencyModeCallback> latency;
+ };
+ using Patches = std::map<int32_t /*patch ID*/,
+ ::aidl::android::hardware::audio::core::AudioPatch>;
+ using PortConfigs = std::map<int32_t /*port config ID*/,
+ ::aidl::android::media::audio::common::AudioPortConfig>;
+ using Ports = std::map<int32_t /*port ID*/, ::aidl::android::media::audio::common::AudioPort>;
+ class Cleanups;
- // The destructor automatically closes the device.
- ~DeviceHalAidl();
+ // Must not be constructed directly by clients.
+ DeviceHalAidl(
+ const std::string& instance,
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IModule>& module)
+ : ConversionHelperAidl("DeviceHalAidl"), mInstance(instance), mModule(module) {}
+
+ ~DeviceHalAidl() override = default;
+
+ bool audioDeviceMatches(const ::aidl::android::media::audio::common::AudioDevice& device,
+ const ::aidl::android::media::audio::common::AudioPort& p);
+ bool audioDeviceMatches(const ::aidl::android::media::audio::common::AudioDevice& device,
+ const ::aidl::android::media::audio::common::AudioPortConfig& p);
+ status_t createPortConfig(
+ const ::aidl::android::media::audio::common::AudioPortConfig& requestedPortConfig,
+ ::aidl::android::media::audio::common::AudioPortConfig* appliedPortConfig);
+ status_t findOrCreatePatch(
+ const std::set<int32_t>& sourcePortConfigIds,
+ const std::set<int32_t>& sinkPortConfigIds,
+ ::aidl::android::hardware::audio::core::AudioPatch* patch, bool* created);
+ status_t findOrCreatePatch(
+ const ::aidl::android::hardware::audio::core::AudioPatch& requestedPatch,
+ ::aidl::android::hardware::audio::core::AudioPatch* patch, bool* created);
+ status_t findOrCreatePortConfig(
+ const ::aidl::android::media::audio::common::AudioDevice& device,
+ ::aidl::android::media::audio::common::AudioPortConfig* portConfig,
+ bool* created);
+ status_t findOrCreatePortConfig(
+ const ::aidl::android::media::audio::common::AudioConfig& config,
+ const std::optional<::aidl::android::media::audio::common::AudioIoFlags>& flags,
+ int32_t ioHandle,
+ ::aidl::android::media::audio::common::AudioPortConfig* portConfig, bool* created);
+ status_t findOrCreatePortConfig(
+ const ::aidl::android::media::audio::common::AudioPortConfig& requestedPortConfig,
+ ::aidl::android::media::audio::common::AudioPortConfig* portConfig, bool* created);
+ Patches::iterator findPatch(const std::set<int32_t>& sourcePortConfigIds,
+ const std::set<int32_t>& sinkPortConfigIds);
+ Ports::iterator findPort(const ::aidl::android::media::audio::common::AudioDevice& device);
+ Ports::iterator findPort(
+ const ::aidl::android::media::audio::common::AudioConfig& config,
+ const ::aidl::android::media::audio::common::AudioIoFlags& flags);
+ PortConfigs::iterator findPortConfig(
+ const ::aidl::android::media::audio::common::AudioDevice& device);
+ PortConfigs::iterator findPortConfig(
+ const ::aidl::android::media::audio::common::AudioConfig& config,
+ const std::optional<::aidl::android::media::audio::common::AudioIoFlags>& flags,
+ int32_t ioHandle);
+ // Currently unused but may be useful for implementing setAudioPortConfig
+ // PortConfigs::iterator findPortConfig(
+ // const ::aidl::android::media::audio::common::AudioPortConfig& portConfig);
+ status_t prepareToOpenStream(
+ int32_t aidlHandle,
+ const ::aidl::android::media::audio::common::AudioDevice& aidlDevice,
+ const ::aidl::android::media::audio::common::AudioIoFlags& aidlFlags,
+ struct audio_config* config,
+ Cleanups* cleanups,
+ ::aidl::android::media::audio::common::AudioConfig* aidlConfig,
+ ::aidl::android::media::audio::common::AudioPortConfig* mixPortConfig,
+ int32_t* nominalLatency);
+ void resetPatch(int32_t patchId);
+ void resetPortConfig(int32_t portConfigId);
+
+ // CallbackBroker implementation
+ void clearCallbacks(void* cookie) override;
+ sp<StreamOutHalInterfaceCallback> getStreamOutCallback(void* cookie) override;
+ void setStreamOutCallback(void* cookie, const sp<StreamOutHalInterfaceCallback>& cb) override;
+ sp<StreamOutHalInterfaceEventCallback> getStreamOutEventCallback(void* cookie) override;
+ void setStreamOutEventCallback(void* cookie,
+ const sp<StreamOutHalInterfaceEventCallback>& cb) override;
+ sp<StreamOutHalInterfaceLatencyModeCallback> getStreamOutLatencyModeCallback(
+ void* cookie) override;
+ void setStreamOutLatencyModeCallback(
+ void* cookie, const sp<StreamOutHalInterfaceLatencyModeCallback>& cb) override;
+ // Implementation helpers.
+ template<class C> sp<C> getCallbackImpl(void* cookie, wp<C> Callbacks::* field);
+ template<class C> void setCallbackImpl(void* cookie, wp<C> Callbacks::* field, const sp<C>& cb);
+
+ const std::string mInstance;
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IModule> mModule;
+ std::shared_ptr<::aidl::android::hardware::audio::core::sounddose::ISoundDose>
+ mSoundDose = nullptr;
+ Ports mPorts;
+ int32_t mDefaultInputPortId = -1;
+ int32_t mDefaultOutputPortId = -1;
+ PortConfigs mPortConfigs;
+ Patches mPatches;
+ std::map<audio_patch_handle_t, int32_t /*patch ID*/> mFwkHandles;
+ std::mutex mLock;
+ std::map<void*, Callbacks> mCallbacks GUARDED_BY(mLock);
};
} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index be063ab..e0b1afb 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -17,7 +17,7 @@
#include <stdio.h>
#define LOG_TAG "DeviceHalHidl"
-//#define LOG_NDEBUG 0
+// #define LOG_NDEBUG 0
#include <cutils/native_handle.h>
#include <cutils/properties.h>
@@ -35,6 +35,17 @@
#include "ParameterUtils.h"
#include "StreamHalHidl.h"
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+#include <aidl/android/hardware/audio/core/sounddose/BpSoundDose.h>
+#include <aidl/android/hardware/audio/sounddose/BpSoundDoseFactory.h>
+#include <android/binder_manager.h>
+
+constexpr std::string_view kSoundDoseInterfaceModule = "/default";
+
+using aidl::android::hardware::audio::core::sounddose::ISoundDose;
+using aidl::android::hardware::audio::sounddose::ISoundDoseFactory;
+#endif
+
using ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::implementation::CoreUtils;
@@ -46,11 +57,21 @@
using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
-#define TIME_CHECK() auto timeCheck = \
- mediautils::makeTimeCheckStatsForClassMethod(getClassName(), __func__)
+class DeviceHalHidl::SoundDoseWrapper {
+public:
+ SoundDoseWrapper() = default;
+ ~SoundDoseWrapper() = default;
+
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ std::shared_ptr<ISoundDoseFactory> mSoundDoseFactory;
+ std::shared_ptr<ISoundDose> mSoundDose;
+#endif
+};
DeviceHalHidl::DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device)
- : CoreConversionHelperHidl("DeviceHalHidl"), mDevice(device) {
+ : CoreConversionHelperHidl("DeviceHalHidl"),
+ mDevice(device),
+ mSoundDoseWrapper(std::make_unique<DeviceHalHidl::SoundDoseWrapper>()) {
}
DeviceHalHidl::DeviceHalHidl(
@@ -59,7 +80,8 @@
#if MAJOR_VERSION <= 6 || (MAJOR_VERSION == 7 && MINOR_VERSION == 0)
mDevice(device),
#endif
- mPrimaryDevice(device) {
+ mPrimaryDevice(device),
+ mSoundDoseWrapper(std::make_unique<DeviceHalHidl::SoundDoseWrapper>()) {
#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
auto getDeviceRet = mPrimaryDevice->getDevice();
if (getDeviceRet.isOk()) {
@@ -577,4 +599,50 @@
return processReturn("dump", ret);
}
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+status_t DeviceHalHidl::getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) {
+ if (mSoundDoseWrapper->mSoundDose != nullptr) {
+ *soundDoseBinder = mSoundDoseWrapper->mSoundDose->asBinder();
+ return OK;
+ }
+
+ if (mSoundDoseWrapper->mSoundDoseFactory == nullptr) {
+ std::string interface =
+ std::string(ISoundDoseFactory::descriptor) + kSoundDoseInterfaceModule.data();
+ AIBinder* binder = AServiceManager_checkService(interface.c_str());
+ if (binder == nullptr) {
+ ALOGW("%s service %s doesn't exist", __func__, interface.c_str());
+ return NO_INIT;
+ }
+ mSoundDoseWrapper->mSoundDoseFactory =
+ ISoundDoseFactory::fromBinder(ndk::SpAIBinder(binder));
+ }
+
+ auto result = mSoundDoseWrapper->mSoundDoseFactory->getSoundDose(
+ module, &mSoundDoseWrapper->mSoundDose);
+ if (!result.isOk()) {
+ ALOGW("%s could not get sound dose interface: %s", __func__, result.getMessage());
+ return BAD_VALUE;
+ }
+
+ if (mSoundDoseWrapper->mSoundDose == nullptr) {
+ ALOGW("%s standalone sound dose interface is not implemented", __func__);
+ *soundDoseBinder = nullptr;
+ return OK;
+ }
+
+ *soundDoseBinder = mSoundDoseWrapper->mSoundDose->asBinder();
+ ALOGI("%s using standalone sound dose interface", __func__);
+ return OK;
+}
+#else
+status_t DeviceHalHidl::getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) {
+ (void)module; // avoid unused param
+ (void)soundDoseBinder; // avoid unused param
+ return INVALID_OPERATION;
+}
+#endif
+
} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 052eb65..30fbd6d 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -130,12 +130,17 @@
status_t dump(int fd, const Vector<String16>& args) override;
+ status_t getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) override;
+
private:
friend class DevicesFactoryHalHidl;
sp<::android::hardware::audio::CPP_VERSION::IDevice> mDevice;
// Null if it's not a primary device.
sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice> mPrimaryDevice;
bool supportsSetConnectedState7_1 = true;
+ class SoundDoseWrapper;
+ const std::unique_ptr<SoundDoseWrapper> mSoundDoseWrapper;
// Can not be constructed directly by clients.
explicit DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
index b9ca164..b452fa3 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalAidl.cpp
@@ -19,6 +19,7 @@
#include <aidl/android/hardware/audio/core/IModule.h>
#include <android/binder_manager.h>
+#include <binder/IServiceManager.h>
#include <memory>
#include <utils/Log.h>
@@ -35,27 +36,28 @@
ALOG_ASSERT(iconfig != nullptr, "Provided default IConfig service is NULL");
}
-void DevicesFactoryHalAidl::onFirstRef() {
- ALOGE("%s not implemented yet", __func__);
-}
-
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
status_t DevicesFactoryHalAidl::openDevice(const char *name, sp<DeviceHalInterface> *device) {
if (name == nullptr || device == nullptr) {
return BAD_VALUE;
}
- ALOGE("%s not implemented yet %s", __func__, name);
- return INVALID_OPERATION;
- // TODO: only support primary now ("default" means "primary")
- if (strcmp(name, "primary") != 0) {
- auto serviceName = std::string() + IModule::descriptor + "/default";
- auto service = IModule::fromBinder(
+ std::shared_ptr<IModule> service;
+ // FIXME: Normally we would list the available HAL modules and connect to them;
+ // however, currently we still get the list of module names from the config.
+ // Since the example service does not provide all modules, the service manager
+ // would wait for the missing ones forever.
+ if (strcmp(name, "primary") == 0 || strcmp(name, "r_submix") == 0) {
+ if (strcmp(name, "primary") == 0) name = "default";
+ auto serviceName = std::string(IModule::descriptor) + "/" + name;
+ service = IModule::fromBinder(
ndk::SpAIBinder(AServiceManager_waitForService(serviceName.c_str())));
- ALOGW("%s fromBinder %s %s", __func__, IModule::descriptor, service ? "succ" : "fail");
- *device = new DeviceHalAidl(service);
+ ALOGE_IF(service == nullptr, "%s fromBinder %s failed", __func__, serviceName.c_str());
}
+ // If the service is a nullptr, the device will not be fully functional,
+ // but it will not crash either.
+ *device = sp<DeviceHalAidl>::make(name, service);
return OK;
}
@@ -63,26 +65,45 @@
if (pids == nullptr) {
return BAD_VALUE;
}
- ALOGE("%s not implemented yet", __func__);
- return INVALID_OPERATION;
+ // The functionality for retrieving debug info about services is not exposed via the NDK.
+ sp<IServiceManager> sm = defaultServiceManager();
+ if (sm == nullptr) {
+ return NO_INIT;
+ }
+ std::set<pid_t> pidsSet;
+ const auto moduleServiceName = std::string(IModule::descriptor) + "/";
+ auto debugInfos = sm->getServiceDebugInfo();
+ for (const auto& info : debugInfos) {
+ if (info.pid > 0 &&
+ info.name.size() > moduleServiceName.size() && // '>' because an instance name must follow
+ info.name.substr(0, moduleServiceName.size()) == moduleServiceName) {
+ pidsSet.insert(info.pid);
+ }
+ }
+ *pids = {pidsSet.begin(), pidsSet.end()};
+ return NO_ERROR;
}
status_t DevicesFactoryHalAidl::setCallbackOnce(sp<DevicesFactoryHalCallback> callback) {
- if (callback == nullptr) {
- return BAD_VALUE;
+ // Dynamic registration of module instances is not supported. The functionality
+ // in the audio server which is related to this callback can be removed together
+ // with HIDL support.
+ ALOG_ASSERT(callback != nullptr);
+ if (callback != nullptr) {
+ callback->onNewDevicesAvailable();
}
- ALOGE("%s not implemented yet", __func__);
- return INVALID_OPERATION;
+ return NO_ERROR;
}
AudioHalVersionInfo DevicesFactoryHalAidl::getHalVersion() const {
int32_t versionNumber = 0;
- if (mIConfig) {
- if (!mIConfig->getInterfaceVersion(&versionNumber).isOk()) {
- ALOGE("%s getInterfaceVersion failed", __func__);
- } else {
- ALOGI("%s getInterfaceVersion %d", __func__, versionNumber);
+ if (mIConfig != 0) {
+ if (ndk::ScopedAStatus status = mIConfig->getInterfaceVersion(&versionNumber);
+ !status.isOk()) {
+ ALOGE("%s getInterfaceVersion failed: %s", __func__, status.getDescription().c_str());
}
+ } else {
+ ALOGW("%s no IConfig instance", __func__);
}
// AIDL does not have minor version, fill 0 for all versions
return AudioHalVersionInfo(AudioHalVersionInfo::Type::AIDL, versionNumber);
diff --git a/media/libaudiohal/impl/DevicesFactoryHalAidl.h b/media/libaudiohal/impl/DevicesFactoryHalAidl.h
index 71138a0..cb627bc 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalAidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalAidl.h
@@ -27,7 +27,6 @@
public:
explicit DevicesFactoryHalAidl(
std::shared_ptr<::aidl::android::hardware::audio::core::IConfig> iConfig);
- void onFirstRef() override;
// Opens a device with the specified name. To close the device, it is
// necessary to release references to the returned object.
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.cpp b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
new file mode 100644
index 0000000..7e25b04
--- /dev/null
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.cpp
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "EffectConversionHelperAidl"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+
+#include <utils/Log.h>
+
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::CommandId;
+using ::aidl::android::hardware::audio::effect::Descriptor;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::media::audio::common::AudioDeviceDescription;
+using ::aidl::android::media::audio::common::AudioMode;
+using ::aidl::android::media::audio::common::AudioSource;
+using android::effect::utils::EffectParamReader;
+using android::effect::utils::EffectParamWriter;
+
+using ::android::status_t;
+
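+// Dispatch table from legacy effect_command_e codes to member function handlers;
+// handleCommand() below looks the incoming command up here and forwards the raw
+// command and reply buffers to the matching handler.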
+const std::map<uint32_t /* effect_command_e */, EffectConversionHelperAidl::CommandHandler>
+ EffectConversionHelperAidl::mCommandHandlerMap = {
+ {EFFECT_CMD_INIT, &EffectConversionHelperAidl::handleInit},
+ {EFFECT_CMD_SET_PARAM, &EffectConversionHelperAidl::handleSetParameter},
+ {EFFECT_CMD_GET_PARAM, &EffectConversionHelperAidl::handleGetParameter},
+ {EFFECT_CMD_SET_CONFIG, &EffectConversionHelperAidl::handleSetConfig},
+ {EFFECT_CMD_GET_CONFIG, &EffectConversionHelperAidl::handleGetConfig},
+ {EFFECT_CMD_RESET, &EffectConversionHelperAidl::handleReset},
+ {EFFECT_CMD_ENABLE, &EffectConversionHelperAidl::handleEnable},
+ {EFFECT_CMD_DISABLE, &EffectConversionHelperAidl::handleDisable},
+ {EFFECT_CMD_SET_AUDIO_SOURCE, &EffectConversionHelperAidl::handleSetAudioSource},
+ {EFFECT_CMD_SET_DEVICE, &EffectConversionHelperAidl::handleSetDevice},
+ {EFFECT_CMD_SET_INPUT_DEVICE, &EffectConversionHelperAidl::handleSetDevice},
+ {EFFECT_CMD_SET_VOLUME, &EffectConversionHelperAidl::handleSetVolume},
+ {EFFECT_CMD_OFFLOAD, &EffectConversionHelperAidl::handleSetOffload},
+ {EFFECT_CMD_FIRST_PROPRIETARY, &EffectConversionHelperAidl::handleFirstPriority}};
+
+EffectConversionHelperAidl::EffectConversionHelperAidl(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId, const Descriptor& desc)
+ : mSessionId(sessionId), mIoId(ioId), mDesc(desc), mEffect(std::move(effect)) {
+ mCommon.session = sessionId;
+ mCommon.ioHandle = ioId;
+ mCommon.input = mCommon.output = kDefaultAudioConfig;
+}
+
+status_t EffectConversionHelperAidl::handleCommand(uint32_t cmdCode, uint32_t cmdSize,
+ void* pCmdData, uint32_t* replySize,
+ void* pReplyData) {
+ const auto& handler = mCommandHandlerMap.find(cmdCode);
+ if (handler == mCommandHandlerMap.end() || !handler->second) {
+ ALOGE("%s handler for command %u doesn't exist", __func__, cmdCode);
+ return BAD_VALUE;
+ }
+ return (this->*handler->second)(cmdSize, pCmdData, replySize, pReplyData);
+}
+
+status_t EffectConversionHelperAidl::handleInit(uint32_t cmdSize __unused,
+ const void* pCmdData __unused, uint32_t* replySize,
+ void* pReplyData) {
+ if (!replySize || *replySize < sizeof(int) || !pReplyData) {
+ return BAD_VALUE;
+ }
+
+ return *(status_t*)pReplyData =
+ statusTFromBinderStatus(mEffect->open(mCommon, std::nullopt, &mOpenReturn));
+}
+
+status_t EffectConversionHelperAidl::handleSetParameter(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize < sizeof(effect_param_t) || !pCmdData || !replySize ||
+ *replySize < sizeof(int) || !pReplyData) {
+ return BAD_VALUE;
+ }
+
+ auto reader = EffectParamReader(*(effect_param_t*)pCmdData);
+ if (!reader.validateCmdSize(cmdSize)) {
+ ALOGE("%s illegal param %s size %u", __func__, reader.toString().c_str(), cmdSize);
+ return BAD_VALUE;
+ }
+
+ status_t ret = setParameter(reader);
+ EffectParamWriter writer(*(effect_param_t*)pReplyData);
+ writer.setStatus(ret);
+ return *(status_t*)pReplyData = ret;
+}
+
+status_t EffectConversionHelperAidl::handleGetParameter(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize < sizeof(effect_param_t) || !pCmdData || !replySize || !pReplyData) {
+ return BAD_VALUE;
+ }
+
+ const auto reader = EffectParamReader(*(effect_param_t*)pCmdData);
+ if (*replySize < sizeof(effect_param_t) + reader.getParameterSize()) {
+ ALOGE("%s illegal param %s, replySize %u", __func__, reader.toString().c_str(), *replySize);
+ return BAD_VALUE;
+ }
+
+ // copy effect_param_t and parameters
+ memcpy(pReplyData, pCmdData, sizeof(effect_param_t) + reader.getParameterSize());
+ auto writer = EffectParamWriter(*(effect_param_t*)pReplyData);
+ status_t ret = getParameter(writer);
+ writer.finishValueWrite();
+ writer.setStatus(ret);
+ *replySize = writer.getTotalSize();
+ if (ret != OK) {
+ ALOGE("%s error ret %d, %s", __func__, ret, writer.toString().c_str());
+ }
+ return ret;
+}
+
+status_t EffectConversionHelperAidl::handleSetConfig(uint32_t cmdSize,
+ const void* pCmdData __unused,
+ uint32_t* replySize, void* pReplyData) {
+ if (!replySize || *replySize != sizeof(int) || !pReplyData ||
+ cmdSize != sizeof(effect_config_t)) {
+ return BAD_VALUE;
+ }
+
+ // TODO: need to implement setConfig with setParameter(common)
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+
+status_t EffectConversionHelperAidl::handleGetConfig(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize, void* pReplyData) {
+ if (!replySize || *replySize != sizeof(effect_config_t) || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ Parameter param;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(
+            Parameter::Id::make<Parameter::Id::commonTag>(Parameter::common), &param)));
+
+ const auto& common = param.get<Parameter::common>();
+ effect_config_t* pConfig = (effect_config_t*)pReplyData;
+ pConfig->inputCfg = VALUE_OR_RETURN_STATUS(
+ ::aidl::android::aidl2legacy_AudioConfigBase_buffer_config_t(common.input.base, true));
+ pConfig->outputCfg =
+ VALUE_OR_RETURN_STATUS(::aidl::android::aidl2legacy_AudioConfigBase_buffer_config_t(
+ common.output.base, false));
+ mCommon = common;
+ return OK;
+}
+
+status_t EffectConversionHelperAidl::handleReset(uint32_t cmdSize __unused,
+ const void* pCmdData __unused, uint32_t* replySize,
+ void* pReplyData) {
+ if (!replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ return statusTFromBinderStatus(mEffect->command(CommandId::RESET));
+}
+
+status_t EffectConversionHelperAidl::handleEnable(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize, void* pReplyData) {
+ if (!replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ return statusTFromBinderStatus(mEffect->command(CommandId::START));
+}
+
+status_t EffectConversionHelperAidl::handleDisable(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize, void* pReplyData) {
+ if (!replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ return statusTFromBinderStatus(mEffect->command(CommandId::STOP));
+}
+
+status_t EffectConversionHelperAidl::handleSetAudioSource(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize != sizeof(uint32_t) || !pCmdData || !replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
+ return BAD_VALUE;
+ }
+
+ audio_source_t source = *(audio_source_t*)pCmdData;
+ AudioSource aidlSource =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_source_t_AudioSource(source));
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mEffect->setParameter(Parameter::make<Parameter::source>(aidlSource))));
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+
+status_t EffectConversionHelperAidl::handleSetAudioMode(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize != sizeof(uint32_t) || !pCmdData || !replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
+ return BAD_VALUE;
+ }
+ audio_mode_t mode = *(audio_mode_t *)pCmdData;
+ AudioMode aidlMode =
+ VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_mode_t_AudioMode(mode));
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mEffect->setParameter(Parameter::make<Parameter::mode>(aidlMode))));
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+
+status_t EffectConversionHelperAidl::handleSetDevice(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize != sizeof(uint32_t) || !pCmdData || !replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
+ return BAD_VALUE;
+ }
+ // TODO: convert from audio_devices_t to std::vector<AudioDeviceDescription>
+ // const auto& legacyDevice = *(uint32_t*)(pCmdData);
+ std::vector<AudioDeviceDescription> aidlDevices;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mEffect->setParameter(Parameter::make<Parameter::deviceDescription>(aidlDevices))));
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+status_t EffectConversionHelperAidl::handleSetVolume(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize != 2 * sizeof(uint32_t) || !pCmdData || !replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
+ return BAD_VALUE;
+ }
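+    // Legacy EFFECT_CMD_SET_VOLUME passes left/right gains as 8.24 fixed-point values;
+    // convert them to floating point for the AIDL volumeStereo parameter.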
+    Parameter::VolumeStereo volume = {.left = (float)(*(uint32_t*)pCmdData) / (1 << 24),
+                                      .right = (float)(*((uint32_t*)pCmdData + 1)) / (1 << 24)};
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ mEffect->setParameter(Parameter::make<Parameter::volumeStereo>(volume))));
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+
+status_t EffectConversionHelperAidl::handleSetOffload(uint32_t cmdSize, const void* pCmdData,
+ uint32_t* replySize, void* pReplyData) {
+ if (cmdSize < sizeof(effect_offload_param_t) || !pCmdData || !replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %u %p %p %p", __func__, cmdSize, pCmdData, replySize,
+ pReplyData);
+ return BAD_VALUE;
+ }
+ // TODO: handle this after effectproxy implemented in libaudiohal
+ return *static_cast<int32_t*>(pReplyData) = OK;
+}
+
+status_t EffectConversionHelperAidl::handleFirstPriority(uint32_t cmdSize __unused,
+ const void* pCmdData __unused,
+ uint32_t* replySize, void* pReplyData) {
+ if (!replySize || !pReplyData) {
+ ALOGE("%s parameter invalid %p %p", __func__, replySize, pReplyData);
+ return BAD_VALUE;
+ }
+
+ // TODO to be implemented
+ return OK;
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectConversionHelperAidl.h b/media/libaudiohal/impl/EffectConversionHelperAidl.h
new file mode 100644
index 0000000..94435c6
--- /dev/null
+++ b/media/libaudiohal/impl/EffectConversionHelperAidl.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <utils/Errors.h>
+
+#include <aidl/android/hardware/audio/effect/BpEffect.h>
+#include <system/audio_effect.h>
+#include <system/audio_effects/audio_effects_utils.h>
+
+namespace android {
+namespace effect {
+
+class EffectConversionHelperAidl {
+ public:
+ status_t handleCommand(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ virtual ~EffectConversionHelperAidl() {}
+
+ protected:
+ const int32_t mSessionId;
+ const int32_t mIoId;
+ const ::aidl::android::hardware::audio::effect::Descriptor mDesc;
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> mEffect;
+ ::aidl::android::hardware::audio::effect::IEffect::OpenEffectReturn mOpenReturn;
+ ::aidl::android::hardware::audio::effect::Parameter::Common mCommon;
+
+ EffectConversionHelperAidl(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc);
+
+ status_t handleSetParameter(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleGetParameter(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+
+ private:
+ const aidl::android::media::audio::common::AudioFormatDescription kDefaultFormatDescription = {
+ .type = aidl::android::media::audio::common::AudioFormatType::PCM,
+ .pcm = aidl::android::media::audio::common::PcmType::FLOAT_32_BIT};
+
+ static constexpr int kDefaultframeCount = 0x100;
+
+ using AudioChannelLayout = aidl::android::media::audio::common::AudioChannelLayout;
+ const aidl::android::media::audio::common::AudioConfig kDefaultAudioConfig = {
+ .base = {.sampleRate = 44100,
+ .channelMask = AudioChannelLayout::make<AudioChannelLayout::layoutMask>(
+ AudioChannelLayout::LAYOUT_STEREO),
+ .format = kDefaultFormatDescription},
+ .frameCount = kDefaultframeCount};
+ // command handler map
+ typedef status_t (EffectConversionHelperAidl::*CommandHandler)(uint32_t /* cmdSize */,
+ const void* /* pCmdData */,
+ uint32_t* /* replySize */,
+ void* /* pReplyData */);
+ static const std::map<uint32_t /* effect_command_e */, CommandHandler> mCommandHandlerMap;
+
+ status_t handleInit(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetConfig(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleGetConfig(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleEnable(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleDisable(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleReset(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetAudioSource(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetAudioMode(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetDevice(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetVolume(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleSetOffload(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+ status_t handleFirstPriority(uint32_t cmdSize, const void* pCmdData, uint32_t* replySize,
+ void* pReplyData);
+
+    // Implemented by the conversion helper of each effect type.
+ virtual status_t setParameter(utils::EffectParamReader& param) = 0;
+ virtual status_t getParameter(utils::EffectParamWriter& param) = 0;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalAidl.cpp b/media/libaudiohal/impl/EffectHalAidl.cpp
index 31c5ca5..8fa301a 100644
--- a/media/libaudiohal/impl/EffectHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectHalAidl.cpp
@@ -17,33 +17,123 @@
#define LOG_TAG "EffectHalAidl"
//#define LOG_NDEBUG 0
+#include <memory>
+
+#include <error/expected_utils.h>
#include <media/AidlConversionCppNdk.h>
-#include <media/AidlConversionNdk.h>
-#include <media/audiohal/AudioHalUtils.h>
+#include <media/AidlConversionEffect.h>
+#include <media/AidlConversionUtil.h>
+#include <media/audiohal/AudioEffectUuid.h>
#include <media/EffectsFactoryApi.h>
#include <mediautils/TimeCheck.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include "EffectHalAidl.h"
-#include <system/audio.h>
-
#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "effectsAidlConversion/AidlConversionAec.h"
+#include "effectsAidlConversion/AidlConversionAgc2.h"
+#include "effectsAidlConversion/AidlConversionBassBoost.h"
+#include "effectsAidlConversion/AidlConversionDownmix.h"
+#include "effectsAidlConversion/AidlConversionDynamicsProcessing.h"
+#include "effectsAidlConversion/AidlConversionEnvReverb.h"
+#include "effectsAidlConversion/AidlConversionEq.h"
+#include "effectsAidlConversion/AidlConversionHapticGenerator.h"
+#include "effectsAidlConversion/AidlConversionLoudnessEnhancer.h"
+#include "effectsAidlConversion/AidlConversionNoiseSuppression.h"
+#include "effectsAidlConversion/AidlConversionPresetReverb.h"
+#include "effectsAidlConversion/AidlConversionSpatializer.h"
+#include "effectsAidlConversion/AidlConversionVendorExtension.h"
+#include "effectsAidlConversion/AidlConversionVirtualizer.h"
+#include "effectsAidlConversion/AidlConversionVisualizer.h"
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
using ::aidl::android::hardware::audio::effect::CommandId;
using ::aidl::android::hardware::audio::effect::Descriptor;
using ::aidl::android::hardware::audio::effect::IEffect;
-using ::aidl::android::hardware::audio::effect::State;
+using ::aidl::android::hardware::audio::effect::IFactory;
using ::aidl::android::hardware::audio::effect::Parameter;
namespace android {
namespace effect {
-EffectHalAidl::EffectHalAidl(const std::shared_ptr<IEffect>& effect, uint64_t effectId,
- int32_t sessionId, int32_t ioId)
- : mEffectId(effectId), mSessionId(sessionId), mIoId(ioId), mEffect(effect) {}
+EffectHalAidl::EffectHalAidl(
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IFactory>& factory,
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect>& effect,
+ uint64_t effectId, int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : mFactory(factory),
+ mEffect(effect),
+ mEffectId(effectId),
+ mSessionId(sessionId),
+ mIoId(ioId),
+ mDesc(desc) {
+ createAidlConversion(effect, sessionId, ioId, desc);
+}
-EffectHalAidl::~EffectHalAidl() {}
+EffectHalAidl::~EffectHalAidl() {
+ if (mFactory) {
+ mFactory->destroyEffect(mEffect);
+ }
+}
+
+status_t EffectHalAidl::createAidlConversion(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc) {
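+    // Pick the conversion helper that matches the effect type UUID; unrecognized types fall
+    // back to the vendor extension helper below.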
+ const auto& typeUuid = desc.common.id.type;
+ ALOGI("%s create UUID %s", __func__, typeUuid.toString().c_str());
+ if (typeUuid == kAcousticEchoCancelerTypeUUID) {
+ mConversion =
+ std::make_unique<android::effect::AidlConversionAec>(effect, sessionId, ioId, desc);
+ } else if (typeUuid == kAutomaticGainControl2TypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionAgc2>(effect, sessionId, ioId,
+ desc);
+ } else if (typeUuid == kBassBoostTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionBassBoost>(effect, sessionId,
+ ioId, desc);
+ } else if (typeUuid == kDownmixTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionDownmix>(effect, sessionId,
+ ioId, desc);
+ } else if (typeUuid == kDynamicsProcessingTypeUUID) {
+ mConversion =
+ std::make_unique<android::effect::AidlConversionDp>(effect, sessionId, ioId, desc);
+ } else if (typeUuid == kEnvReverbTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionEnvReverb>(effect, sessionId,
+ ioId, desc);
+ } else if (typeUuid == kEqualizerTypeUUID) {
+ mConversion =
+ std::make_unique<android::effect::AidlConversionEq>(effect, sessionId, ioId, desc);
+ } else if (typeUuid == kHapticGeneratorTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionHapticGenerator>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kLoudnessEnhancerTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionLoudnessEnhancer>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kNoiseSuppressionTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionNoiseSuppression>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kPresetReverbTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionPresetReverb>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kSpatializerTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionSpatializer>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kVirtualizerTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionVirtualizer>(
+ effect, sessionId, ioId, desc);
+ } else if (typeUuid == kVisualizerTypeUUID) {
+ mConversion = std::make_unique<android::effect::AidlConversionVisualizer>(effect, sessionId,
+ ioId, desc);
+ } else {
+ // For unknown UUID, use vendor extension implementation
+ mConversion = std::make_unique<android::effect::AidlConversionVendorExtension>(
+ effect, sessionId, ioId, desc);
+ }
+ return OK;
+}
status_t EffectHalAidl::setInBuffer(const sp<EffectBufferHalInterface>& buffer) {
if (buffer == nullptr) {
@@ -63,7 +153,7 @@
status_t EffectHalAidl::process() {
ALOGW("%s not implemented yet", __func__);
- // write to input FMQ here?
+ // write to input FMQ here, and wait for statusMQ STATUS_OK
return OK;
}
@@ -73,137 +163,11 @@
return OK;
}
-status_t EffectHalAidl::handleSetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData) {
- if (pCmdData == NULL || cmdSize != sizeof(effect_config_t) || replySize == NULL ||
- *replySize != sizeof(int32_t) || pReplyData == NULL) {
- ALOGE("%s parameter error code %u", __func__, cmdCode);
- return BAD_VALUE;
- }
-
- *static_cast<int32_t*>(pReplyData) = FAILED_TRANSACTION;
- memcpy(&mConfig, pCmdData, cmdSize);
-
- State state;
- RETURN_IF_BINDER_FAIL(mEffect->getState(&state));
- // effect not open yet, save settings locally
- if (state != State::INIT) {
- effect_config_t* legacyConfig = static_cast<effect_config_t*>(pCmdData);
- // already open, apply latest settings
- Parameter aidlParam;
- Parameter::Common aidlCommon;
- aidlCommon.input.base =
- VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_AudioConfigBase_buffer_config_t(
- legacyConfig->inputCfg, true /* isInput */));
- aidlCommon.output.base =
- VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_AudioConfigBase_buffer_config_t(
- legacyConfig->outputCfg, false /* isInput */));
- aidlCommon.session = mSessionId;
- aidlCommon.ioHandle = mIoId;
- Parameter::Id id;
- id.set<Parameter::Id::commonTag>(Parameter::common);
- aidlParam.set<Parameter::common>(aidlCommon);
- RETURN_IF_BINDER_FAIL(mEffect->setParameter(aidlParam));
- }
- *(int*)pReplyData = 0;
- *static_cast<int32_t*>(pReplyData) = OK;
- return OK;
-}
-
-status_t EffectHalAidl::handleGetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData) {
- if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
- *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
- return BAD_VALUE;
- }
-
- *(effect_config_t*)pReplyData = mConfig;
- return OK;
-}
-
-status_t EffectHalAidl::handleSetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData) {
- ALOGW("%s not implemented yet", __func__);
- if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
- *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
- return BAD_VALUE;
- }
- return OK;
-}
-
-status_t EffectHalAidl::handleGetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData) {
- ALOGW("%s not implemented yet", __func__);
- if (pCmdData == NULL || cmdSize == 0 || replySize == NULL ||
- *replySize != sizeof(effect_config_t) || pReplyData == NULL) {
- ALOGE("%s parameter error with cmdCode %d", __func__, cmdCode);
- return BAD_VALUE;
- }
- return OK;
-}
-
status_t EffectHalAidl::command(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
uint32_t* replySize, void* pReplyData) {
- ALOGW("%s code %d not implemented yet", __func__, cmdCode);
- ::ndk::ScopedAStatus status;
- switch (cmdCode) {
- case EFFECT_CMD_INIT: {
- // open with default effect_config_t (convert to Parameter.Common)
- IEffect::OpenEffectReturn ret;
- Parameter::Common common;
- RETURN_IF_BINDER_FAIL(mEffect->open(common, std::nullopt, &ret));
- return OK;
- }
- case EFFECT_CMD_SET_CONFIG:
- return handleSetConfig(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- case EFFECT_CMD_GET_CONFIG:
- return handleGetConfig(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- case EFFECT_CMD_RESET:
- return mEffect->command(CommandId::RESET).getStatus();
- case EFFECT_CMD_ENABLE:
- return mEffect->command(CommandId::START).getStatus();
- case EFFECT_CMD_DISABLE:
- return mEffect->command(CommandId::STOP).getStatus();
- case EFFECT_CMD_SET_PARAM:
- return handleSetParameter(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- case EFFECT_CMD_SET_PARAM_DEFERRED:
- case EFFECT_CMD_SET_PARAM_COMMIT:
- // TODO
- return OK;
- case EFFECT_CMD_GET_PARAM:
- return handleGetParameter(cmdCode, cmdSize, pCmdData, replySize, pReplyData);
- case EFFECT_CMD_SET_DEVICE:
- return OK;
- case EFFECT_CMD_SET_VOLUME:
- return OK;
- case EFFECT_CMD_SET_AUDIO_MODE:
- return OK;
- case EFFECT_CMD_SET_CONFIG_REVERSE:
- return OK;
- case EFFECT_CMD_SET_INPUT_DEVICE:
- return OK;
- case EFFECT_CMD_GET_CONFIG_REVERSE:
- return OK;
- case EFFECT_CMD_GET_FEATURE_SUPPORTED_CONFIGS:
- return OK;
- case EFFECT_CMD_GET_FEATURE_CONFIG:
- return OK;
- case EFFECT_CMD_SET_FEATURE_CONFIG:
- return OK;
- case EFFECT_CMD_SET_AUDIO_SOURCE:
- return OK;
- case EFFECT_CMD_OFFLOAD:
- return OK;
- case EFFECT_CMD_DUMP:
- return OK;
- case EFFECT_CMD_FIRST_PROPRIETARY:
- return OK;
- default:
- return INVALID_OPERATION;
- }
- return INVALID_OPERATION;
+ return mConversion
+ ? mConversion->handleCommand(cmdCode, cmdSize, pCmdData, replySize, pReplyData)
+ : INVALID_OPERATION;
}
status_t EffectHalAidl::getDescriptor(effect_descriptor_t* pDescriptor) {
@@ -212,7 +176,7 @@
return BAD_VALUE;
}
Descriptor aidlDesc;
- RETURN_IF_BINDER_FAIL(mEffect->getDescriptor(&aidlDesc));
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getDescriptor(&aidlDesc)));
*pDescriptor = VALUE_OR_RETURN_STATUS(
::aidl::android::aidl2legacy_Descriptor_effect_descriptor(aidlDesc));
@@ -220,9 +184,7 @@
}
status_t EffectHalAidl::close() {
- auto ret = mEffect->close();
- ALOGI("%s %s", __func__, ret.getMessage());
- return ret.getStatus();
+ return statusTFromBinderStatus(mEffect->close());
}
status_t EffectHalAidl::dump(int fd) {
diff --git a/media/libaudiohal/impl/EffectHalAidl.h b/media/libaudiohal/impl/EffectHalAidl.h
index 76bb240..83b644b 100644
--- a/media/libaudiohal/impl/EffectHalAidl.h
+++ b/media/libaudiohal/impl/EffectHalAidl.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2022 The Android Open Source Project
+ * Copyright (C) 2023 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -17,8 +17,12 @@
#pragma once
#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include <aidl/android/hardware/audio/effect/IFactory.h>
#include <media/audiohal/EffectHalInterface.h>
#include <system/audio_effect.h>
+#include <memory>
+
+#include "EffectConversionHelperAidl.h"
namespace android {
namespace effect {
@@ -55,28 +59,35 @@
uint64_t effectId() const override { return mEffectId; }
- private:
- friend class EffectsFactoryHalAidl;
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> getIEffect() const {
+ return mEffect;
+ }
+ private:
+ friend class sp<EffectHalAidl>;
+
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IFactory> mFactory;
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> mEffect;
const uint64_t mEffectId;
const int32_t mSessionId;
const int32_t mIoId;
+ const ::aidl::android::hardware::audio::effect::Descriptor mDesc;
+ std::unique_ptr<EffectConversionHelperAidl> mConversion;
+
sp<EffectBufferHalInterface> mInBuffer, mOutBuffer;
effect_config_t mConfig;
- std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> mEffect;
+ status_t createAidlConversion(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc);
// Can not be constructed directly by clients.
- EffectHalAidl(const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect>& effect,
- uint64_t effectId, int32_t sessionId, int32_t ioId);
-
- status_t handleSetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData);
- status_t handleGetConfig(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData);
- status_t handleSetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData);
- status_t handleGetParameter(uint32_t cmdCode, uint32_t cmdSize, void* pCmdData,
- uint32_t* replySize, void* pReplyData);
+ EffectHalAidl(
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IFactory>& factory,
+ const std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect>& effect,
+ uint64_t effectId, int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc);
+ bool setEffectReverse(bool reverse);
// The destructor automatically releases the effect.
virtual ~EffectHalAidl();
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 3956a6c..ed952a3 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -46,9 +46,6 @@
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
-#define TIME_CHECK() auto timeCheck = \
- mediautils::makeTimeCheckStatsForClassMethod(getClassName(), __func__)
-
EffectHalHidl::EffectHalHidl(const sp<IEffect>& effect, uint64_t effectId)
: EffectConversionHelperHidl("EffectHalHidl"),
mEffect(effect), mEffectId(effectId), mBuffersChanged(true), mEfGroup(nullptr) {
diff --git a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
index 0039c86..0aae87b 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalAidl.cpp
@@ -21,16 +21,19 @@
//#define LOG_NDEBUG 0
#include <aidl/android/hardware/audio/effect/IFactory.h>
+#include <error/expected_utils.h>
#include <android/binder_manager.h>
#include <media/AidlConversionCppNdk.h>
-#include <media/AidlConversionNdk.h>
-#include <media/audiohal/AudioHalUtils.h>
+#include <media/AidlConversionEffect.h>
+#include <system/audio.h>
#include <utils/Log.h>
#include "EffectBufferHalAidl.h"
#include "EffectHalAidl.h"
#include "EffectsFactoryHalAidl.h"
+using ::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid;
+using aidl::android::aidl_utils::statusTFromBinderStatus;
using aidl::android::hardware::audio::effect::IFactory;
using aidl::android::media::audio::common::AudioUuid;
using android::detail::AudioHalVersionInfo;
@@ -55,7 +58,7 @@
{
std::lock_guard lg(mLock);
- RETURN_IF_NOT_OK(queryEffectList_l());
+ RETURN_STATUS_IF_ERROR(queryEffectList_l());
*pNumEffects = mDescList->size();
}
ALOGI("%s %d", __func__, *pNumEffects);
@@ -68,7 +71,7 @@
}
std::lock_guard lg(mLock);
- RETURN_IF_NOT_OK(queryEffectList_l());
+ RETURN_STATUS_IF_ERROR(queryEffectList_l());
auto listSize = mDescList->size();
if (index >= listSize) {
@@ -87,8 +90,7 @@
return BAD_VALUE;
}
- AudioUuid uuid =
- VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*halUuid));
+ AudioUuid uuid = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(*halUuid));
std::lock_guard lg(mLock);
return getHalDescriptorWithImplUuid_l(uuid, pDescriptor);
}
@@ -99,8 +101,7 @@
return BAD_VALUE;
}
- AudioUuid type =
- VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*halType));
+ AudioUuid type = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(*halType));
std::lock_guard lg(mLock);
return getHalDescriptorWithTypeUuid_l(type, descriptors);
}
@@ -111,24 +112,29 @@
if (uuid == nullptr || effect == nullptr) {
return BAD_VALUE;
}
- ALOGI("%s session %d ioId %d", __func__, sessionId, ioId);
-
- AudioUuid aidlUuid =
- VALUE_OR_RETURN_STATUS(::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(*uuid));
- std::shared_ptr<IEffect> aidlEffect;
- ndk::ScopedAStatus status = mFactory->createEffect(aidlUuid, &aidlEffect);
- if (!status.isOk() || aidlEffect == nullptr) {
- ALOGE("%s IFactory::createFactory failed %s UUID %s", __func__,
- status.getDescription().c_str(), aidlUuid.toString().c_str());
+ if (sessionId == AUDIO_SESSION_DEVICE && ioId == AUDIO_IO_HANDLE_NONE) {
return INVALID_OPERATION;
}
+
+ ALOGI("%s session %d ioId %d", __func__, sessionId, ioId);
+
+ AudioUuid aidlUuid = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_uuid_t_AudioUuid(*uuid));
+ std::shared_ptr<IEffect> aidlEffect;
+ Descriptor desc;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mFactory->createEffect(aidlUuid, &aidlEffect)));
+ if (aidlEffect == nullptr) {
+        ALOGE("%s IFactory::createEffect failed UUID %s", __func__, aidlUuid.toString().c_str());
+ return NAME_NOT_FOUND;
+ }
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(aidlEffect->getDescriptor(&desc)));
+
uint64_t effectId;
{
std::lock_guard lg(mLock);
effectId = ++mEffectIdCounter;
}
- *effect = new EffectHalAidl(aidlEffect, effectId, sessionId, ioId);
+ *effect = sp<EffectHalAidl>::make(mFactory, aidlEffect, effectId, sessionId, ioId, desc);
return OK;
}
@@ -174,7 +180,7 @@
return BAD_VALUE;
}
if (!mDescList) {
- RETURN_IF_NOT_OK(queryEffectList_l());
+ RETURN_STATUS_IF_ERROR(queryEffectList_l());
}
auto matchIt = std::find_if(mDescList->begin(), mDescList->end(),
@@ -195,7 +201,7 @@
return BAD_VALUE;
}
if (!mDescList) {
- RETURN_IF_NOT_OK(queryEffectList_l());
+ RETURN_STATUS_IF_ERROR(queryEffectList_l());
}
std::vector<Descriptor> result;
std::copy_if(mDescList->begin(), mDescList->end(), std::back_inserter(result),
diff --git a/media/libaudiohal/impl/StreamHalAidl.cpp b/media/libaudiohal/impl/StreamHalAidl.cpp
new file mode 100644
index 0000000..17b3c2e
--- /dev/null
+++ b/media/libaudiohal/impl/StreamHalAidl.cpp
@@ -0,0 +1,734 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "StreamHalAidl"
+//#define LOG_NDEBUG 0
+
+#include <algorithm>
+#include <cstdint>
+
+#include <audio_utils/clock.h>
+#include <mediautils/TimeCheck.h>
+#include <utils/Log.h>
+
+#include "DeviceHalAidl.h"
+#include "StreamHalAidl.h"
+
+using ::aidl::android::hardware::audio::core::IStreamCommon;
+using ::aidl::android::hardware::audio::core::IStreamIn;
+using ::aidl::android::hardware::audio::core::IStreamOut;
+using ::aidl::android::hardware::audio::core::StreamDescriptor;
+
+namespace android {
+
+using HalCommand = StreamDescriptor::Command;
+namespace {
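+// Helpers for building StreamDescriptor commands: commands without a payload carry an explicit
+// Void value, the others wrap the provided data.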
+template<HalCommand::Tag cmd> HalCommand makeHalCommand() {
+ return HalCommand::make<cmd>(::aidl::android::media::audio::common::Void{});
+}
+template<HalCommand::Tag cmd, typename T> HalCommand makeHalCommand(T data) {
+ return HalCommand::make<cmd>(data);
+}
+} // namespace
+
+// static
+template<class T>
+std::shared_ptr<IStreamCommon> StreamHalAidl::getStreamCommon(const std::shared_ptr<T>& stream) {
+ std::shared_ptr<::aidl::android::hardware::audio::core::IStreamCommon> streamCommon;
+ if (stream != nullptr) {
+ if (ndk::ScopedAStatus status = stream->getStreamCommon(&streamCommon);
+ !status.isOk()) {
+ ALOGE("%s: failed to retrieve IStreamCommon instance: %s", __func__,
+ status.getDescription().c_str());
+ }
+ }
+ return streamCommon;
+}
+
+StreamHalAidl::StreamHalAidl(
+ std::string_view className, bool isInput, const audio_config& config,
+ int32_t nominalLatency, StreamContextAidl&& context,
+ const std::shared_ptr<IStreamCommon>& stream)
+ : ConversionHelperAidl(className),
+ mIsInput(isInput),
+ mConfig(configToBase(config)),
+ mContext(std::move(context)),
+ mStream(stream) {
+ {
+ std::lock_guard l(mLock);
+ mLastReply.latencyMs = nominalLatency;
+ }
+ // Instrument audio signal power logging.
+ // Note: This assumes channel mask, format, and sample rate do not change after creation.
+ if (audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ /* mStreamPowerLog.isUserDebugOrEngBuild() && */
+ StreamHalAidl::getAudioProperties(&config) == NO_ERROR) {
+ mStreamPowerLog.init(config.sample_rate, config.channel_mask, config.format);
+ }
+}
+
+StreamHalAidl::~StreamHalAidl() {
+ if (mStream != nullptr) {
+ ndk::ScopedAStatus status = mStream->close();
+ ALOGE_IF(!status.isOk(), "%s: status %s", __func__, status.getDescription().c_str());
+ }
+}
+
+status_t StreamHalAidl::getBufferSize(size_t *size) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (size == nullptr) {
+ return BAD_VALUE;
+ }
+ if (mContext.getFrameSizeBytes() == 0 || mContext.getBufferSizeFrames() == 0 ||
+ !mStream) {
+ return NO_INIT;
+ }
+ *size = mContext.getBufferSizeBytes();
+ return OK;
+}
+
+status_t StreamHalAidl::getAudioProperties(audio_config_base_t *configBase) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (configBase == nullptr) {
+ return BAD_VALUE;
+ }
+ if (!mStream) return NO_INIT;
+ *configBase = mConfig;
+ return OK;
+}
+
+status_t StreamHalAidl::setParameters(const String8& kvPairs __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::getParameters(const String8& keys __unused, String8 *values) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ values->clear();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::getFrameSize(size_t *size) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (size == nullptr) {
+ return BAD_VALUE;
+ }
+ if (mContext.getFrameSizeBytes() == 0 || !mStream) {
+ return NO_INIT;
+ }
+ *size = mContext.getFrameSizeBytes();
+ return OK;
+}
+
+status_t StreamHalAidl::addEffect(sp<EffectHalInterface> effect __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::removeEffect(sp<EffectHalInterface> effect __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::standby() {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ const auto state = getState();
+ StreamDescriptor::Reply reply;
+ switch (state) {
+ case StreamDescriptor::State::ACTIVE:
+ if (status_t status = pause(&reply); status != OK) return status;
+ if (reply.state != StreamDescriptor::State::PAUSED) {
+ ALOGE("%s: unexpected stream state: %s (expected PAUSED)",
+ __func__, toString(reply.state).c_str());
+ return INVALID_OPERATION;
+ }
+ FALLTHROUGH_INTENDED;
+ case StreamDescriptor::State::PAUSED:
+ case StreamDescriptor::State::DRAIN_PAUSED:
+ return flush();
+ case StreamDescriptor::State::IDLE:
+ if (status_t status = sendCommand(makeHalCommand<HalCommand::Tag::standby>(),
+ &reply); status != OK) {
+ return status;
+ }
+ if (reply.state != StreamDescriptor::State::STANDBY) {
+ ALOGE("%s: unexpected stream state: %s (expected STANDBY)",
+ __func__, toString(reply.state).c_str());
+ return INVALID_OPERATION;
+ }
+ FALLTHROUGH_INTENDED;
+ case StreamDescriptor::State::STANDBY:
+ return OK;
+ default:
+ ALOGE("%s: not supported from %s stream state %s",
+ __func__, mIsInput ? "input" : "output", toString(state).c_str());
+ return INVALID_OPERATION;
+ }
+}
+
+status_t StreamHalAidl::dump(int fd, const Vector<String16>& args) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ return mStream->dump(fd, Args(args).args(), args.size());
+}
+
+status_t StreamHalAidl::start() {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::stop() {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::getLatency(uint32_t *latency) {
+ ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (!mStream) return NO_INIT;
+ StreamDescriptor::Reply reply;
+ if (status_t status = updateCountersIfNeeded(&reply); status != OK) {
+ return status;
+ }
+ *latency = std::max<int32_t>(0, reply.latencyMs);
+ return OK;
+}
+
+status_t StreamHalAidl::getObservablePosition(int64_t *frames, int64_t *timestamp) {
+ ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (!mStream) return NO_INIT;
+ StreamDescriptor::Reply reply;
+ if (status_t status = updateCountersIfNeeded(&reply); status != OK) {
+ return status;
+ }
+ *frames = reply.observable.frames;
+ *timestamp = reply.observable.timeNs;
+ return OK;
+}
+
+status_t StreamHalAidl::getXruns(int32_t *frames) {
+ ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ if (!mStream) return NO_INIT;
+ StreamDescriptor::Reply reply;
+ if (status_t status = updateCountersIfNeeded(&reply); status != OK) {
+ return status;
+ }
+ *frames = reply.xrunFrames;
+ return OK;
+}
+
+status_t StreamHalAidl::transfer(void *buffer, size_t bytes, size_t *transferred) {
+ ALOGV("%p %s::%s", this, getClassName().c_str(), __func__);
+ // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
+ if (!mStream || mContext.getDataMQ() == nullptr) return NO_INIT;
+ mWorkerTid.store(gettid(), std::memory_order_release);
+ // Switch the stream into an active state if needed.
+ // Note: in future we may add support for priming the audio pipeline
+ // with data prior to enabling output (thus we can issue a "burst" command in the "standby"
+ // stream state), however this scenario wasn't supported by the HIDL HAL.
+ if (getState() == StreamDescriptor::State::STANDBY) {
+ StreamDescriptor::Reply reply;
+ if (status_t status = sendCommand(makeHalCommand<HalCommand::Tag::start>(), &reply);
+ status != OK) {
+ return status;
+ }
+ if (reply.state != StreamDescriptor::State::IDLE) {
+ ALOGE("%s: failed to get the stream out of standby, actual state: %s",
+ __func__, toString(reply.state).c_str());
+ return INVALID_OPERATION;
+ }
+ }
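+    // For output streams, clamp the burst size to the space available in the data MQ so the
+    // write below does not fail for lack of space.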
+ if (!mIsInput) {
+ bytes = std::min(bytes, mContext.getDataMQ()->availableToWrite());
+ }
+ StreamDescriptor::Command burst =
+ StreamDescriptor::Command::make<StreamDescriptor::Command::Tag::burst>(bytes);
+ if (!mIsInput) {
+ if (!mContext.getDataMQ()->write(static_cast<const int8_t*>(buffer), bytes)) {
+ ALOGE("%s: failed to write %zu bytes to data MQ", __func__, bytes);
+ return NOT_ENOUGH_DATA;
+ }
+ }
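+    // The reply to 'burst' reports in fmqByteCount how many bytes the HAL actually consumed
+    // (output) or produced (input).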
+ StreamDescriptor::Reply reply;
+ if (status_t status = sendCommand(burst, &reply); status != OK) {
+ return status;
+ }
+ *transferred = reply.fmqByteCount;
+ if (mIsInput) {
+ LOG_ALWAYS_FATAL_IF(*transferred > bytes,
+ "%s: HAL module read %zu bytes, which exceeds requested count %zu",
+ __func__, *transferred, bytes);
+ if (!mContext.getDataMQ()->read(static_cast<int8_t*>(buffer),
+ mContext.getDataMQ()->availableToRead())) {
+            ALOGE("%s: failed to read %zu bytes from data MQ", __func__, *transferred);
+ return NOT_ENOUGH_DATA;
+ }
+ }
+ mStreamPowerLog.log(buffer, *transferred);
+ return OK;
+}
+
+status_t StreamHalAidl::pause(StreamDescriptor::Reply* reply) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ return sendCommand(makeHalCommand<HalCommand::Tag::pause>(), reply,
+            true /*safeFromNonWorkerThread*/); // The worker stops its I/O activity first.
+}
+
+status_t StreamHalAidl::resume(StreamDescriptor::Reply* reply) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ if (mIsInput) {
+ return sendCommand(makeHalCommand<HalCommand::Tag::burst>(0), reply);
+ } else {
+ return sendCommand(makeHalCommand<HalCommand::Tag::start>(), reply);
+ }
+}
+
+status_t StreamHalAidl::drain(bool earlyNotify, StreamDescriptor::Reply* reply) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ return sendCommand(makeHalCommand<HalCommand::Tag::drain>(
+ mIsInput ? StreamDescriptor::DrainMode::DRAIN_UNSPECIFIED :
+ earlyNotify ? StreamDescriptor::DrainMode::DRAIN_EARLY_NOTIFY :
+ StreamDescriptor::DrainMode::DRAIN_ALL), reply);
+}
+
+status_t StreamHalAidl::flush(StreamDescriptor::Reply* reply) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ return sendCommand(makeHalCommand<HalCommand::Tag::flush>(), reply,
+            true /*safeFromNonWorkerThread*/); // The worker stops its I/O activity first.
+}
+
+status_t StreamHalAidl::exit() {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::createMmapBuffer(int32_t minSizeFrames __unused,
+ struct audio_mmap_buffer_info *info __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::getMmapPosition(struct audio_mmap_position *position __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamHalAidl::setHalThreadPriority(int priority __unused) {
+ // Obsolete, must be done by the HAL module.
+ return OK;
+}
+
+status_t StreamHalAidl::getHalPid(pid_t *pid __unused) {
+ ALOGD("%p %s::%s", this, getClassName().c_str(), __func__);
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+bool StreamHalAidl::requestHalThreadPriority(pid_t threadPid __unused, pid_t threadId __unused) {
+ // Obsolete, must be done by the HAL module.
+ return true;
+}
+
+status_t StreamHalAidl::legacyCreateAudioPatch(const struct audio_port_config& port __unused,
+ std::optional<audio_source_t> source __unused,
+ audio_devices_t type __unused) {
+ // Obsolete since 'DeviceHalAidl.supportsAudioPatches' always returns 'true'.
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalAidl::legacyReleaseAudioPatch() {
+ // Obsolete since 'DeviceHalAidl.supportsAudioPatches' always returns 'true'.
+ return INVALID_OPERATION;
+}
+
+status_t StreamHalAidl::sendCommand(
+ const ::aidl::android::hardware::audio::core::StreamDescriptor::Command &command,
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply,
+ bool safeFromNonWorkerThread) {
+ // TIME_CHECK(); // TODO(b/238654698) reenable only when optimized.
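+    // Commands are normally issued from the stream's worker thread, since the blocking reply
+    // read below must be paired with the command that was just written.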
+ if (!safeFromNonWorkerThread) {
+ const pid_t workerTid = mWorkerTid.load(std::memory_order_acquire);
+ LOG_ALWAYS_FATAL_IF(workerTid != gettid(),
+ "%s %s: must be invoked from the worker thread (%d)",
+ __func__, command.toString().c_str(), workerTid);
+ }
+ if (!mContext.getCommandMQ()->writeBlocking(&command, 1)) {
+ ALOGE("%s: failed to write command %s to MQ", __func__, command.toString().c_str());
+ return NOT_ENOUGH_DATA;
+ }
+ StreamDescriptor::Reply localReply{};
+ if (reply == nullptr) {
+ reply = &localReply;
+ }
+ if (!mContext.getReplyMQ()->readBlocking(reply, 1)) {
+ ALOGE("%s: failed to read from reply MQ, command %s", __func__, command.toString().c_str());
+ return NOT_ENOUGH_DATA;
+ }
+ {
+ std::lock_guard l(mLock);
+ mLastReply = *reply;
+ }
+ switch (reply->status) {
+ case STATUS_OK: return OK;
+ case STATUS_BAD_VALUE: return BAD_VALUE;
+ case STATUS_INVALID_OPERATION: return INVALID_OPERATION;
+ case STATUS_NOT_ENOUGH_DATA: return NOT_ENOUGH_DATA;
+ default:
+ ALOGE("%s: unexpected status %d returned for command %s",
+ __func__, reply->status, command.toString().c_str());
+ return INVALID_OPERATION;
+ }
+}
+
+status_t StreamHalAidl::updateCountersIfNeeded(
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply) {
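+    // Only poll the HAL with 'getStatus' from the worker thread and only when the stream is not
+    // actively moving data; otherwise report the counters from the last cached reply.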
+ if (mWorkerTid.load(std::memory_order_acquire) == gettid()) {
+ if (const auto state = getState(); state != StreamDescriptor::State::ACTIVE &&
+ state != StreamDescriptor::State::DRAINING &&
+ state != StreamDescriptor::State::TRANSFERRING) {
+ return sendCommand(makeHalCommand<HalCommand::Tag::getStatus>(), reply);
+ }
+ }
+ if (reply != nullptr) {
+ std::lock_guard l(mLock);
+ *reply = mLastReply;
+ }
+ return OK;
+}
+
+StreamOutHalAidl::StreamOutHalAidl(
+ const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
+ const std::shared_ptr<IStreamOut>& stream, const sp<CallbackBroker>& callbackBroker)
+ : StreamHalAidl("StreamOutHalAidl", false /*isInput*/, config, nominalLatency,
+ std::move(context), getStreamCommon(stream)),
+ mStream(stream), mCallbackBroker(callbackBroker) {}
+
+StreamOutHalAidl::~StreamOutHalAidl() {
+ if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
+ broker->clearCallbacks(this);
+ }
+}
+
+status_t StreamOutHalAidl::getLatency(uint32_t *latency) {
+ return StreamHalAidl::getLatency(latency);
+}
+
+status_t StreamOutHalAidl::setVolume(float left __unused, float right __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::selectPresentation(int presentationId __unused, int programId __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::write(const void *buffer, size_t bytes, size_t *written) {
+ if (buffer == nullptr || written == nullptr) {
+ return BAD_VALUE;
+ }
+ // For the output scenario, 'transfer' does not modify the buffer.
+ return transfer(const_cast<void*>(buffer), bytes, written);
+}
+
+status_t StreamOutHalAidl::getRenderPosition(uint32_t *dspFrames) {
+ if (dspFrames == nullptr) {
+ return BAD_VALUE;
+ }
+ int64_t aidlFrames = 0, aidlTimestamp = 0;
+ if (status_t status = getObservablePosition(&aidlFrames, &aidlTimestamp); status != OK) {
+        return status;
+ }
+ *dspFrames = std::clamp<int64_t>(aidlFrames, 0, UINT32_MAX);
+ return OK;
+}
+
+status_t StreamOutHalAidl::getNextWriteTimestamp(int64_t *timestamp __unused) {
+ // Obsolete, use getPresentationPosition.
+ return INVALID_OPERATION;
+}
+
+status_t StreamOutHalAidl::setCallback(wp<StreamOutHalInterfaceCallback> callback) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
+ if (auto cb = callback.promote(); cb != nullptr) {
+ broker->setStreamOutCallback(this, cb);
+ } else {
+ // It is expected that the framework never passes a null pointer.
+ // In the AIDL model callbacks can't be "unregistered".
+ LOG_ALWAYS_FATAL("%s: received an expired or null callback pointer", __func__);
+ }
+ }
+ return OK;
+}
+
+status_t StreamOutHalAidl::supportsPauseAndResume(bool *supportsPause, bool *supportsResume) {
+ if (supportsPause == nullptr || supportsResume == nullptr) {
+ return BAD_VALUE;
+ }
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ *supportsPause = *supportsResume = true;
+ return OK;
+}
+
+status_t StreamOutHalAidl::pause() {
+ return StreamHalAidl::pause();
+}
+
+status_t StreamOutHalAidl::resume() {
+ return StreamHalAidl::resume();
+}
+
+status_t StreamOutHalAidl::supportsDrain(bool *supportsDrain) {
+ if (supportsDrain == nullptr) {
+ return BAD_VALUE;
+ }
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ *supportsDrain = true;
+ return OK;
+}
+
+status_t StreamOutHalAidl::drain(bool earlyNotify) {
+ return StreamHalAidl::drain(earlyNotify);
+}
+
+status_t StreamOutHalAidl::flush() {
+ return StreamHalAidl::flush();
+}
+
+status_t StreamOutHalAidl::getPresentationPosition(uint64_t *frames, struct timespec *timestamp) {
+ if (frames == nullptr || timestamp == nullptr) {
+ return BAD_VALUE;
+ }
+ int64_t aidlFrames = 0, aidlTimestamp = 0;
+ if (status_t status = getObservablePosition(&aidlFrames, &aidlTimestamp); status != OK) {
+ return status;
+ }
+ *frames = std::max<int64_t>(0, aidlFrames);
+ timestamp->tv_sec = aidlTimestamp / NANOS_PER_SECOND;
+ timestamp->tv_nsec = aidlTimestamp - timestamp->tv_sec * NANOS_PER_SECOND;
+ return OK;
+}
+
+status_t StreamOutHalAidl::updateSourceMetadata(
+ const StreamOutHalInterface::SourceMetadata& sourceMetadata __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::getDualMonoMode(audio_dual_mono_mode_t* mode __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::setDualMonoMode(audio_dual_mono_mode_t mode __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::getAudioDescriptionMixLevel(float* leveldB __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::setAudioDescriptionMixLevel(float leveldB __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::getPlaybackRateParameters(
+ audio_playback_rate_t* playbackRate __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::setPlaybackRateParameters(
+ const audio_playback_rate_t& playbackRate __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::setEventCallback(
+ const sp<StreamOutHalInterfaceEventCallback>& callback) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
+ broker->setStreamOutEventCallback(this, callback);
+ }
+ return OK;
+}
+
+status_t StreamOutHalAidl::setLatencyMode(audio_latency_mode_t mode __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::getRecommendedLatencyModes(
+ std::vector<audio_latency_mode_t> *modes __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamOutHalAidl::setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ if (auto broker = mCallbackBroker.promote(); broker != nullptr) {
+ broker->setStreamOutLatencyModeCallback(this, callback);
+ }
+ return OK;
+}
+
+status_t StreamOutHalAidl::exit() {
+ return StreamHalAidl::exit();
+}
+
+StreamInHalAidl::StreamInHalAidl(
+ const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
+ const std::shared_ptr<IStreamIn>& stream)
+ : StreamHalAidl("StreamInHalAidl", true /*isInput*/, config, nominalLatency,
+ std::move(context), getStreamCommon(stream)),
+ mStream(stream) {}
+
+status_t StreamInHalAidl::setGain(float gain __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamInHalAidl::read(void *buffer, size_t bytes, size_t *read) {
+ if (buffer == nullptr || read == nullptr) {
+ return BAD_VALUE;
+ }
+ return transfer(buffer, bytes, read);
+}
+
+status_t StreamInHalAidl::getInputFramesLost(uint32_t *framesLost) {
+ if (framesLost == nullptr) {
+ return BAD_VALUE;
+ }
+ int32_t aidlXruns = 0;
+ if (status_t status = getXruns(&aidlXruns); status != OK) {
+ return status;
+ }
+ *framesLost = std::max<int32_t>(0, aidlXruns);
+ return OK;
+}
+
+status_t StreamInHalAidl::getCapturePosition(int64_t *frames, int64_t *time) {
+ if (frames == nullptr || time == nullptr) {
+ return BAD_VALUE;
+ }
+ return getObservablePosition(frames, time);
+}
+
+status_t StreamInHalAidl::getActiveMicrophones(
+ std::vector<media::MicrophoneInfo> *microphones __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamInHalAidl::updateSinkMetadata(
+ const StreamInHalInterface::SinkMetadata& sinkMetadata __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamInHalAidl::setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+status_t StreamInHalAidl::setPreferredMicrophoneFieldDimension(float zoom __unused) {
+ TIME_CHECK();
+ if (!mStream) return NO_INIT;
+ ALOGE("%s not implemented yet", __func__);
+ return OK;
+}
+
+} // namespace android
diff --git a/media/libaudiohal/impl/StreamHalAidl.h b/media/libaudiohal/impl/StreamHalAidl.h
new file mode 100644
index 0000000..162c7bc
--- /dev/null
+++ b/media/libaudiohal/impl/StreamHalAidl.h
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <memory>
+#include <mutex>
+#include <string_view>
+
+#include <aidl/android/hardware/audio/core/BpStreamCommon.h>
+#include <aidl/android/hardware/audio/core/BpStreamIn.h>
+#include <aidl/android/hardware/audio/core/BpStreamOut.h>
+#include <fmq/AidlMessageQueue.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/audiohal/StreamHalInterface.h>
+
+#include "ConversionHelperAidl.h"
+#include "StreamPowerLog.h"
+
+namespace android {
+
+class StreamContextAidl {
+ public:
+ typedef AidlMessageQueue<::aidl::android::hardware::audio::core::StreamDescriptor::Command,
+ ::aidl::android::hardware::common::fmq::SynchronizedReadWrite> CommandMQ;
+ typedef AidlMessageQueue<::aidl::android::hardware::audio::core::StreamDescriptor::Reply,
+ ::aidl::android::hardware::common::fmq::SynchronizedReadWrite> ReplyMQ;
+ typedef AidlMessageQueue<int8_t,
+ ::aidl::android::hardware::common::fmq::SynchronizedReadWrite> DataMQ;
+
+ explicit StreamContextAidl(
+ const ::aidl::android::hardware::audio::core::StreamDescriptor& descriptor)
+ : mFrameSizeBytes(descriptor.frameSizeBytes),
+ mCommandMQ(new CommandMQ(descriptor.command)),
+ mReplyMQ(new ReplyMQ(descriptor.reply)),
+ mBufferSizeFrames(descriptor.bufferSizeFrames),
+ mDataMQ(maybeCreateDataMQ(descriptor)) {}
+ StreamContextAidl(StreamContextAidl&& other) :
+ mFrameSizeBytes(other.mFrameSizeBytes),
+ mCommandMQ(std::move(other.mCommandMQ)),
+ mReplyMQ(std::move(other.mReplyMQ)),
+ mBufferSizeFrames(other.mBufferSizeFrames),
+ mDataMQ(std::move(other.mDataMQ)) {}
+ StreamContextAidl& operator=(StreamContextAidl&& other) {
+ mFrameSizeBytes = other.mFrameSizeBytes;
+ mCommandMQ = std::move(other.mCommandMQ);
+ mReplyMQ = std::move(other.mReplyMQ);
+ mBufferSizeFrames = other.mBufferSizeFrames;
+ mDataMQ = std::move(other.mDataMQ);
+ return *this;
+ }
+ bool isValid() const {
+ return mFrameSizeBytes != 0 &&
+ mCommandMQ != nullptr && mCommandMQ->isValid() &&
+ mReplyMQ != nullptr && mReplyMQ->isValid() &&
+                (mDataMQ == nullptr || (
+ mDataMQ->isValid() &&
+ mDataMQ->getQuantumCount() * mDataMQ->getQuantumSize() >=
+ mFrameSizeBytes * mBufferSizeFrames));
+ }
+ size_t getBufferSizeBytes() const { return mFrameSizeBytes * mBufferSizeFrames; }
+ size_t getBufferSizeFrames() const { return mBufferSizeFrames; }
+ CommandMQ* getCommandMQ() const { return mCommandMQ.get(); }
+ DataMQ* getDataMQ() const { return mDataMQ.get(); }
+ size_t getFrameSizeBytes() const { return mFrameSizeBytes; }
+ ReplyMQ* getReplyMQ() const { return mReplyMQ.get(); }
+
+ private:
+ static std::unique_ptr<DataMQ> maybeCreateDataMQ(
+ const ::aidl::android::hardware::audio::core::StreamDescriptor& descriptor) {
+ using Tag = ::aidl::android::hardware::audio::core::StreamDescriptor::AudioBuffer::Tag;
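+        // Only FMQ-backed streams expose a data queue; other buffer types (e.g. MMAP) do not
+        // use a data MQ, so return nullptr for them.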
+ if (descriptor.audio.getTag() == Tag::fmq) {
+ return std::make_unique<DataMQ>(descriptor.audio.get<Tag::fmq>());
+ }
+ return nullptr;
+ }
+
+ size_t mFrameSizeBytes;
+ std::unique_ptr<CommandMQ> mCommandMQ;
+ std::unique_ptr<ReplyMQ> mReplyMQ;
+ size_t mBufferSizeFrames;
+ std::unique_ptr<DataMQ> mDataMQ;
+};
+
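+// Common implementation of StreamHalInterface on top of the AIDL IStreamCommon interface,
+// using the command/reply/data queues wrapped by StreamContextAidl.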
+class StreamHalAidl : public virtual StreamHalInterface, public ConversionHelperAidl {
+ public:
+ // Return size of input/output buffer in bytes for this stream - eg. 4800.
+ status_t getBufferSize(size_t *size) override;
+
+ // Return the base configuration of the stream:
+ // - channel mask;
+ // - format - e.g. AUDIO_FORMAT_PCM_16_BIT;
+ // - sampling rate in Hz - eg. 44100.
+ status_t getAudioProperties(audio_config_base_t *configBase) override;
+
+ // Set audio stream parameters.
+ status_t setParameters(const String8& kvPairs) override;
+
+ // Get audio stream parameters.
+ status_t getParameters(const String8& keys, String8 *values) override;
+
+    // Return the frame size (number of bytes per audio frame) of a stream.
+ status_t getFrameSize(size_t *size) override;
+
+ // Add or remove the effect on the stream.
+ status_t addEffect(sp<EffectHalInterface> effect) override;
+ status_t removeEffect(sp<EffectHalInterface> effect) override;
+
+ // Put the audio hardware input/output into standby mode.
+ status_t standby() override;
+
+ status_t dump(int fd, const Vector<String16>& args) override;
+
+ // Start a stream operating in mmap mode.
+ status_t start() override;
+
+ // Stop a stream operating in mmap mode.
+ status_t stop() override;
+
+ // Retrieve information on the data buffer in mmap mode.
+ status_t createMmapBuffer(int32_t minSizeFrames,
+ struct audio_mmap_buffer_info *info) override;
+
+ // Get current read/write position in the mmap buffer
+ status_t getMmapPosition(struct audio_mmap_position *position) override;
+
+ // Set the priority of the thread that interacts with the HAL
+ // (must match the priority of the audioflinger's thread that calls 'read' / 'write')
+ status_t setHalThreadPriority(int priority) override;
+
+ status_t legacyCreateAudioPatch(const struct audio_port_config& port,
+ std::optional<audio_source_t> source,
+ audio_devices_t type) override;
+
+ status_t legacyReleaseAudioPatch() override;
+
+ protected:
+ template<class T>
+ static std::shared_ptr<::aidl::android::hardware::audio::core::IStreamCommon> getStreamCommon(
+ const std::shared_ptr<T>& stream);
+
+ // Subclasses can not be constructed directly by clients.
+ StreamHalAidl(std::string_view className,
+ bool isInput,
+ const audio_config& config,
+ int32_t nominalLatency,
+ StreamContextAidl&& context,
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamCommon>& stream);
+
+ ~StreamHalAidl() override;
+
+ status_t getHalPid(pid_t *pid);
+
+ bool requestHalThreadPriority(pid_t threadPid, pid_t threadId);
+
+ status_t getLatency(uint32_t *latency);
+
+ status_t getObservablePosition(int64_t *frames, int64_t *timestamp);
+
+ status_t getXruns(int32_t *frames);
+
+ status_t transfer(void *buffer, size_t bytes, size_t *transferred);
+
+ status_t pause(
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr);
+
+ status_t resume(
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr);
+
+ status_t drain(bool earlyNotify,
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr);
+
+ status_t flush(
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr);
+
+ status_t exit();
+
+ const bool mIsInput;
+ const audio_config_base_t mConfig;
+ const StreamContextAidl mContext;
+
+ private:
+ static audio_config_base_t configToBase(const audio_config& config) {
+ audio_config_base_t result = AUDIO_CONFIG_BASE_INITIALIZER;
+ result.sample_rate = config.sample_rate;
+ result.channel_mask = config.channel_mask;
+ result.format = config.format;
+ return result;
+ }
+ ::aidl::android::hardware::audio::core::StreamDescriptor::State getState() {
+ std::lock_guard l(mLock);
+ return mLastReply.state;
+ }
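+    // Helpers for exchanging StreamDescriptor commands and replies over the FMQ pair;
+    // implementations are in StreamHalAidl.cpp.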
+ status_t sendCommand(
+ const ::aidl::android::hardware::audio::core::StreamDescriptor::Command &command,
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr,
+ bool safeFromNonWorkerThread = false);
+ status_t updateCountersIfNeeded(
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply* reply = nullptr);
+
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamCommon> mStream;
+ std::mutex mLock;
+ ::aidl::android::hardware::audio::core::StreamDescriptor::Reply mLastReply GUARDED_BY(mLock);
+ // mStreamPowerLog is used for audio signal power logging.
+ StreamPowerLog mStreamPowerLog;
+ std::atomic<pid_t> mWorkerTid = -1;
+};
+
+class CallbackBroker;
+
+class StreamOutHalAidl : public StreamOutHalInterface, public StreamHalAidl {
+ public:
+ // Return the audio hardware driver estimated latency in milliseconds.
+ status_t getLatency(uint32_t *latency) override;
+
+ // Use this method in situations where audio mixing is done in the hardware.
+ status_t setVolume(float left, float right) override;
+
+ // Selects the audio presentation (if available).
+ status_t selectPresentation(int presentationId, int programId) override;
+
+ // Write audio buffer to driver.
+ status_t write(const void *buffer, size_t bytes, size_t *written) override;
+
+ // Return the number of audio frames written by the audio dsp to DAC since
+ // the output has exited standby.
+ status_t getRenderPosition(uint32_t *dspFrames) override;
+
+ // Get the local time at which the next write to the audio driver will be presented.
+ status_t getNextWriteTimestamp(int64_t *timestamp) override;
+
+ // Set the callback for notifying completion of non-blocking write and drain.
+ status_t setCallback(wp<StreamOutHalInterfaceCallback> callback) override;
+
+ // Returns whether pause and resume operations are supported.
+ status_t supportsPauseAndResume(bool *supportsPause, bool *supportsResume) override;
+
+    // Notifies to the audio driver to stop playback however the queued buffers are retained.
+ status_t pause() override;
+
+ // Notifies to the audio driver to resume playback following a pause.
+ status_t resume() override;
+
+ // Returns whether drain operation is supported.
+ status_t supportsDrain(bool *supportsDrain) override;
+
+ // Requests notification when data buffered by the driver/hardware has been played.
+ status_t drain(bool earlyNotify) override;
+
+ // Notifies to the audio driver to flush the queued data.
+ status_t flush() override;
+
+ // Return a recent count of the number of audio frames presented to an external observer.
+ status_t getPresentationPosition(uint64_t *frames, struct timespec *timestamp) override;
+
+ // Called when the metadata of the stream's source has been changed.
+ status_t updateSourceMetadata(const SourceMetadata& sourceMetadata) override;
+
+ // Returns the Dual Mono mode presentation setting.
+ status_t getDualMonoMode(audio_dual_mono_mode_t* mode) override;
+
+ // Sets the Dual Mono mode presentation on the output device.
+ status_t setDualMonoMode(audio_dual_mono_mode_t mode) override;
+
+ // Returns the Audio Description Mix level in dB.
+ status_t getAudioDescriptionMixLevel(float* leveldB) override;
+
+ // Sets the Audio Description Mix level in dB.
+ status_t setAudioDescriptionMixLevel(float leveldB) override;
+
+ // Retrieves current playback rate parameters.
+ status_t getPlaybackRateParameters(audio_playback_rate_t* playbackRate) override;
+
+ // Sets the playback rate parameters that control playback behavior.
+ status_t setPlaybackRateParameters(const audio_playback_rate_t& playbackRate) override;
+
+ status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) override;
+
+ status_t setLatencyMode(audio_latency_mode_t mode) override;
+ status_t getRecommendedLatencyModes(std::vector<audio_latency_mode_t> *modes) override;
+ status_t setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback) override;
+
+ status_t exit() override;
+
+ private:
+ friend class sp<StreamOutHalAidl>;
+
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamOut> mStream;
+ const wp<CallbackBroker> mCallbackBroker;
+
+ // Can not be constructed directly by clients.
+ StreamOutHalAidl(
+ const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamOut>& stream,
+ const sp<CallbackBroker>& callbackBroker);
+
+ ~StreamOutHalAidl() override;
+};
+
+class StreamInHalAidl : public StreamInHalInterface, public StreamHalAidl {
+ public:
+ // Set the input gain for the audio driver.
+ status_t setGain(float gain) override;
+
+ // Read audio buffer in from driver.
+ status_t read(void *buffer, size_t bytes, size_t *read) override;
+
+ // Return the amount of input frames lost in the audio driver.
+ status_t getInputFramesLost(uint32_t *framesLost) override;
+
+ // Return a recent count of the number of audio frames received and
+ // the clock time associated with that frame count.
+ status_t getCapturePosition(int64_t *frames, int64_t *time) override;
+
+ // Get active microphones
+ status_t getActiveMicrophones(std::vector<media::MicrophoneInfo> *microphones) override;
+
+ // Set microphone direction (for processing)
+ status_t setPreferredMicrophoneDirection(
+ audio_microphone_direction_t direction) override;
+
+ // Set microphone zoom (for processing)
+ status_t setPreferredMicrophoneFieldDimension(float zoom) override;
+
+ // Called when the metadata of the stream's sink has been changed.
+ status_t updateSinkMetadata(const SinkMetadata& sinkMetadata) override;
+
+ private:
+ friend class sp<StreamInHalAidl>;
+
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamIn> mStream;
+
+ // Can not be constructed directly by clients.
+ StreamInHalAidl(
+ const audio_config& config, StreamContextAidl&& context, int32_t nominalLatency,
+ const std::shared_ptr<::aidl::android::hardware::audio::core::IStreamIn>& stream);
+
+ ~StreamInHalAidl() override = default;
+};
+
+} // namespace android
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 76f9a60..2c289e1 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -46,9 +46,6 @@
using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
-#define TIME_CHECK() auto TimeCheck = \
- mediautils::makeTimeCheckStatsForClassMethod(getClassName(), __func__)
-
StreamHalHidl::StreamHalHidl(std::string_view className, IStream *stream)
: CoreConversionHelperHidl(className),
mStream(stream),
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
new file mode 100644
index 0000000..15768b3
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.cpp
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionAec"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_aec.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionAec.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::AcousticEchoCanceler;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionAec::setParameter(EffectParamReader& param) {
+ uint32_t type, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type) ||
+ OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ switch (type) {
+ case AEC_PARAM_ECHO_DELAY:
+ FALLTHROUGH_INTENDED;
+ case AEC_PARAM_PROPERTIES: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint32_echoDelay_Parameter_aec(value));
+ break;
+ }
+ case AEC_PARAM_MOBILE_MODE: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint32_mobileMode_Parameter_aec(value));
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionAec::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ param.setStatus(BAD_VALUE);
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case AEC_PARAM_ECHO_DELAY:
+ FALLTHROUGH_INTENDED;
+ case AEC_PARAM_PROPERTIES: {
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(AcousticEchoCanceler, acousticEchoCancelerTag,
+ AcousticEchoCanceler::echoDelayUs);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_aec_uint32_echoDelay(aidlParam));
+ break;
+ }
+ case AEC_PARAM_MOBILE_MODE: {
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(AcousticEchoCanceler, acousticEchoCancelerTag,
+ AcousticEchoCanceler::mobileMode);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_aec_uint32_mobileMode(aidlParam));
+ break;
+ }
+ default:
+ // use vendor extension implementation
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.h
new file mode 100644
index 0000000..3ee419a
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAec.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionAec : public EffectConversionHelperAidl {
+ public:
+ AidlConversionAec(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionAec() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
new file mode 100644
index 0000000..80df2b8
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.cpp
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionAgc2"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_agc2.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionAgc2.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::AutomaticGainControl;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionAgc2::setParameter(EffectParamReader& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case AGC2_PARAM_FIXED_DIGITAL_GAIN: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint32_fixedDigitalGain_Parameter_agc(value));
+ break;
+ }
+ case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint32_levelEstimator_Parameter_agc(value));
+ break;
+ }
+ case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint32_saturationMargin_Parameter_agc(value));
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionAgc2::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case AGC2_PARAM_FIXED_DIGITAL_GAIN: {
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControl, automaticGainControlTag,
+ AutomaticGainControl::fixedDigitalGainMb);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_agc_uint32_fixedDigitalGain(aidlParam));
+ break;
+ }
+ case AGC2_PARAM_ADAPT_DIGI_LEVEL_ESTIMATOR: {
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControl, automaticGainControlTag,
+ AutomaticGainControl::levelEstimator);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_agc_uint32_levelEstimator(aidlParam));
+ break;
+ }
+ case AGC2_PARAM_ADAPT_DIGI_EXTRA_SATURATION_MARGIN: {
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(AutomaticGainControl, automaticGainControlTag,
+ AutomaticGainControl::saturationMarginMb);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_agc_uint32_saturationMargin(aidlParam));
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.h
new file mode 100644
index 0000000..8f7eac7
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionAgc2.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionAgc2 : public EffectConversionHelperAidl {
+ public:
+ AidlConversionAgc2(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionAgc2() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
new file mode 100644
index 0000000..038b7df
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.cpp
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionBassBoost"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_bassboost.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionBassBoost.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::convertIntegral;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::BassBoost;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionBassBoost::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case BASSBOOST_PARAM_STRENGTH: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_uint16_strengthPm_Parameter_BassBoost(value));
+ break;
+ }
+ case BASSBOOST_PARAM_STRENGTH_SUPPORTED: {
+ ALOGW("%s set BASSBOOST_PARAM_STRENGTH_SUPPORTED not supported", __func__);
+ return BAD_VALUE;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionBassBoost::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case BASSBOOST_PARAM_STRENGTH: {
+            uint16_t value;
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(BassBoost, bassBoostTag, BassBoost::strengthPm);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_BassBoost_uint16_strengthPm(aidlParam));
+ return param.writeToValue(&value);
+ }
+ case BASSBOOST_PARAM_STRENGTH_SUPPORTED: {
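+            // Strength support is reported from the static capability in the descriptor;
+            // no HAL round-trip is needed.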
+            uint32_t value;
+ const auto& cap =
+ VALUE_OR_RETURN_STATUS(aidl::android::UNION_GET(mDesc.capability, bassBoost));
+ value = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(cap.strengthSupported));
+ return param.writeToValue(&value);
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.h
new file mode 100644
index 0000000..9664aa1
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionBassBoost.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionBassBoost : public EffectConversionHelperAidl {
+ public:
+ AidlConversionBassBoost(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionBassBoost() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
new file mode 100644
index 0000000..17cedf7
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionDownmix"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_downmix.h>
+
+#include <system/audio_effect.h>
+#include <utils/Log.h>
+
+#include "AidlConversionDownmix.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Downmix;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionDownmix::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ int16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(int16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case DOWNMIX_PARAM_TYPE: {
+ aidlParam = VALUE_OR_RETURN_STATUS(
+ aidl::android::legacy2aidl_int16_type_Parameter_Downmix(value));
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionDownmix::getParameter(EffectParamWriter& param) {
+ int16_t value = 0;
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case DOWNMIX_PARAM_TYPE: {
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Downmix, downmixTag, Downmix::type);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ value = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_Parameter_Downmix_int16_type(aidlParam));
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.h
new file mode 100644
index 0000000..8b28ca3
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDownmix.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BpEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionDownmix : public EffectConversionHelperAidl {
+ public:
+ AidlConversionDownmix(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionDownmix() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
new file mode 100644
index 0000000..4ecaa07
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.cpp
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionDp"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effect.h>
+#include <system/audio_effects/effect_dynamicsprocessing.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionDynamicsProcessing.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::convertIntegral;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Capability;
+using ::aidl::android::hardware::audio::effect::DynamicsProcessing;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::toString;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionDp::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case DP_PARAM_INPUT_GAIN: {
+ DynamicsProcessing::InputGain inputGainAidl;
+ if (OK != param.readFromParameter(&inputGainAidl.channel) ||
+ OK != param.readFromValue(&inputGainAidl.gainDb)) {
+ ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, inputGain,
+ {inputGainAidl});
+ break;
+ }
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ DynamicsProcessing::EngineArchitecture engine =
+ VALUE_OR_RETURN_STATUS(readEngineArchitectureFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing,
+ engineArchitecture, engine);
+ mEngine = engine;
+ break;
+ }
+ case DP_PARAM_PRE_EQ: {
+ DynamicsProcessing::ChannelConfig chConfig =
+ VALUE_OR_RETURN_STATUS(readChannelConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, preEq,
+ {chConfig});
+ break;
+ }
+ case DP_PARAM_POST_EQ: {
+ DynamicsProcessing::ChannelConfig chConfig =
+ VALUE_OR_RETURN_STATUS(readChannelConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, postEq,
+ {chConfig});
+ break;
+ }
+ case DP_PARAM_MBC: {
+ DynamicsProcessing::ChannelConfig chConfig =
+ VALUE_OR_RETURN_STATUS(readChannelConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, mbc,
+ {chConfig});
+ break;
+ }
+ case DP_PARAM_PRE_EQ_BAND: {
+ DynamicsProcessing::EqBandConfig bandConfig =
+ VALUE_OR_RETURN_STATUS(readEqBandConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, preEqBand,
+ {bandConfig});
+ break;
+ }
+ case DP_PARAM_POST_EQ_BAND: {
+ DynamicsProcessing::EqBandConfig bandConfig =
+ VALUE_OR_RETURN_STATUS(readEqBandConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, postEqBand,
+ {bandConfig});
+ break;
+ }
+ case DP_PARAM_MBC_BAND: {
+ DynamicsProcessing::MbcBandConfig bandConfig =
+ VALUE_OR_RETURN_STATUS(readMbcBandConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, mbcBand,
+ {bandConfig});
+ break;
+ }
+ case DP_PARAM_LIMITER: {
+ DynamicsProcessing::LimiterConfig config =
+ VALUE_OR_RETURN_STATUS(readLimiterConfigFromParam(param));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(DynamicsProcessing, dynamicsProcessing, limiter,
+ {config});
+ break;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionDp::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ if (OK != param.readFromParameter(&type)) {
+        ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+        return BAD_VALUE;
+    }
+ Parameter aidlParam;
+ switch (type) {
+ case DP_PARAM_INPUT_GAIN: {
+ int32_t channel;
+ if (OK != param.readFromParameter(&channel)) {
+ ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
+ DynamicsProcessing::inputGain);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ DynamicsProcessing::Capability cap =
+ mDesc.capability.get<Capability::dynamicsProcessing>();
+ std::vector<DynamicsProcessing::InputGain> gains =
+ VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing,
+ DynamicsProcessing::inputGain,
+ std::vector<DynamicsProcessing::InputGain>));
+ for (const auto& gain : gains) {
+ if (gain.channel == channel) {
+ return param.writeToValue(&gain.gainDb);
+ }
+ }
+ ALOGE("%s not able to find channel %d", __func__, channel);
+ return BAD_VALUE;
+ }
+ case DP_PARAM_ENGINE_ARCHITECTURE: {
+ int32_t channel;
+ if (OK != param.readFromParameter(&channel)) {
+ ALOGE("%s invalid inputGain %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
+ DynamicsProcessing::engineArchitecture);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ DynamicsProcessing::EngineArchitecture engine =
+ VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing,
+ DynamicsProcessing::engineArchitecture,
+ DynamicsProcessing::EngineArchitecture));
+ int32_t resolution = VALUE_OR_RETURN_STATUS(
+ aidl::android::aidl2legacy_DynamicsProcessing_ResolutionPreference_int32(
+ engine.resolutionPreference));
+ int32_t preEqInUse =
+ VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.preEqStage.inUse));
+ int32_t mbcInUse =
+ VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.mbcStage.inUse));
+ int32_t postEqInUse =
+ VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.postEqStage.inUse));
+ int32_t limiterInUse =
+ VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(engine.limiterInUse));
+ if (OK != param.writeToValue(&resolution) ||
+ OK != param.writeToValue(&engine.preferredProcessingDurationMs) ||
+ OK != param.writeToValue(&preEqInUse) ||
+ OK != param.writeToValue(&engine.preEqStage.bandCount) ||
+ OK != param.writeToValue(&mbcInUse) ||
+ OK != param.writeToValue(&engine.mbcStage.bandCount) ||
+ OK != param.writeToValue(&postEqInUse) ||
+ OK != param.writeToValue(&engine.postEqStage.bandCount) ||
+ OK != param.writeToValue(&limiterInUse)) {
+ ALOGE("%s invalid engineArchitecture %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ mEngine = engine;
+ return OK;
+ }
+ case DP_PARAM_PRE_EQ: {
+ return getChannelConfig(DynamicsProcessing::preEq, param);
+ }
+ case DP_PARAM_POST_EQ: {
+ return getChannelConfig(DynamicsProcessing::postEq, param);
+ }
+ case DP_PARAM_MBC: {
+ return getChannelConfig(DynamicsProcessing::mbc, param);
+ }
+ case DP_PARAM_PRE_EQ_BAND: {
+ return getEqBandConfig(DynamicsProcessing::preEqBand, param);
+ }
+ case DP_PARAM_POST_EQ_BAND: {
+ return getEqBandConfig(DynamicsProcessing::postEqBand, param);
+ }
+ case DP_PARAM_MBC_BAND: {
+ return getMbcBandConfig(param);
+ }
+ case DP_PARAM_LIMITER: {
+ return getLimiterConfig(param);
+ }
+ case DP_PARAM_GET_CHANNEL_COUNT: {
+            // DP_PARAM_GET_CHANNEL_COUNT expects a channel count, not a channel mask.
+            uint32_t channel = audio_channel_count_from_in_mask(VALUE_OR_RETURN_STATUS(
+                    aidl::android::aidl2legacy_AudioChannelLayout_audio_channel_mask_t(
+                            mCommon.input.base.channelMask, true /* input */)));
+ if (OK != param.writeToValue(&channel)) {
+ ALOGE("%s write channel number %d to param failed %s", __func__, channel,
+ param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ default: {
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+}
+
+aidl::ConversionResult<DynamicsProcessing::ChannelConfig>
+AidlConversionDp::readChannelConfigFromParam(EffectParamReader& param) {
+ int32_t enable, channel;
+ if (OK != param.readFromParameter(&channel) || OK != param.readFromValue(&enable)) {
+ ALOGE("%s invalid channel config param %s", __func__, param.toString().c_str());
+ return ::android::base::unexpected(::android::BAD_VALUE);
+ }
+ return DynamicsProcessing::ChannelConfig(
+ {.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable)), .channel = channel});
+}
+
+aidl::ConversionResult<DynamicsProcessing::EqBandConfig>
+AidlConversionDp::readEqBandConfigFromParam(EffectParamReader& param) {
+ DynamicsProcessing::EqBandConfig config;
+ int32_t enable;
+ if (OK != param.readFromParameter(&config.channel) ||
+ OK != param.readFromParameter(&config.band) ||
+ OK != param.readFromValue(&enable) ||
+ OK != param.readFromValue(&config.cutoffFrequencyHz) ||
+ OK != param.readFromValue(&config.gainDb)) {
+ ALOGE("%s invalid eq band param %s", __func__, param.toString().c_str());
+ return ::android::base::unexpected(::android::BAD_VALUE);
+ }
+ config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
+ return config;
+}
+
+aidl::ConversionResult<DynamicsProcessing::MbcBandConfig>
+AidlConversionDp::readMbcBandConfigFromParam(EffectParamReader& param) {
+ DynamicsProcessing::MbcBandConfig config;
+ int32_t enable;
+ if (OK != param.readFromParameter(&config.channel) ||
+ OK != param.readFromParameter(&config.band) ||
+ OK != param.readFromValue(&enable) ||
+ OK != param.readFromValue(&config.cutoffFrequencyHz) ||
+ OK != param.readFromValue(&config.attackTimeMs) ||
+ OK != param.readFromValue(&config.releaseTimeMs) ||
+ OK != param.readFromValue(&config.ratio) ||
+ OK != param.readFromValue(&config.thresholdDb) ||
+ OK != param.readFromValue(&config.kneeWidthDb) ||
+ OK != param.readFromValue(&config.noiseGateThresholdDb) ||
+ OK != param.readFromValue(&config.expanderRatio) ||
+ OK != param.readFromValue(&config.preGainDb) ||
+ OK != param.readFromValue(&config.postGainDb)) {
+ ALOGE("%s invalid mbc band config param %s", __func__, param.toString().c_str());
+ return ::android::base::unexpected(::android::BAD_VALUE);
+ }
+ config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
+ return config;
+}
+
+aidl::ConversionResult<DynamicsProcessing::LimiterConfig>
+AidlConversionDp::readLimiterConfigFromParam(EffectParamReader& param) {
+ DynamicsProcessing::LimiterConfig config;
+ int32_t enable, inUse;
+ if (OK != param.readFromParameter(&config.channel) ||
+ OK != param.readFromValue(&inUse) ||
+ OK != param.readFromValue(&enable) ||
+ OK != param.readFromValue(&config.linkGroup) ||
+ OK != param.readFromValue(&config.attackTimeMs) ||
+ OK != param.readFromValue(&config.releaseTimeMs) ||
+ OK != param.readFromValue(&config.ratio) ||
+ OK != param.readFromValue(&config.thresholdDb) ||
+ OK != param.readFromValue(&config.postGainDb)) {
+ ALOGE("%s invalid limiter config param %s", __func__, param.toString().c_str());
+ return ::android::base::unexpected(::android::BAD_VALUE);
+ }
+ config.enable = VALUE_OR_RETURN(convertIntegral<bool>(enable));
+ return config;
+}
+
+aidl::ConversionResult<DynamicsProcessing::EngineArchitecture>
+AidlConversionDp::readEngineArchitectureFromParam(EffectParamReader& param) {
+ DynamicsProcessing::EngineArchitecture engine;
+ int32_t variant, preEqInUse, mbcInUse, postEqInUse, limiterInUse;
+    if (OK != param.readFromValue(&variant) ||
+        OK != param.readFromValue(&engine.preferredProcessingDurationMs) ||
+        OK != param.readFromValue(&preEqInUse) ||
+        OK != param.readFromValue(&engine.preEqStage.bandCount) ||
+        OK != param.readFromValue(&mbcInUse) ||
+        OK != param.readFromValue(&engine.mbcStage.bandCount) ||
+        OK != param.readFromValue(&postEqInUse) ||
+        OK != param.readFromValue(&engine.postEqStage.bandCount) ||
+        OK != param.readFromValue(&limiterInUse)) {
+ ALOGE("%s invalid engineArchitecture %s", __func__, param.toString().c_str());
+ return ::android::base::unexpected(::android::BAD_VALUE);
+ }
+
+ engine.resolutionPreference = VALUE_OR_RETURN(
+ aidl::android::legacy2aidl_int32_DynamicsProcessing_ResolutionPreference(variant));
+ engine.preEqStage.inUse = VALUE_OR_RETURN(convertIntegral<bool>(preEqInUse));
+ engine.mbcStage.inUse = VALUE_OR_RETURN(convertIntegral<bool>(mbcInUse));
+ engine.postEqStage.inUse = VALUE_OR_RETURN(convertIntegral<bool>(postEqInUse));
+ engine.limiterInUse = VALUE_OR_RETURN(convertIntegral<bool>(limiterInUse));
+ return engine;
+}
+
+status_t AidlConversionDp::getChannelConfig(DynamicsProcessing::Tag tag, EffectParamWriter& param) {
+ int32_t channel;
+ if (OK != param.readFromParameter(&channel)) {
+ ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag, tag);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ std::vector<DynamicsProcessing::ChannelConfig> channels;
+ int32_t inUse, bandCount;
+ switch (tag) {
+ case DynamicsProcessing::preEq: {
+ inUse = mEngine.preEqStage.inUse;
+ bandCount = mEngine.preEqStage.bandCount;
+ channels = VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, DynamicsProcessing::preEq,
+ std::vector<DynamicsProcessing::ChannelConfig>));
+ break;
+ }
+ case DynamicsProcessing::postEq: {
+ inUse = mEngine.postEqStage.inUse;
+ bandCount = mEngine.postEqStage.bandCount;
+ channels = VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, DynamicsProcessing::postEq,
+ std::vector<DynamicsProcessing::ChannelConfig>));
+ break;
+ }
+ case DynamicsProcessing::mbc: {
+ inUse = mEngine.mbcStage.inUse;
+ bandCount = mEngine.mbcStage.bandCount;
+ channels = VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, DynamicsProcessing::mbc,
+ std::vector<DynamicsProcessing::ChannelConfig>));
+ break;
+ }
+ default: {
+ ALOGE("%s unsupported tag %s", __func__, toString(tag).c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ for (const auto& ch : channels) {
+ if (ch.channel == channel) {
+ int32_t enable = ch.enable;
+ if (OK != param.writeToValue(&inUse) ||
+ OK != param.writeToValue(&enable) ||
+ OK != param.writeToValue(&bandCount)) {
+ ALOGE("%s failed to write into param value %s", __func__,
+ param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ }
+ ALOGE("%s not able to find channel %d", __func__, channel);
+ return BAD_VALUE;
+}
+
+status_t AidlConversionDp::getEqBandConfig(DynamicsProcessing::Tag tag, EffectParamWriter& param) {
+ int32_t channel, band;
+ if (OK != param.readFromParameter(&channel) || OK != param.readFromParameter(&band)) {
+ ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag, tag);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ std::vector<DynamicsProcessing::EqBandConfig> bands;
+ if (tag == DynamicsProcessing::preEqBand) {
+ bands = VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, preEqBand,
+ std::vector<DynamicsProcessing::EqBandConfig>));
+ } else if (tag == DynamicsProcessing::postEqBand) {
+ bands = VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, postEqBand,
+ std::vector<DynamicsProcessing::EqBandConfig>));
+ } else {
+ return BAD_VALUE;
+ }
+
+ for (const auto& bandIt : bands) {
+ if (bandIt.channel == channel && bandIt.band == band) {
+ int32_t enable = bandIt.enable;
+ if (OK != param.writeToValue(&enable) ||
+ OK != param.writeToValue(&bandIt.cutoffFrequencyHz) ||
+ OK != param.writeToValue(&bandIt.gainDb)) {
+ ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ }
+ ALOGE("%s not able to find channel %d band %d", __func__, channel, band);
+ return BAD_VALUE;
+}
+
+status_t AidlConversionDp::getMbcBandConfig(EffectParamWriter& param) {
+ int32_t channel, band;
+ if (OK != param.readFromParameter(&channel) || OK != param.readFromParameter(&band)) {
+ ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
+ DynamicsProcessing::mbcBand);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ std::vector<DynamicsProcessing::MbcBandConfig> bands =
+ VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, mbcBand,
+ std::vector<DynamicsProcessing::MbcBandConfig>));
+
+ for (const auto& bandIt : bands) {
+ if (bandIt.channel == channel && bandIt.band == band) {
+ int32_t enable = bandIt.enable;
+ if (OK != param.writeToValue(&enable) ||
+ OK != param.writeToValue(&bandIt.cutoffFrequencyHz) ||
+ OK != param.writeToValue(&bandIt.attackTimeMs) ||
+ OK != param.writeToValue(&bandIt.releaseTimeMs) ||
+ OK != param.writeToValue(&bandIt.ratio) ||
+ OK != param.writeToValue(&bandIt.thresholdDb) ||
+ OK != param.writeToValue(&bandIt.kneeWidthDb) ||
+ OK != param.writeToValue(&bandIt.noiseGateThresholdDb) ||
+ OK != param.writeToValue(&bandIt.expanderRatio) ||
+ OK != param.writeToValue(&bandIt.preGainDb) ||
+ OK != param.writeToValue(&bandIt.postGainDb)) {
+ ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ }
+ ALOGE("%s not able to find channel %d band %d", __func__, channel, band);
+ return BAD_VALUE;
+}
+
+status_t AidlConversionDp::getLimiterConfig(EffectParamWriter& param) {
+ int32_t channel;
+ if (OK != param.readFromParameter(&channel)) {
+ ALOGE("%s invalid parameter %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(DynamicsProcessing, dynamicsProcessingTag,
+ DynamicsProcessing::limiter);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+
+ std::vector<DynamicsProcessing::LimiterConfig> configs =
+ VALUE_OR_RETURN_STATUS(aidl::android::GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, DynamicsProcessing, dynamicsProcessing, limiter,
+ std::vector<DynamicsProcessing::LimiterConfig>));
+
+ for (const auto& config : configs) {
+ if (config.channel == channel) {
+ int32_t inUse = mEngine.limiterInUse;
+ int32_t enable = config.enable;
+ if (OK != param.writeToValue(&inUse) ||
+ OK != param.writeToValue(&enable) ||
+ OK != param.writeToValue(&config.linkGroup) ||
+ OK != param.writeToValue(&config.attackTimeMs) ||
+ OK != param.writeToValue(&config.releaseTimeMs) ||
+ OK != param.writeToValue(&config.ratio) ||
+ OK != param.writeToValue(&config.thresholdDb) ||
+ OK != param.writeToValue(&config.postGainDb)) {
+ ALOGE("%s failed to write into param value %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return OK;
+ }
+ }
+ ALOGE("%s not able to find channel %d", __func__, channel);
+ return BAD_VALUE;
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.h
new file mode 100644
index 0000000..6bab18d
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionDynamicsProcessing.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BpEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionDp : public EffectConversionHelperAidl {
+ public:
+ AidlConversionDp(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionDp() {}
+
+ private:
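+    // Cached engine architecture, used when serializing channel, band, and limiter configs
+    // back into the legacy format (the legacy replies include inUse/bandCount fields).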
+ aidl::android::hardware::audio::effect::DynamicsProcessing::EngineArchitecture mEngine;
+
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+
+ aidl::ConversionResult<
+ aidl::android::hardware::audio::effect::DynamicsProcessing::ChannelConfig>
+ readChannelConfigFromParam(utils::EffectParamReader& param);
+ aidl::ConversionResult<aidl::android::hardware::audio::effect::DynamicsProcessing::EqBandConfig>
+ readEqBandConfigFromParam(utils::EffectParamReader& param);
+ aidl::ConversionResult<
+ aidl::android::hardware::audio::effect::DynamicsProcessing::MbcBandConfig>
+ readMbcBandConfigFromParam(utils::EffectParamReader& param);
+ aidl::ConversionResult<
+ aidl::android::hardware::audio::effect::DynamicsProcessing::LimiterConfig>
+ readLimiterConfigFromParam(utils::EffectParamReader& param);
+ aidl::ConversionResult<
+ aidl::android::hardware::audio::effect::DynamicsProcessing::EngineArchitecture>
+ readEngineArchitectureFromParam(utils::EffectParamReader& param);
+
+ status_t getChannelConfig(aidl::android::hardware::audio::effect::DynamicsProcessing::Tag tag,
+ utils::EffectParamWriter& writer);
+ status_t getEqBandConfig(aidl::android::hardware::audio::effect::DynamicsProcessing::Tag tag,
+ utils::EffectParamWriter& param);
+ status_t getMbcBandConfig(utils::EffectParamWriter& param);
+ status_t getLimiterConfig(utils::EffectParamWriter& param);
+
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
new file mode 100644
index 0000000..960273b
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.cpp
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionEnvReverb"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionCppNdk.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_environmentalreverb.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionEnvReverb.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::convertIntegral;
+using ::aidl::android::getParameterSpecificField;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::EnvironmentalReverb;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+#define MAKE_AIDL_PARAMETER(aidlParam, param, value, tag) \
+ { \
+ if (OK != param.readFromValue(&value)) { \
+ ALOGE("%s invalid parameter %s %d", __func__, #tag, value); \
+ return BAD_VALUE; \
+ } \
+ aidlParam = MAKE_SPECIFIC_PARAMETER( \
+ EnvironmentalReverb, environmentalReverb, tag, \
+ VALUE_OR_RETURN_STATUS(aidl::android::convertIntegral<int>(value))); \
+ }
+
+#define GET_AIDL_PARAMETER(tag, value, param) \
+ { \
+ Parameter aidlParam; \
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(EnvironmentalReverb, environmentalReverbTag, \
+ EnvironmentalReverb::tag); \
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam))); \
+ value = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD( \
+ aidlParam, EnvironmentalReverb, environmentalReverb, EnvironmentalReverb::tag, \
+ std::decay_t<decltype(value)>)); \
+ return param.writeToValue(&value); \
+ }
+
+status_t AidlConversionEnvReverb::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+    uint16_t value16 = 0;
+    uint32_t value32 = 0;
+ switch (type) {
+ case REVERB_PARAM_ROOM_LEVEL: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, roomLevelMb);
+ break;
+ }
+ case REVERB_PARAM_ROOM_HF_LEVEL: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, roomHfLevelMb);
+ break;
+ }
+ case REVERB_PARAM_DECAY_TIME: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value32, decayTimeMs);
+ break;
+ }
+ case REVERB_PARAM_DECAY_HF_RATIO: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, decayHfRatioPm);
+ break;
+ }
+ case REVERB_PARAM_REVERB_LEVEL: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, levelMb);
+ break;
+ }
+ case REVERB_PARAM_REVERB_DELAY: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value32, delayMs);
+ break;
+ }
+ case REVERB_PARAM_DIFFUSION: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, diffusionPm);
+ break;
+ }
+ case REVERB_PARAM_DENSITY: {
+ MAKE_AIDL_PARAMETER(aidlParam, param, value16, densityPm);
+ break;
+ }
+ case REVERB_PARAM_BYPASS: {
+ if (OK != param.readFromValue(&value32)) {
+ ALOGE("%s invalid bypass parameter %d", __func__, value32);
+ return BAD_VALUE;
+ }
+ bool isByPass = VALUE_OR_RETURN_STATUS(aidl::android::convertIntegral<bool>(value32));
+ aidlParam = MAKE_SPECIFIC_PARAMETER(EnvironmentalReverb, environmentalReverb, bypass,
+ isByPass);
+ break;
+ }
+ case REVERB_PARAM_REFLECTIONS_LEVEL: {
+ // TODO
+ break;
+ }
+ case REVERB_PARAM_REFLECTIONS_DELAY: {
+ // TODO
+ break;
+ }
+ case REVERB_PARAM_PROPERTIES: {
+ // TODO
+ break;
+ }
+ default: {
+ // TODO: handle with vendor extension
+ }
+ }
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionEnvReverb::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ uint16_t value16;
+ uint32_t value32;
+ switch (type) {
+ case REVERB_PARAM_ROOM_LEVEL: {
+ GET_AIDL_PARAMETER(roomLevelMb, value16, param);
+ }
+ case REVERB_PARAM_ROOM_HF_LEVEL: {
+ GET_AIDL_PARAMETER(roomHfLevelMb, value16, param);
+ }
+ case REVERB_PARAM_DECAY_TIME: {
+ GET_AIDL_PARAMETER(decayTimeMs, value32, param);
+ }
+ case REVERB_PARAM_DECAY_HF_RATIO: {
+ GET_AIDL_PARAMETER(decayHfRatioPm, value16, param);
+ }
+ case REVERB_PARAM_REVERB_LEVEL: {
+ GET_AIDL_PARAMETER(levelMb, value16, param);
+ }
+ case REVERB_PARAM_REVERB_DELAY: {
+ GET_AIDL_PARAMETER(delayMs, value32, param);
+ }
+ case REVERB_PARAM_DIFFUSION: {
+ GET_AIDL_PARAMETER(diffusionPm, value16, param);
+ }
+ case REVERB_PARAM_DENSITY: {
+ GET_AIDL_PARAMETER(densityPm, value16, param);
+ }
+ case REVERB_PARAM_BYPASS: {
+ bool isByPass;
+ GET_AIDL_PARAMETER(bypass, isByPass, param);
+ }
+ case REVERB_PARAM_REFLECTIONS_LEVEL: {
+ // TODO
+ break;
+ }
+ case REVERB_PARAM_REFLECTIONS_DELAY: {
+ // TODO
+ break;
+ }
+ case REVERB_PARAM_PROPERTIES: {
+ // TODO
+ break;
+ }
+ default: {
+ // TODO: handle with vendor extension
+ }
+ }
+ return BAD_VALUE;
+}
+
+} // namespace effect
+} // namespace android
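Note: the MAKE_AIDL_PARAMETER / GET_AIDL_PARAMETER macros above concentrate the per-tag legacy-to-AIDL plumbing. As a rough, non-authoritative sketch (the real expansion is owned by MAKE_SPECIFIC_PARAMETER in media/AidlConversionEffect.h), the roomLevelMb set path is conceptually:

    uint16_t value16 = 0;
    if (OK != param.readFromValue(&value16)) return BAD_VALUE;  // pull the legacy 16-bit value
    // Wrap it in the nested AIDL unions: EnvironmentalReverb -> Parameter::Specific -> Parameter.
    auto er = EnvironmentalReverb::make<EnvironmentalReverb::roomLevelMb>(
            static_cast<int>(value16));
    Parameter aidlParam = Parameter::make<Parameter::specific>(
            Parameter::Specific::make<Parameter::Specific::environmentalReverb>(er));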
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.h
new file mode 100644
index 0000000..8b92374
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEnvReverb.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionEnvReverb : public EffectConversionHelperAidl {
+ public:
+ AidlConversionEnvReverb(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionEnvReverb() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
new file mode 100644
index 0000000..a10d271
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.cpp
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionEQ"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_equalizer.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionEq.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::getParameterSpecificField;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Equalizer;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionEq::setParameter(EffectParamReader& param) {
+ uint32_t type;
+ uint16_t value = 0;
+    if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) ||
+ OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+
+ Parameter aidlParam;
+ switch (type) {
+ case EQ_PARAM_CUR_PRESET: {
+ aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, preset, (int)value);
+ break;
+ }
+        case EQ_PARAM_BAND_LEVEL: {
+            int32_t band;
+            if (OK != param.readFromParameter(&band)) {
+                ALOGE("%s invalid bandLevel param %s", __func__, param.toString().c_str());
+                return BAD_VALUE;
+            }
+            // the 16-bit band level (in mB) was already read from the value area into `value`
+            std::vector<Equalizer::BandLevel> bandLevels = {{.index = band, .levelMb = static_cast<int16_t>(value)}};
+            aidlParam = MAKE_SPECIFIC_PARAMETER(Equalizer, equalizer, bandLevels, bandLevels);
+            break;
+        }
+ case EQ_PARAM_PROPERTIES: {
+ // TODO: handle properties setting
+ break;
+ }
+ default: {
+ // TODO: implement vendor extension parameters
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+aidl::ConversionResult<Parameter> AidlConversionEq::getAidlParameter(Equalizer::Tag tag) {
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(Equalizer, equalizerTag, tag);
+ RETURN_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ return aidlParam;
+}
+
+status_t AidlConversionEq::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ param.setStatus(BAD_VALUE);
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case EQ_PARAM_NUM_BANDS: {
+ aidlParam = VALUE_OR_RETURN_STATUS(getAidlParameter(Equalizer::bandLevels));
+ auto bandLevels = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, Equalizer, equalizer, Equalizer::bandLevels,
+ std::vector<Equalizer::BandLevel>));
+ uint32_t num = bandLevels.size();
+ return param.writeToValue(&num);
+ }
+ default:
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
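Note: a minimal illustrative sketch of the legacy blob that setParameter() above consumes for EQ_PARAM_CUR_PRESET, assuming the effect_param_t layout from system/audio_effect.h (the preset index 2 is arbitrary):

    #include <cstdint>
    #include <vector>
    #include <system/audio_effect.h>
    #include <system/audio_effects/effect_equalizer.h>

    std::vector<uint8_t> buf(sizeof(effect_param_t) + sizeof(uint32_t) + sizeof(uint16_t));
    auto* p = reinterpret_cast<effect_param_t*>(buf.data());
    p->psize = sizeof(uint32_t);  // parameter area: the EQ_PARAM_* selector
    p->vsize = sizeof(uint16_t);  // value area: the 16-bit preset index
    *reinterpret_cast<uint32_t*>(p->data) = EQ_PARAM_CUR_PRESET;
    *reinterpret_cast<uint16_t*>(p->data + sizeof(uint32_t)) = 2;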
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h
new file mode 100644
index 0000000..0433965
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionEq.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionEq : public EffectConversionHelperAidl {
+ public:
+ AidlConversionEq(std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionEq() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+ aidl::ConversionResult<::aidl::android::hardware::audio::effect::Parameter> getAidlParameter(
+ ::aidl::android::hardware::audio::effect::Equalizer::Tag tag);
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
new file mode 100644
index 0000000..9575e7d
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionHapticGenerator"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionHapticGenerator.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::HapticGenerator;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionHapticGenerator::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case HG_PARAM_HAPTIC_INTENSITY: {
+            int32_t id = 0, scale = 0;
+ if (OK != param.readFromValue(&id) || OK != param.readFromValue(&scale)) {
+ ALOGE("%s invalid intensity %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ HapticGenerator::HapticScale hpScale(
+ {.id = id, .scale = (HapticGenerator::VibratorScale)(scale)});
+ aidlParam = MAKE_SPECIFIC_PARAMETER(HapticGenerator, hapticGenerator, hapticScales,
+ {hpScale});
+ break;
+ }
+ case HG_PARAM_VIBRATOR_INFO: {
+ float resonantFrequencyHz, qFactor, maxAmplitude;
+ if (OK != param.readFromValue(&resonantFrequencyHz) ||
+ OK != param.readFromValue(&qFactor) || OK != param.readFromValue(&maxAmplitude)) {
+ ALOGE("%s invalid vibrator info %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ HapticGenerator::VibratorInformation info({.resonantFrequencyHz = resonantFrequencyHz,
+ .qFactor = qFactor,
+ .maxAmplitude = maxAmplitude});
+ aidlParam =
+ MAKE_SPECIFIC_PARAMETER(HapticGenerator, hapticGenerator, vibratorInfo, info);
+ break;
+ }
+ default: {
+ // TODO: implement vendor extension parameters
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+// No parameter to get for HapticGenerator
+status_t AidlConversionHapticGenerator::getParameter(EffectParamWriter& param __unused) {
+ return OK;
+}
+
+} // namespace effect
+} // namespace android
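Note: purely to document what the reads in setParameter() above imply, the value area after the 32-bit type selector is laid out as follows (hypothetical structs, not part of this change):

    struct HgIntensityValue {       // HG_PARAM_HAPTIC_INTENSITY
        int32_t id;                 // haptic track / channel id
        int32_t scale;              // cast to HapticGenerator::VibratorScale
    };
    struct HgVibratorInfoValue {    // HG_PARAM_VIBRATOR_INFO
        float resonantFrequencyHz;
        float qFactor;
        float maxAmplitude;
    };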
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.h
new file mode 100644
index 0000000..03114a5
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionHapticGenerator.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionHapticGenerator : public EffectConversionHelperAidl {
+ public:
+ AidlConversionHapticGenerator(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionHapticGenerator() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
new file mode 100644
index 0000000..e3c898f
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.cpp
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionLoudnessEnhancer"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_loudnessenhancer.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionLoudnessEnhancer.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::getParameterSpecificField;
+using ::aidl::android::hardware::audio::effect::LoudnessEnhancer;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionLoudnessEnhancer::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ int32_t gain = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&gain)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ switch (type) {
+ case LOUDNESS_ENHANCER_PARAM_TARGET_GAIN_MB: {
+ aidlParam = MAKE_SPECIFIC_PARAMETER(LoudnessEnhancer, loudnessEnhancer, gainMb, gain);
+ break;
+ }
+ default: {
+ // TODO: implement vendor extension parameters
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionLoudnessEnhancer::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ switch (type) {
+ case LOUDNESS_ENHANCER_PARAM_TARGET_GAIN_MB: {
+ Parameter aidlParam;
+ Parameter::Id id = MAKE_SPECIFIC_PARAMETER_ID(LoudnessEnhancer, loudnessEnhancerTag,
+ LoudnessEnhancer::gainMb);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ int32_t gain = VALUE_OR_RETURN_STATUS(GET_PARAMETER_SPECIFIC_FIELD(
+ aidlParam, LoudnessEnhancer, loudnessEnhancer, LoudnessEnhancer::gainMb,
+ std::decay_t<decltype(gain)>));
+ return param.writeToValue(&gain);
+ }
+ default: {
+ // TODO: implement vendor extension parameters
+ ALOGW("%s unknown param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ }
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.h
new file mode 100644
index 0000000..c0402f9
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionLoudnessEnhancer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionLoudnessEnhancer : public EffectConversionHelperAidl {
+ public:
+ AidlConversionLoudnessEnhancer(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionLoudnessEnhancer() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
new file mode 100644
index 0000000..5faf645
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionNoiseSuppression"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_ns.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionNoiseSuppression.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionNoiseSuppression::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ // TODO
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionNoiseSuppression::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ // TODO
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.h
new file mode 100644
index 0000000..f51e13a
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionNoiseSuppression.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionNoiseSuppression : public EffectConversionHelperAidl {
+ public:
+ AidlConversionNoiseSuppression(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionNoiseSuppression() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
new file mode 100644
index 0000000..3e9bf4b
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.cpp
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionPresetReverb"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_presetreverb.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionPresetReverb.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::convertIntegral;
+using ::aidl::android::getParameterSpecificField;
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::aidl::android::hardware::audio::effect::PresetReverb;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionPresetReverb::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ if (type == REVERB_PARAM_PRESET) {
+ uint16_t value = 0;
+ if (OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid preset value %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ aidlParam = MAKE_SPECIFIC_PARAMETER(PresetReverb, presetReverb, preset,
+ static_cast<PresetReverb::Presets>(value));
+ } else {
+ // handle vendor extension
+ }
+
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionPresetReverb::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ ALOGE("%s enter %s", __func__, param.toString().c_str());
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ if (type == REVERB_PARAM_PRESET) {
+ Parameter aidlParam;
+ Parameter::Id id =
+ MAKE_SPECIFIC_PARAMETER_ID(PresetReverb, presetReverbTag, PresetReverb::preset);
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(mEffect->getParameter(id, &aidlParam)));
+ auto aidlPreset = VALUE_OR_RETURN_STATUS(
+ GET_PARAMETER_SPECIFIC_FIELD(aidlParam, PresetReverb, presetReverb,
+ PresetReverb::preset, PresetReverb::Presets));
+ value = static_cast<uint16_t>(aidlPreset);
+ } else {
+ // handle vendor extension
+ }
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.h
new file mode 100644
index 0000000..397d6e6
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionPresetReverb.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionPresetReverb : public EffectConversionHelperAidl {
+ public:
+ AidlConversionPresetReverb(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionPresetReverb() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
new file mode 100644
index 0000000..1dac479
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionSpatializer"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionSpatializer.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionSpatializer::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ // TODO
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionSpatializer::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ // TODO
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.h
new file mode 100644
index 0000000..c44567c
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionSpatializer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionSpatializer : public EffectConversionHelperAidl {
+ public:
+ AidlConversionSpatializer(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionSpatializer() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
new file mode 100644
index 0000000..a035614
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.cpp
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionVendorExtension"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionVendorExtension.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionVendorExtension::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ // TODO
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionVendorExtension::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ // TODO
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.h
new file mode 100644
index 0000000..fd22e5c
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVendorExtension.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionVendorExtension : public EffectConversionHelperAidl {
+ public:
+ AidlConversionVendorExtension(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionVendorExtension() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
new file mode 100644
index 0000000..482114d
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionVirtualizer"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionVirtualizer.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionVirtualizer::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ // TODO
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionVirtualizer::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ // TODO
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.h
new file mode 100644
index 0000000..91c0fcd
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVirtualizer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionVirtualizer : public EffectConversionHelperAidl {
+ public:
+ AidlConversionVirtualizer(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionVirtualizer() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
new file mode 100644
index 0000000..9ed601f
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstdint>
+#include <cstring>
+#include <optional>
+#define LOG_TAG "AidlConversionVisualizer"
+//#define LOG_NDEBUG 0
+
+#include <error/expected_utils.h>
+#include <media/AidlConversionNdk.h>
+#include <media/AidlConversionEffect.h>
+#include <media/audiohal/AudioEffectUuid.h>
+#include <system/audio_effects/effect_visualizer.h>
+
+#include <utils/Log.h>
+
+#include "AidlConversionVisualizer.h"
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::aidl_utils::statusTFromBinderStatus;
+using ::aidl::android::hardware::audio::effect::Parameter;
+using ::android::status_t;
+using utils::EffectParamReader;
+using utils::EffectParamWriter;
+
+status_t AidlConversionVisualizer::setParameter(EffectParamReader& param) {
+ uint32_t type = 0;
+ uint16_t value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint16_t)) ||
+ OK != param.readFromParameter(&type) || OK != param.readFromValue(&value)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ return BAD_VALUE;
+ }
+ Parameter aidlParam;
+ // TODO
+ return statusTFromBinderStatus(mEffect->setParameter(aidlParam));
+}
+
+status_t AidlConversionVisualizer::getParameter(EffectParamWriter& param) {
+ uint32_t type = 0, value = 0;
+ if (!param.validateParamValueSize(sizeof(uint32_t), sizeof(uint32_t)) ||
+ OK != param.readFromParameter(&type)) {
+ ALOGE("%s invalid param %s", __func__, param.toString().c_str());
+ param.setStatus(BAD_VALUE);
+ return BAD_VALUE;
+ }
+ // TODO
+ return param.writeToValue(&value);
+}
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h
new file mode 100644
index 0000000..a7e4ea1
--- /dev/null
+++ b/media/libaudiohal/impl/effectsAidlConversion/AidlConversionVisualizer.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/IEffect.h>
+#include "EffectConversionHelperAidl.h"
+
+namespace android {
+namespace effect {
+
+class AidlConversionVisualizer : public EffectConversionHelperAidl {
+ public:
+ AidlConversionVisualizer(
+ std::shared_ptr<::aidl::android::hardware::audio::effect::IEffect> effect,
+ int32_t sessionId, int32_t ioId,
+ const ::aidl::android::hardware::audio::effect::Descriptor& desc)
+ : EffectConversionHelperAidl(effect, sessionId, ioId, desc) {}
+ ~AidlConversionVisualizer() {}
+
+ private:
+ status_t setParameter(utils::EffectParamReader& param) override;
+ status_t getParameter(utils::EffectParamWriter& param) override;
+};
+
+} // namespace effect
+} // namespace android
diff --git a/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h b/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h
new file mode 100644
index 0000000..b21e4c9
--- /dev/null
+++ b/media/libaudiohal/include/media/audiohal/AudioEffectUuid.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/media/audio/common/AudioUuid.h>
+
+namespace android {
+namespace effect {
+
+using ::aidl::android::media::audio::common::AudioUuid;
+
+// 7b491460-8d4d-11e0-bd61-0002a5d5c51b
+static const AudioUuid kAcousticEchoCancelerTypeUUID = {static_cast<int32_t>(0x7b491460),
+ 0x8d4d,
+ 0x11e0,
+ 0xbd61,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// ae3c653b-be18-4ab8-8938-418f0a7f06ac
+static const AudioUuid kAutomaticGainControl2TypeUUID = {static_cast<int32_t>(0xae3c653b),
+ 0xbe18,
+ 0x4ab8,
+ 0x8938,
+ {0x41, 0x8f, 0x0a, 0x7f, 0x06, 0xac}};
+// 0634f220-ddd4-11db-a0fc-0002a5d5c51b
+static const AudioUuid kBassBoostTypeUUID = {static_cast<int32_t>(0x0634f220),
+ 0xddd4,
+ 0x11db,
+ 0xa0fc,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// fa81862a-588b-11ed-9b6a-0242ac120002
+static const AudioUuid kDownmixTypeUUID = {static_cast<int32_t>(0xfa81862a),
+ 0x588b,
+ 0x11ed,
+ 0x9b6a,
+ {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+// 7261676f-6d75-7369-6364-28e2fd3ac39e
+static const AudioUuid kDynamicsProcessingTypeUUID = {static_cast<int32_t>(0x7261676f),
+ 0x6d75,
+ 0x7369,
+ 0x6364,
+ {0x28, 0xe2, 0xfd, 0x3a, 0xc3, 0x9e}};
+// 0bed4300-ddd6-11db-8f34-0002a5d5c51b
+static const AudioUuid kEqualizerTypeUUID = {static_cast<int32_t>(0x0bed4300),
+ 0xddd6,
+ 0x11db,
+ 0x8f34,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// 1411e6d6-aecd-4021-a1cf-a6aceb0d71e5
+static const AudioUuid kHapticGeneratorTypeUUID = {static_cast<int32_t>(0x1411e6d6),
+ 0xaecd,
+ 0x4021,
+ 0xa1cf,
+ {0xa6, 0xac, 0xeb, 0x0d, 0x71, 0xe5}};
+// fe3199be-aed0-413f-87bb-11260eb63cf1
+static const AudioUuid kLoudnessEnhancerTypeUUID = {static_cast<int32_t>(0xfe3199be),
+ 0xaed0,
+ 0x413f,
+ 0x87bb,
+ {0x11, 0x26, 0x0e, 0xb6, 0x3c, 0xf1}};
+// c2e5d5f0-94bd-4763-9cac-4e234d06839e
+static const AudioUuid kEnvReverbTypeUUID = {static_cast<int32_t>(0xc2e5d5f0),
+ 0x94bd,
+ 0x4763,
+ 0x9cac,
+ {0x4e, 0x23, 0x4d, 0x06, 0x83, 0x9e}};
+// 58b4b260-8e06-11e0-aa8e-0002a5d5c51b
+static const AudioUuid kNoiseSuppressionTypeUUID = {static_cast<int32_t>(0x58b4b260),
+ 0x8e06,
+ 0x11e0,
+ 0xaa8e,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// 47382d60-ddd8-11db-bf3a-0002a5d5c51b
+static const AudioUuid kPresetReverbTypeUUID = {static_cast<int32_t>(0x47382d60),
+ 0xddd8,
+ 0x11db,
+ 0xbf3a,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// ccd4cf09-a79d-46c2-9aae-06a1698d6c8f
+static const AudioUuid kSpatializerTypeUUID = {static_cast<int32_t>(0xccd4cf09),
+ 0xa79d,
+ 0x46c2,
+ 0x9aae,
+ {0x06, 0xa1, 0x69, 0x8d, 0x6c, 0x8f}};
+// 37cc2c00-dddd-11db-8577-0002a5d5c51b
+static const AudioUuid kVirtualizerTypeUUID = {static_cast<int32_t>(0x37cc2c00),
+ 0xdddd,
+ 0x11db,
+ 0x8577,
+ {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}};
+// fa819f3e-588b-11ed-9b6a-0242ac120002
+static const AudioUuid kVisualizerTypeUUID = {static_cast<int32_t>(0xfa819f3e),
+ 0x588b,
+ 0x11ed,
+ 0x9b6a,
+ {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+// fa81a2b8-588b-11ed-9b6a-0242ac120002
+static const AudioUuid kVolumeTypeUUID = {static_cast<int32_t>(0xfa81a2b8),
+ 0x588b,
+ 0x11ed,
+ 0x9b6a,
+ {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+
+} // namespace effect
+} // namespace android
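Note: each aggregate above follows the field order of aidl::android::media::audio::common::AudioUuid, assumed here to be {timeLow, timeMid, timeHiAndVersion, clockSeq, node}. Using the equalizer type as an example:

    // "0bed4300-ddd6-11db-8f34-0002a5d5c51b"
    //   timeLow          = 0x0bed4300
    //   timeMid          = 0xddd6
    //   timeHiAndVersion = 0x11db
    //   clockSeq         = 0x8f34
    //   node             = {0x00, 0x02, 0xa5, 0xd5, 0xc5, 0x1b}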
diff --git a/media/libaudiohal/include/media/audiohal/AudioHalUtils.h b/media/libaudiohal/include/media/audiohal/AudioHalUtils.h
deleted file mode 100644
index 4862cba..0000000
--- a/media/libaudiohal/include/media/audiohal/AudioHalUtils.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2022 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma once
-
-#define RETURN_IF_BINDER_FAIL(expr) \
- do { \
- const ::ndk::ScopedAStatus _temp_status_ = (expr); \
- if (!_temp_status_.isOk()) { \
- ALOGE("%s:%d return with expr %s msg %s", __func__, __LINE__, #expr, \
- _temp_status_.getMessage()); \
- return _temp_status_.getStatus(); \
- } \
- } while (false)
-
-#define RETURN_IF_NOT_OK(statement) \
- do { \
- auto tmp = (statement); \
- if (tmp != OK) { \
- return tmp; \
- } \
- } while (false)
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 2c8219e..2df2f5d 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -26,12 +26,16 @@
#include <utils/RefBase.h>
#include <utils/String8.h>
+namespace ndk {
+class SpAIBinder;
+}
+
namespace android {
class StreamInHalInterface;
class StreamOutHalInterface;
-class DeviceHalInterface : public RefBase
+class DeviceHalInterface : public virtual RefBase
{
public:
// Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
@@ -106,16 +110,10 @@
virtual status_t releaseAudioPatch(audio_patch_handle_t patch) = 0;
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port* port) {
- ALOGE("%s override me port %p", __func__, port);
- return OK;
- }
+ virtual status_t getAudioPort(struct audio_port* port) = 0;
// Fills the list of supported attributes for a given audio port.
- virtual status_t getAudioPort(struct audio_port_v7 *port) {
- ALOGE("%s override me port %p", __func__, port);
- return OK;
- }
+ virtual status_t getAudioPort(struct audio_port_v7 *port) = 0;
// Set audio port configuration.
virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
@@ -146,6 +144,10 @@
virtual status_t dump(int fd, const Vector<String16>& args) = 0;
+ // Returns the sound dose binder interface if it is supported by the HAL, nullptr otherwise
+ virtual status_t getSoundDoseInterface(const std::string& module,
+ ::ndk::SpAIBinder* soundDoseBinder) = 0;
+
protected:
// Subclasses can not be constructed directly by clients.
DeviceHalInterface() {}
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 1d52b7d..a651d9b 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -110,8 +110,8 @@
virtual void onError() {}
protected:
- StreamOutHalInterfaceCallback() {}
- virtual ~StreamOutHalInterfaceCallback() {}
+ StreamOutHalInterfaceCallback() = default;
+ virtual ~StreamOutHalInterfaceCallback() = default;
};
class StreamOutHalInterfaceEventCallback : public virtual RefBase {
@@ -119,8 +119,8 @@
virtual void onCodecFormatChanged(const std::basic_string<uint8_t>& metadataBs) = 0;
protected:
- StreamOutHalInterfaceEventCallback() {}
- virtual ~StreamOutHalInterfaceEventCallback() {}
+ StreamOutHalInterfaceEventCallback() = default;
+ virtual ~StreamOutHalInterfaceEventCallback() = default;
};
class StreamOutHalInterfaceLatencyModeCallback : public virtual RefBase {
@@ -131,8 +131,8 @@
virtual void onRecommendedLatencyModeChanged(std::vector<audio_latency_mode_t> modes) = 0;
protected:
- StreamOutHalInterfaceLatencyModeCallback() {}
- virtual ~StreamOutHalInterfaceLatencyModeCallback() {}
+ StreamOutHalInterfaceLatencyModeCallback() = default;
+ virtual ~StreamOutHalInterfaceLatencyModeCallback() = default;
};
class StreamOutHalInterface : public virtual StreamHalInterface {
diff --git a/media/libaudiohal/tests/Android.bp b/media/libaudiohal/tests/Android.bp
index e20f74c..2f78dd0 100644
--- a/media/libaudiohal/tests/Android.bp
+++ b/media/libaudiohal/tests/Android.bp
@@ -29,7 +29,7 @@
],
defaults: [
- "latest_android_media_audio_common_types_cpp_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
],
cflags: [
@@ -37,12 +37,16 @@
"-Wextra",
"-Werror",
"-Wthread-safety",
+ "-DBACKEND_NDK",
],
shared_libs: [
"audioclient-types-aidl-cpp",
+ "libaudio_aidl_conversion_common_ndk",
"libaudiohal",
+ "liblog",
"libutils",
+ "libvibrator",
],
header_libs: [
diff --git a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
index 83c7809..dda608b 100644
--- a/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
+++ b/media/libaudiohal/tests/EffectsFactoryHalInterface_test.cpp
@@ -15,16 +15,36 @@
*/
//#define LOG_NDEBUG 0
+#include <cstddef>
#include <cstdint>
+#include <cstring>
+#include <memory>
+#include <utility>
#define LOG_TAG "EffectsFactoryHalInterfaceTest"
+#include <aidl/android/media/audio/common/AudioUuid.h>
+#include <media/AidlConversionCppNdk.h>
#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <system/audio_effects/audio_effects_utils.h>
+#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_agc2.h>
+#include <system/audio_effects/effect_bassboost.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <system/audio_effects/effect_dynamicsprocessing.h>
+#include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_loudnessenhancer.h>
+#include <system/audio_effect.h>
#include <gtest/gtest.h>
#include <utils/RefBase.h>
+#include <vibrator/ExternalVibrationUtils.h>
namespace android {
+using effect::utils::EffectParamReader;
+using effect::utils::EffectParamWriter;
+using ::aidl::android::media::audio::common::AudioUuid;
+
// EffectsFactoryHalInterface
TEST(libAudioHalTest, createEffectsFactoryHalInterface) {
ASSERT_NE(nullptr, EffectsFactoryHalInterface::create());
@@ -78,6 +98,201 @@
EXPECT_NE(0, version.getMajorVersion());
}
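+// Helper holding pre-built effect_param_t buffers for one setParameter/getParameter round trip.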
+class EffectParamCombination {
+ public:
+ template <typename P, typename V>
+ void init(const P& p, const V& v, size_t len) {
+ setBuffer.resize(sizeof(effect_param_t) + sizeof(p) + sizeof(v) + 4);
+ getBuffer.resize(sizeof(effect_param_t) + sizeof(p) + len + 4);
+ expectBuffer.resize(sizeof(effect_param_t) + sizeof(p) + len + 4);
+ parameterSet =
+ std::make_shared<EffectParamReader>(createEffectParam(setBuffer.data(), p, v));
+ parameterGet =
+ std::make_shared<EffectParamReader>(createEffectParam(getBuffer.data(), p, v));
+ parameterExpect =
+ std::make_shared<EffectParamReader>(createEffectParam(expectBuffer.data(), p, v));
+ valueSize = len;
+ }
+
+ std::shared_ptr<EffectParamReader> parameterSet; /* setParameter */
+ std::shared_ptr<EffectParamReader> parameterGet; /* getParameter */
+ std::shared_ptr<EffectParamReader> parameterExpect; /* expected from getParameter */
+    size_t valueSize; /* value size expected to be written into the reply data buffer */
+
+ private:
+ std::vector<uint8_t> setBuffer;
+ std::vector<uint8_t> getBuffer;
+ std::vector<uint8_t> expectBuffer;
+
+ template <typename P, typename V>
+ EffectParamReader createEffectParam(void* buf, const P& p, const V& v) {
+ effect_param_t* paramRet = (effect_param_t*)buf;
+ paramRet->psize = sizeof(P);
+ paramRet->vsize = sizeof(V);
+ EffectParamWriter writer(*paramRet);
+ EXPECT_EQ(OK, writer.writeToParameter(&p));
+ EXPECT_EQ(OK, writer.writeToValue(&v));
+ writer.finishValueWrite();
+ return writer;
+ }
+};
+
+template <typename P, typename V>
+std::shared_ptr<EffectParamCombination> createEffectParamCombination(const P& p, const V& v,
+ size_t len) {
+ auto comb = std::make_shared<EffectParamCombination>();
+ comb->init(p, v, len);
+ return comb;
+}
+
+enum ParamName { TUPLE_UUID, TUPLE_PARAM_COMBINATION };
+using EffectParamTestTuple =
+ std::tuple<const effect_uuid_t* /* type UUID */, std::shared_ptr<EffectParamCombination>>;
+
+std::vector<EffectParamTestTuple> testPairs = {
+ std::make_tuple(FX_IID_AEC,
+ createEffectParamCombination(AEC_PARAM_ECHO_DELAY, 0xff /* echoDelayMs */,
+ sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(FX_IID_AGC2, createEffectParamCombination(
+ AGC2_PARAM_FIXED_DIGITAL_GAIN, 15 /* digitalGainDb */,
+ sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(SL_IID_BASSBOOST,
+ createEffectParamCombination(BASSBOOST_PARAM_STRENGTH, 20 /* strength */,
+ sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(EFFECT_UIID_DOWNMIX,
+ createEffectParamCombination(DOWNMIX_PARAM_TYPE, DOWNMIX_TYPE_FOLD,
+ sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(SL_IID_DYNAMICSPROCESSING,
+ createEffectParamCombination(
+ std::array<uint32_t, 2>({DP_PARAM_INPUT_GAIN, 0 /* channel */}),
+ 30 /* gainDb */, sizeof(int32_t) /* returnValueSize */)),
+ std::make_tuple(
+ FX_IID_HAPTICGENERATOR,
+ createEffectParamCombination(
+ HG_PARAM_HAPTIC_INTENSITY,
+ std::array<uint32_t, 2>(
+ {1, uint32_t(::android::os::HapticScale::HIGH) /* scale */}),
+ 0 /* returnValueSize */)),
+ std::make_tuple(
+ FX_IID_LOUDNESS_ENHANCER,
+ createEffectParamCombination(LOUDNESS_ENHANCER_PARAM_TARGET_GAIN_MB, 5 /* gain */,
+ sizeof(int32_t) /* returnValueSize */))};
+
+class libAudioHalEffectParamTest : public ::testing::TestWithParam<EffectParamTestTuple> {
+ public:
+ libAudioHalEffectParamTest()
+ : mParamTuple(GetParam()),
+ mFactory(EffectsFactoryHalInterface::create()),
+ mTypeUuid(std::get<TUPLE_UUID>(mParamTuple)),
+ mCombination(std::get<TUPLE_PARAM_COMBINATION>(mParamTuple)),
+ mExpectedValue([&]() {
+ std::vector<uint8_t> expectData(mCombination->valueSize);
+ mCombination->parameterExpect->readFromValue(expectData.data(),
+ mCombination->valueSize);
+ return expectData;
+ }()),
+ mDescs([&]() {
+ std::vector<effect_descriptor_t> descs;
+ if (mFactory && mTypeUuid && OK == mFactory->getDescriptors(mTypeUuid, &descs)) {
+ return descs;
+ }
+ return descs;
+ }()) {}
+
+ void SetUp() override {
+ for (const auto& desc : mDescs) {
+ sp<EffectHalInterface> interface = createEffectHal(desc);
+ ASSERT_NE(nullptr, interface);
+ mHalInterfaces.push_back(interface);
+ }
+ }
+
+ void initEffect(const sp<EffectHalInterface>& interface) {
+ uint32_t initReply = 0;
+ uint32_t initReplySize = sizeof(initReply);
+ ASSERT_EQ(OK, interface->command(EFFECT_CMD_INIT, 0, nullptr, &initReplySize, &initReply));
+ }
+
+ void TearDown() override {
+ for (auto& interface : mHalInterfaces) {
+ interface->close();
+ }
+ }
+
+ sp<EffectHalInterface> createEffectHal(const effect_descriptor_t& desc) {
+ sp<EffectHalInterface> interface = nullptr;
+ if (0 == std::memcmp(&desc.type, mTypeUuid, sizeof(effect_uuid_t)) &&
+ OK == mFactory->createEffect(&desc.uuid, 1 /* sessionId */, 1 /* ioId */,
+ 1 /* deviceId */, &interface)) {
+ return interface;
+ }
+ return nullptr;
+ }
+
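+    // Sends EFFECT_CMD_SET_PARAM, then reads the parameter back with EFFECT_CMD_GET_PARAM and
+    // compares the returned value against the expected data.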
+ void setAndGetParameter(const sp<EffectHalInterface>& interface) {
+ uint32_t replySize = sizeof(uint32_t);
+ uint8_t reply[replySize];
+ auto parameterSet = mCombination->parameterSet;
+ ASSERT_EQ(OK,
+ interface->command(EFFECT_CMD_SET_PARAM, (uint32_t)parameterSet->getTotalSize(),
+ const_cast<effect_param_t*>(¶meterSet->getEffectParam()),
+ &replySize, &reply))
+ << parameterSet->toString();
+ ASSERT_EQ(replySize, sizeof(uint32_t));
+
+ effect_param_t* getParam =
+ const_cast<effect_param_t*>(&mCombination->parameterGet->getEffectParam());
+ size_t maxReplySize = mCombination->valueSize + sizeof(effect_param_t) +
+ sizeof(parameterSet->getPaddedParameterSize());
+ replySize = maxReplySize;
+ EXPECT_EQ(OK,
+ interface->command(EFFECT_CMD_GET_PARAM, (uint32_t)parameterSet->getTotalSize(),
+ const_cast<effect_param_t*>(¶meterSet->getEffectParam()),
+ &replySize, getParam));
+ EffectParamReader parameterGet(*getParam);
+ EXPECT_EQ(replySize, parameterGet.getTotalSize()) << parameterGet.toString();
+ if (mCombination->valueSize) {
+ std::vector<uint8_t> response(mCombination->valueSize);
+ EXPECT_EQ(OK, parameterGet.readFromValue(response.data(), mCombination->valueSize))
+ << parameterGet.toString();
+ EXPECT_EQ(response, mExpectedValue);
+ }
+ }
+
+ const EffectParamTestTuple mParamTuple;
+ const sp<EffectsFactoryHalInterface> mFactory;
+ const effect_uuid_t* mTypeUuid;
+ std::shared_ptr<EffectParamCombination> mCombination;
+ const std::vector<uint8_t> mExpectedValue;
+ const std::vector<effect_descriptor_t> mDescs;
+ std::vector<sp<EffectHalInterface>> mHalInterfaces;
+};
+
+TEST_P(libAudioHalEffectParamTest, setAndGetParam) {
+ for (auto& interface : mHalInterfaces) {
+ EXPECT_NO_FATAL_FAILURE(initEffect(interface));
+ EXPECT_NO_FATAL_FAILURE(setAndGetParameter(interface));
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(
+ libAudioHalEffectParamTest, libAudioHalEffectParamTest, ::testing::ValuesIn(testPairs),
+ [](const testing::TestParamInfo<libAudioHalEffectParamTest::ParamType>& info) {
+ AudioUuid uuid = ::aidl::android::legacy2aidl_audio_uuid_t_AudioUuid(
+ *std::get<TUPLE_UUID>(info.param))
+ .value();
+ std::string name = "UUID_" + uuid.toString();
+ std::replace_if(
+ name.begin(), name.end(), [](const char c) { return !std::isalnum(c); }, '_');
+ return name;
+ });
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(libAudioHalEffectParamTest);
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+
// TODO: b/263986405 Add multi-thread testing
} // namespace android
diff --git a/media/libeffects/downmix/Android.bp b/media/libeffects/downmix/Android.bp
index 742626c..a5259aa 100644
--- a/media/libeffects/downmix/Android.bp
+++ b/media/libeffects/downmix/Android.bp
@@ -69,6 +69,7 @@
"libcutils",
"liblog",
],
+ relative_install_path: "soundfx",
visibility: [
"//hardware/interfaces/audio/aidl/default",
],
diff --git a/media/libeffects/downmix/aidl/DownmixContext.cpp b/media/libeffects/downmix/aidl/DownmixContext.cpp
index 6869689..43bfeed 100644
--- a/media/libeffects/downmix/aidl/DownmixContext.cpp
+++ b/media/libeffects/downmix/aidl/DownmixContext.cpp
@@ -21,6 +21,7 @@
#include "DownmixContext.h"
using aidl::android::hardware::audio::effect::IEffect;
+using ::aidl::android::media::audio::common::AudioChannelLayout;
using ::android::hardware::audio::common::getChannelCount;
namespace aidl::android::hardware::audio::effect {
diff --git a/media/libeffects/downmix/aidl/DownmixContext.h b/media/libeffects/downmix/aidl/DownmixContext.h
index 8a244ac..9a9f2da 100644
--- a/media/libeffects/downmix/aidl/DownmixContext.h
+++ b/media/libeffects/downmix/aidl/DownmixContext.h
@@ -22,9 +22,6 @@
namespace aidl::android::hardware::audio::effect {
-using media::audio::common::AudioChannelLayout;
-using media::audio::common::AudioDeviceDescription;
-
enum DownmixState {
DOWNMIX_STATE_UNINITIALIZED,
DOWNMIX_STATE_INITIALIZED,
@@ -45,34 +42,25 @@
}
Downmix::Type getDmType() const { return mType; }
- RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override {
- // FIXME change volume
- mVolumeStereo = volumeStereo;
- return RetCode::SUCCESS;
- }
- Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
-
- RetCode setOutputDevice(const AudioDeviceDescription& device) override {
+ RetCode setOutputDevice(
+ const std::vector<::aidl::android::media::audio::common::AudioDeviceDescription>&
+ device) override {
// FIXME change type if playing on headset vs speaker
mOutputDevice = device;
return RetCode::SUCCESS;
}
- AudioDeviceDescription getOutputDevice() { return mOutputDevice; }
IEffect::Status lvmProcess(float* in, float* out, int samples);
private:
DownmixState mState;
Downmix::Type mType;
- AudioChannelLayout mChMask;
+ ::aidl::android::media::audio::common::AudioChannelLayout mChMask;
::android::audio_utils::channels::ChannelMix mChannelMix;
// Common Params
- AudioDeviceDescription mOutputDevice;
- Parameter::VolumeStereo mVolumeStereo;
-
void init_params(const Parameter::Common& common);
- bool isChannelMaskValid(AudioChannelLayout channelMask);
+ bool isChannelMaskValid(::aidl::android::media::audio::common::AudioChannelLayout channelMask);
};
} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/Android.bp b/media/libeffects/dynamicsproc/Android.bp
index 84131a4..736a086 100644
--- a/media/libeffects/dynamicsproc/Android.bp
+++ b/media/libeffects/dynamicsproc/Android.bp
@@ -32,34 +32,67 @@
],
}
+cc_defaults {
+ name : "dynamicsprocessingdefaults",
+ srcs: [
+ "dsp/DPBase.cpp",
+ "dsp/DPFrequency.cpp",
+ ],
+
+ shared_libs: [
+ "libaudioutils",
+ "libbase",
+ "liblog",
+ "libutils",
+ ],
+ header_libs: [
+ "libaudioeffects",
+ "libeigen",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ "-Wall",
+ "-Werror",
+ ],
+ relative_install_path: "soundfx",
+}
+
cc_library_shared {
name: "libdynproc",
vendor: true,
+ defaults: [
+ "dynamicsprocessingdefaults",
+ ],
+
srcs: [
"EffectDynamicsProcessing.cpp",
- "dsp/DPBase.cpp",
- "dsp/DPFrequency.cpp",
],
cflags: [
"-O2",
"-fvisibility=hidden",
+ ],
+}
- "-Wall",
- "-Werror",
+cc_library_shared {
+ name: "libdynamicsprocessingaidl",
+
+ srcs: [
+ "aidl/DynamicsProcessing.cpp",
+ "aidl/DynamicsProcessingContext.cpp",
+ ":effectCommonFile",
],
- shared_libs: [
- "libcutils",
- "liblog",
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ "dynamicsprocessingdefaults",
],
- relative_install_path: "soundfx",
-
- header_libs: [
- "libaudioeffects",
- "libeigen",
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
],
}
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
new file mode 100644
index 0000000..203a27b
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.cpp
@@ -0,0 +1,313 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DynamicsProcessingLibEffects"
+
+#include <android-base/logging.h>
+
+#include "DynamicsProcessing.h"
+
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::DynamicsProcessingImpl;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kDynamicsProcessingImplUUID;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+using aidl::android::media::audio::common::PcmType;
+
+extern "C" binder_exception_t createEffect(const AudioUuid* in_impl_uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (!in_impl_uuid || *in_impl_uuid != kDynamicsProcessingImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<DynamicsProcessingImpl>();
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || *in_impl_uuid != kDynamicsProcessingImplUUID) {
+ LOG(ERROR) << __func__ << "uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ *_aidl_return = DynamicsProcessingImpl::kDescriptor;
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+const std::string DynamicsProcessingImpl::kEffectName = "DynamicsProcessing";
+const DynamicsProcessing::Capability DynamicsProcessingImpl::kCapability = {.minCutOffFreq = 220,
+ .maxCutOffFreq = 20000};
+const Descriptor DynamicsProcessingImpl::kDescriptor = {
+ .common = {.id = {.type = kDynamicsProcessingTypeUUID,
+ .uuid = kDynamicsProcessingImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::LAST,
+ .volume = Flags::Volume::CTRL},
+ .name = DynamicsProcessingImpl::kEffectName,
+ .implementor = "The Android Open Source Project"},
+ .capability = Capability::make<Capability::dynamicsProcessing>(
+ DynamicsProcessingImpl::kCapability)};
+
+ndk::ScopedAStatus DynamicsProcessingImpl::open(const Parameter::Common& common,
+ const std::optional<Parameter::Specific>& specific,
+ OpenEffectReturn* ret) {
+ LOG(DEBUG) << __func__;
+    // effect only supports 32-bit float
+ RETURN_IF(common.input.base.format.pcm != common.output.base.format.pcm ||
+ common.input.base.format.pcm != PcmType::FLOAT_32_BIT,
+ EX_ILLEGAL_ARGUMENT, "dataMustBe32BitsFloat");
+ RETURN_OK_IF(mState != State::INIT);
+ auto context = createContext(common);
+ RETURN_IF(!context, EX_NULL_POINTER, "createContextFailed");
+
+ RETURN_IF_ASTATUS_NOT_OK(setParameterCommon(common), "setCommParamErr");
+ if (specific.has_value()) {
+ RETURN_IF_ASTATUS_NOT_OK(setParameterSpecific(specific.value()), "setSpecParamErr");
+ } else {
+ Parameter::Specific defaultSpecific =
+ Parameter::Specific::make<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::engineArchitecture>(
+ mContext->getEngineArchitecture()));
+ RETURN_IF_ASTATUS_NOT_OK(setParameterSpecific(defaultSpecific), "setDefaultEngineErr");
+ }
+
+ mState = State::IDLE;
+ context->dupeFmq(ret);
+ RETURN_IF(createThread(context, getEffectName()) != RetCode::SUCCESS, EX_UNSUPPORTED_OPERATION,
+ "FailedToCreateWorker");
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << __func__ << kDescriptor.toString();
+ *_aidl_return = kDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ return ndk::ScopedAStatus::ok();
+ case CommandId::STOP:
+ mContext->disable();
+ return ndk::ScopedAStatus::ok();
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ return ndk::ScopedAStatus::ok();
+ default:
+ // Need this default handling for vendor extendable CommandId::VENDOR_COMMAND_*
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::setParameterSpecific(
+ const Parameter::Specific& specific) {
+ RETURN_IF(Parameter::Specific::dynamicsProcessing != specific.getTag(), EX_ILLEGAL_ARGUMENT,
+ "EffectNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto& param = specific.get<Parameter::Specific::dynamicsProcessing>();
+ auto tag = param.getTag();
+
+ switch (tag) {
+ case DynamicsProcessing::engineArchitecture: {
+ RETURN_IF(mContext->setEngineArchitecture(
+ param.get<DynamicsProcessing::engineArchitecture>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setEngineArchitectureFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEq: {
+ RETURN_IF(
+ mContext->setPreEq(param.get<DynamicsProcessing::preEq>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPreEqFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEq: {
+ RETURN_IF(mContext->setPostEq(param.get<DynamicsProcessing::postEq>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPostEqFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEqBand: {
+ RETURN_IF(mContext->setPreEqBand(param.get<DynamicsProcessing::preEqBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPreEqBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEqBand: {
+ RETURN_IF(mContext->setPostEqBand(param.get<DynamicsProcessing::postEqBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPostEqBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbc: {
+ RETURN_IF(mContext->setMbc(param.get<DynamicsProcessing::mbc>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMbcFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbcBand: {
+ RETURN_IF(mContext->setMbcBand(param.get<DynamicsProcessing::mbcBand>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setMbcBandFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::limiter: {
+ RETURN_IF(mContext->setLimiter(param.get<DynamicsProcessing::limiter>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setLimiterFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::inputGain: {
+ RETURN_IF(mContext->setInputGain(param.get<DynamicsProcessing::inputGain>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setInputGainFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::vendorExtension: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+ RETURN_IF(Parameter::Id::dynamicsProcessingTag != tag, EX_ILLEGAL_ARGUMENT, "wrongIdTag");
+ auto dpId = id.get<Parameter::Id::dynamicsProcessingTag>();
+ auto dpIdTag = dpId.getTag();
+ switch (dpIdTag) {
+ case DynamicsProcessing::Id::commonTag:
+ return getParameterDynamicsProcessing(dpId.get<DynamicsProcessing::Id::commonTag>(),
+ specific);
+ case DynamicsProcessing::Id::vendorExtensionTag:
+ LOG(ERROR) << __func__ << " unsupported ID: " << toString(dpIdTag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionIdNotSupported");
+ }
+}
+
+ndk::ScopedAStatus DynamicsProcessingImpl::getParameterDynamicsProcessing(
+ const DynamicsProcessing::Tag& tag, Parameter::Specific* specific) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ switch (tag) {
+ case DynamicsProcessing::engineArchitecture: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::engineArchitecture>(
+ mContext->getEngineArchitecture()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEq: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::preEq>(mContext->getPreEq()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEq: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::postEq>(mContext->getPostEq()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::preEqBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::preEqBand>(
+ mContext->getPreEqBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::postEqBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::postEqBand>(
+ mContext->getPostEqBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbc: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::mbc>(mContext->getMbc()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::mbcBand: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::mbcBand>(mContext->getMbcBand()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::limiter: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::limiter>(mContext->getLimiter()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::inputGain: {
+ specific->set<Parameter::Specific::dynamicsProcessing>(
+ DynamicsProcessing::make<DynamicsProcessing::inputGain>(
+ mContext->getInputGain()));
+ return ndk::ScopedAStatus::ok();
+ }
+ case DynamicsProcessing::vendorExtension: {
+ LOG(ERROR) << __func__ << " wrong vendor tag in CommonTag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "DPVendorExtensionTagInWrongId");
+ }
+ }
+}
+
+std::shared_ptr<EffectContext> DynamicsProcessingImpl::createContext(
+ const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ return mContext;
+ }
+
+ mContext = std::make_shared<DynamicsProcessingContext>(1 /* statusFmqDepth */, common);
+ return mContext;
+}
+
+RetCode DynamicsProcessingImpl::releaseContext() {
+ if (mContext) {
+ mContext->disable();
+ mContext->resetBuffer();
+ mContext.reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status DynamicsProcessingImpl::effectProcessImpl(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, samples);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h
new file mode 100644
index 0000000..824ebea
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessing.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "effect-impl/EffectUUID.h"
+#include "DynamicsProcessingContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class DynamicsProcessingImpl final : public EffectImpl {
+ public:
+ static const std::string kEffectName;
+ static const Descriptor kDescriptor;
+ static const DynamicsProcessing::Capability kCapability;
+
+ DynamicsProcessingImpl() { LOG(DEBUG) << __func__; }
+ ~DynamicsProcessingImpl() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+ }
+
+ ndk::ScopedAStatus open(const Parameter::Common& common,
+ const std::optional<Parameter::Specific>& specific,
+ OpenEffectReturn* ret) override;
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+ IEffect::Status effectProcessImpl(float* in, float* out, int process) override;
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ RetCode releaseContext() override;
+
+ std::shared_ptr<EffectContext> getContext() override { return mContext; }
+ std::string getEffectName() override { return kEffectName; }
+
+ private:
+ std::shared_ptr<DynamicsProcessingContext> mContext;
+ ndk::ScopedAStatus getParameterDynamicsProcessing(const DynamicsProcessing::Tag& tag,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
new file mode 100644
index 0000000..57a2be9
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.cpp
@@ -0,0 +1,580 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "AHAL_DPLibEffectsContext"
+
+#include "DynamicsProcessing.h"
+#include "DynamicsProcessingContext.h"
+
+#include <functional>
+#include <sys/param.h>
+#include <unordered_set>
+
+namespace aidl::android::hardware::audio::effect {
+
+DynamicsProcessingContext::DynamicsProcessingContext(int statusDepth,
+ const Parameter::Common& common)
+ : EffectContext(statusDepth, common) {
+ LOG(DEBUG) << __func__;
+ init();
+}
+
+DynamicsProcessingContext::~DynamicsProcessingContext() {
+ LOG(DEBUG) << __func__;
+}
+
+RetCode DynamicsProcessingContext::enable() {
+ std::lock_guard lg(mMutex);
+ if (mState != DYNAMICS_PROCESSING_STATE_INITIALIZED) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DYNAMICS_PROCESSING_STATE_ACTIVE;
+ return RetCode::SUCCESS;
+}
+
+RetCode DynamicsProcessingContext::disable() {
+ std::lock_guard lg(mMutex);
+ if (mState != DYNAMICS_PROCESSING_STATE_ACTIVE) {
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+ }
+ mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ return RetCode::SUCCESS;
+}
+
+void DynamicsProcessingContext::reset() {
+ std::lock_guard lg(mMutex);
+ if (mDpFreq != nullptr) {
+ mDpFreq.reset();
+ }
+}
+
+RetCode DynamicsProcessingContext::setCommon(const Parameter::Common& common) {
+ mCommon = common;
+ init();
+ return RetCode::SUCCESS;
+}
+
+void DynamicsProcessingContext::dpSetFreqDomainVariant_l(
+ const DynamicsProcessing::EngineArchitecture& engine) {
+ mDpFreq.reset(new dp_fx::DPFrequency());
+ mDpFreq->init(mChannelCount, engine.preEqStage.inUse, engine.preEqStage.bandCount,
+ engine.mbcStage.inUse, engine.mbcStage.bandCount, engine.postEqStage.inUse,
+ engine.postEqStage.bandCount, engine.limiterInUse);
+
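+    // The block length is derived from the preferred processing duration, then clamped to the
+    // DPFrequency minimum and rounded up to the next power of two below.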
+ int32_t sampleRate = mCommon.input.base.sampleRate;
+ int32_t minBlockSize = (int32_t)dp_fx::DPFrequency::getMinBockSize();
+ int32_t block = engine.preferredProcessingDurationMs * sampleRate / 1000.0f;
+ LOG(INFO) << __func__ << " sampleRate " << sampleRate << " block length "
+              << engine.preferredProcessingDurationMs << " ms (" << block << " samples)";
+ if (block < minBlockSize) {
+ block = minBlockSize;
+ } else if (!powerof2(block)) {
+        // find the next highest power of 2.
+ block = 1 << (32 - __builtin_clz(block));
+ }
+ mDpFreq->configure(block, block >> 1, sampleRate);
+}
+
+RetCode DynamicsProcessingContext::setEngineArchitecture(
+ const DynamicsProcessing::EngineArchitecture& engineArchitecture) {
+ RETURN_VALUE_IF(!validateEngineConfig(engineArchitecture), RetCode::ERROR_ILLEGAL_PARAMETER,
+ "illegalEngineConfig");
+
+ std::lock_guard lg(mMutex);
+ if (!mEngineInited || mEngineArchitecture != engineArchitecture) {
+ if (engineArchitecture.resolutionPreference ==
+ DynamicsProcessing::ResolutionPreference::FAVOR_FREQUENCY_RESOLUTION) {
+ dpSetFreqDomainVariant_l(engineArchitecture);
+ } else {
+ LOG(WARNING) << __func__ << toString(engineArchitecture.resolutionPreference)
+ << " not available now";
+ }
+ mEngineInited = true;
+ mEngineArchitecture = engineArchitecture;
+ }
+ LOG(INFO) << __func__ << engineArchitecture.toString();
+ return RetCode::SUCCESS;
+}
+
+RetCode DynamicsProcessingContext::setPreEq(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPEq>(channels, mEngineArchitecture.preEqStage.inUse,
+ StageType::PREEQ);
+}
+
+RetCode DynamicsProcessingContext::setPostEq(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPEq>(channels, mEngineArchitecture.postEqStage.inUse,
+ StageType::POSTEQ);
+}
+
+RetCode DynamicsProcessingContext::setMbc(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels) {
+ std::lock_guard lg(mMutex);
+ return setDpChannels_l<dp_fx::DPMbc>(channels, mEngineArchitecture.mbcStage.inUse,
+ StageType::MBC);
+}
+
+RetCode DynamicsProcessingContext::setPreEqBand(
+ const std::vector<DynamicsProcessing::EqBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+    RETURN_VALUE_IF(!mEngineArchitecture.preEqStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+                    "preEqNotInUse");
+ return setBands_l<DynamicsProcessing::EqBandConfig>(
+ bands, mEngineArchitecture.preEqStage.bandCount, StageType::PREEQ);
+}
+
+RetCode DynamicsProcessingContext::setPostEqBand(
+ const std::vector<DynamicsProcessing::EqBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.postEqStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "postEqNotInUse");
+ return setBands_l<DynamicsProcessing::EqBandConfig>(
+ bands, mEngineArchitecture.postEqStage.bandCount, StageType::POSTEQ);
+}
+
+RetCode DynamicsProcessingContext::setMbcBand(
+ const std::vector<DynamicsProcessing::MbcBandConfig>& bands) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.mbcStage.inUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "mbcNotInUse");
+ return setBands_l<DynamicsProcessing::MbcBandConfig>(
+            bands, mEngineArchitecture.mbcStage.bandCount, StageType::MBC);
+}
+
+RetCode DynamicsProcessingContext::setLimiter(
+ const std::vector<DynamicsProcessing::LimiterConfig>& limiters) {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(!mEngineArchitecture.limiterInUse, RetCode::ERROR_ILLEGAL_PARAMETER,
+ "limiterNotInUse");
+ return setBands_l<DynamicsProcessing::LimiterConfig>(limiters, -1, StageType::LIMITER);
+}
+
+RetCode DynamicsProcessingContext::setInputGain(
+ const std::vector<DynamicsProcessing::InputGain>& inputGains) {
+ std::lock_guard lg(mMutex);
+ return setBands_l<DynamicsProcessing::InputGain>(inputGains, -1, StageType::INPUTGAIN);
+}
+
+DynamicsProcessing::EngineArchitecture DynamicsProcessingContext::getEngineArchitecture() {
+ std::lock_guard lg(mMutex);
+ LOG(INFO) << __func__ << mEngineArchitecture.toString();
+ return mEngineArchitecture;
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getPreEq() {
+ return getChannelConfig(StageType::PREEQ);
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getPostEq() {
+ return getChannelConfig(StageType::POSTEQ);
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getPreEqBand() {
+ return getEqBandConfigs(StageType::PREEQ);
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getPostEqBand() {
+ return getEqBandConfigs(StageType::POSTEQ);
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getMbc() {
+ return getChannelConfig(StageType::MBC);
+}
+
+std::vector<DynamicsProcessing::MbcBandConfig> DynamicsProcessingContext::getMbcBand() {
+ std::vector<DynamicsProcessing::MbcBandConfig> bands;
+
+ std::lock_guard lg(mMutex);
+ auto maxBand = mEngineArchitecture.mbcStage.bandCount;
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto mbc = getMbc_l(ch);
+ if (!mbc) {
+ continue;
+ }
+ for (int32_t bandId = 0; bandId < maxBand; bandId++) {
+ auto band = mbc->getBand(bandId);
+ if (!band) {
+ continue;
+ }
+ bands.push_back({.channel = ch,
+ .band = bandId,
+ .enable = band->isEnabled(),
+ .cutoffFrequencyHz = band->getCutoffFrequency(),
+ .attackTimeMs = band->getAttackTime(),
+ .releaseTimeMs = band->getReleaseTime(),
+ .ratio = band->getRatio(),
+ .thresholdDb = band->getThreshold(),
+ .kneeWidthDb = band->getKneeWidth(),
+ .noiseGateThresholdDb = band->getNoiseGateThreshold(),
+ .expanderRatio = band->getExpanderRatio(),
+ .preGainDb = band->getPreGain(),
+ .postGainDb = band->getPostGain()});
+ }
+ }
+ return bands;
+}
+
+std::vector<DynamicsProcessing::LimiterConfig> DynamicsProcessingContext::getLimiter() {
+ std::vector<DynamicsProcessing::LimiterConfig> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto limiter = getLimiter_l(ch);
+ if (!limiter) {
+ continue;
+ }
+ ret.push_back({.channel = ch,
+ .enable = limiter->isEnabled(),
+ .linkGroup = static_cast<int32_t>(limiter->getLinkGroup()),
+ .attackTimeMs = limiter->getAttackTime(),
+ .releaseTimeMs = limiter->getReleaseTime(),
+ .ratio = limiter->getRatio(),
+ .thresholdDb = limiter->getThreshold(),
+ .postGainDb = limiter->getPostGain()});
+ }
+ return ret;
+}
+
+std::vector<DynamicsProcessing::InputGain> DynamicsProcessingContext::getInputGain() {
+ std::vector<DynamicsProcessing::InputGain> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto channel = getChannel_l(ch);
+ if (!channel) {
+ continue;
+ }
+ ret.push_back({.channel = ch, .gainDb = channel->getInputGain()});
+ }
+ return ret;
+}
+
+IEffect::Status DynamicsProcessingContext::lvmProcess(float* in, float* out, int samples) {
+ LOG(DEBUG) << __func__ << " in " << in << " out " << out << " sample " << samples;
+
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+
+ LOG(DEBUG) << __func__ << " start processing";
+ {
+ std::lock_guard lg(mMutex);
+ RETURN_VALUE_IF(mState != DynamicsProcessingState::DYNAMICS_PROCESSING_STATE_ACTIVE, status,
+ "notInActiveState");
+ RETURN_VALUE_IF(!mDpFreq, status, "engineNotInited");
+ mDpFreq->processSamples(in, out, samples);
+ }
+ return {STATUS_OK, samples, samples};
+}
+
+void DynamicsProcessingContext::init() {
+ std::lock_guard lg(mMutex);
+ mState = DYNAMICS_PROCESSING_STATE_INITIALIZED;
+ mChannelCount =
+ ::android::hardware::audio::common::getChannelCount(mCommon.input.base.channelMask);
+}
+
+dp_fx::DPChannel* DynamicsProcessingContext::getChannel_l(int channel) {
+ RETURN_VALUE_IF(mDpFreq == nullptr, nullptr, "DPFreqNotInited");
+
+ return mDpFreq->getChannel(channel);
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getPreEq_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getPreEq();
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getPostEq_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getPostEq();
+}
+
+dp_fx::DPMbc* DynamicsProcessingContext::getMbc_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getMbc();
+}
+
+dp_fx::DPLimiter* DynamicsProcessingContext::getLimiter_l(int ch) {
+ auto channel = getChannel_l(ch);
+ RETURN_VALUE_IF(channel == nullptr, nullptr, "ChannelNotExist");
+
+ return channel->getLimiter();
+}
+
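+// Returns the band stage for PREEQ/POSTEQ/MBC; LIMITER and INPUTGAIN have no band stage.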
+dp_fx::DPBandStage* DynamicsProcessingContext::getStageWithType_l(
+ DynamicsProcessingContext::StageType type, int ch) {
+ switch (type) {
+ case StageType::PREEQ: {
+ return getEqWithType_l(type, ch);
+ }
+ case StageType::POSTEQ: {
+ return getEqWithType_l(type, ch);
+ }
+ case StageType::MBC: {
+ return getMbc_l(ch);
+ }
+ case StageType::LIMITER:
+ FALLTHROUGH_INTENDED;
+ case StageType::INPUTGAIN: {
+ return nullptr;
+ }
+ }
+}
+
+dp_fx::DPEq* DynamicsProcessingContext::getEqWithType_l(DynamicsProcessingContext::StageType type,
+ int ch) {
+ switch (type) {
+ case StageType::PREEQ: {
+ return getPreEq_l(ch);
+ }
+ case StageType::POSTEQ: {
+ return getPostEq_l(ch);
+ }
+ case StageType::MBC:
+ FALLTHROUGH_INTENDED;
+ case StageType::LIMITER:
+ FALLTHROUGH_INTENDED;
+ case StageType::INPUTGAIN: {
+ return nullptr;
+ }
+ }
+}
+
+std::vector<DynamicsProcessing::ChannelConfig> DynamicsProcessingContext::getChannelConfig(
+ StageType type) {
+ std::vector<DynamicsProcessing::ChannelConfig> ret;
+
+ std::lock_guard lg(mMutex);
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto stage = getStageWithType_l(type, ch);
+ if (!stage) {
+ continue;
+ }
+ ret.push_back({.channel = ch, .enable = stage->isEnabled()});
+ }
+ return ret;
+}
+
+std::vector<DynamicsProcessing::EqBandConfig> DynamicsProcessingContext::getEqBandConfigs(
+ StageType type) {
+ std::vector<DynamicsProcessing::EqBandConfig> eqBands;
+
+ std::lock_guard lg(mMutex);
+ auto maxBand = mEngineArchitecture.preEqStage.bandCount;
+ for (int32_t ch = 0; ch < mChannelCount; ch++) {
+ auto eq = getEqWithType_l(type, ch);
+ if (!eq) {
+ continue;
+ }
+ for (int32_t bandId = 0; bandId < maxBand; bandId++) {
+ auto band = eq->getBand(bandId);
+ if (!band) {
+ continue;
+ }
+ eqBands.push_back({.channel = ch,
+ .band = bandId,
+ .enable = band->isEnabled(),
+ .cutoffFrequencyHz = band->getCutoffFrequency(),
+ .gainDb = band->getGain()});
+ }
+ }
+ return eqBands;
+}
+
+/**
+ * When StageEnablement is in use, bandCount needs to be positive.
+ */
+bool DynamicsProcessingContext::validateStageEnablement(
+ const DynamicsProcessing::StageEnablement& enablement) {
+    return !enablement.inUse || enablement.bandCount > 0;
+}
+
+bool DynamicsProcessingContext::validateEngineConfig(
+ const DynamicsProcessing::EngineArchitecture& engine) {
+ return engine.preferredProcessingDurationMs >= 0 &&
+ validateStageEnablement(engine.preEqStage) &&
+ validateStageEnablement(engine.postEqStage) && validateStageEnablement(engine.mbcStage);
+}
+
+inline bool DynamicsProcessingContext::validateCutoffFrequency(float freq) {
+ return freq >= DynamicsProcessingImpl::kCapability.minCutOffFreq &&
+ freq <= DynamicsProcessingImpl::kCapability.maxCutOffFreq;
+}
+
+bool DynamicsProcessingContext::validateEqBandConfig(const DynamicsProcessing::EqBandConfig& band,
+ int maxChannel, int maxBand) {
+ return validateChannel(band.channel, maxChannel) && validateBand(band.band, maxBand) &&
+ validateCutoffFrequency(band.cutoffFrequencyHz);
+}
+
+bool DynamicsProcessingContext::validateMbcBandConfig(const DynamicsProcessing::MbcBandConfig& band,
+ int maxChannel, int maxBand) {
+ return validateChannel(band.channel, maxChannel) && validateBand(band.band, maxBand) &&
+ validateCutoffFrequency(band.cutoffFrequencyHz) && validateTime(band.attackTimeMs) &&
+ validateTime(band.releaseTimeMs) && validateRatio(band.ratio) &&
+ validateBandDb(band.thresholdDb) && validateBandDb(band.kneeWidthDb) &&
+ validateBandDb(band.noiseGateThresholdDb) && validateRatio(band.expanderRatio);
+}
+
+bool DynamicsProcessingContext::validateLimiterConfig(
+ const DynamicsProcessing::LimiterConfig& limiter, int maxChannel) {
+ return validateChannel(limiter.channel, maxChannel) && validateTime(limiter.attackTimeMs) &&
+ validateTime(limiter.releaseTimeMs) && validateRatio(limiter.ratio) &&
+ validateBandDb(limiter.thresholdDb);
+}
+
+bool DynamicsProcessingContext::validateInputGainConfig(const DynamicsProcessing::InputGain& gain,
+ int maxChannel) {
+ return validateChannel(gain.channel, maxChannel);
+}
+
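+// Applies the per-channel enable flag for the given stage; out-of-range or missing channels are
+// skipped and duplicates are flagged, both causing ERROR_ILLEGAL_PARAMETER to be returned.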
+template <typename D>
+RetCode DynamicsProcessingContext::setDpChannels_l(
+ const std::vector<DynamicsProcessing::ChannelConfig>& channels, bool stageInUse,
+ StageType type) {
+ RetCode ret = RetCode::SUCCESS;
+ std::unordered_set<int> channelSet;
+
+ RETURN_VALUE_IF(!stageInUse, RetCode::ERROR_ILLEGAL_PARAMETER, "stageNotInUse");
+ for (auto& it : channels) {
+ if (0 != channelSet.count(it.channel)) {
+ LOG(WARNING) << __func__ << " duplicated channel " << it.channel;
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ } else {
+ channelSet.insert(it.channel);
+ }
+ if (it.channel < 0 || it.channel >= mChannelCount) {
+ LOG(WARNING) << __func__ << " skip illegal ChannelConfig " << it.toString() << " max "
+ << mChannelCount;
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ auto dp = getStageWithType_l(type, it.channel);
+ if (!dp) {
+ LOG(WARNING) << __func__ << " channel " << it.channel << " not exist";
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ if (dp->isEnabled() != it.enable) {
+ LOG(INFO) << __func__ << it.toString();
+ dp->setEnabled(it.enable);
+ }
+ }
+ return ret;
+}
+
+RetCode DynamicsProcessingContext::setDpChannelBand_l(const std::any& anyConfig, StageType type,
+ int maxCh, int maxBand,
+ std::set<std::pair<int, int>>& chBandSet) {
+ RETURN_VALUE_IF(!anyConfig.has_value(), RetCode::ERROR_ILLEGAL_PARAMETER, "bandInvalid");
+ RetCode ret = RetCode::SUCCESS;
+ std::pair<int, int> chBandKey;
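+    // chBandKey identifies the (channel, band) pair so duplicates within one call are rejected.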
+ switch (type) {
+ case StageType::PREEQ:
+ FALLTHROUGH_INTENDED;
+ case StageType::POSTEQ: {
+ dp_fx::DPEq* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::EqBandConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateEqBandConfig(config, maxCh, maxBand),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "eqBandNotValid");
+ RETURN_VALUE_IF(
+ nullptr == (dp = getEqWithType_l(type, config.channel)) || !dp->isEnabled(),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpEqNotExist");
+ dp_fx::DPEqBand band;
+ band.init(config.enable, config.cutoffFrequencyHz, config.gainDb);
+ dp->setBand(config.band, band);
+ chBandKey = {config.channel, config.band};
+ break;
+ }
+ case StageType::MBC: {
+ dp_fx::DPMbc* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::MbcBandConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateMbcBandConfig(config, maxCh, maxBand),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "mbcBandNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getMbc_l(config.channel)) || !dp->isEnabled(),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpMbcNotExist");
+ dp_fx::DPMbcBand band;
+ band.init(config.enable, config.cutoffFrequencyHz, config.attackTimeMs,
+ config.releaseTimeMs, config.ratio, config.thresholdDb, config.kneeWidthDb,
+ config.noiseGateThresholdDb, config.expanderRatio, config.preGainDb,
+ config.postGainDb);
+ dp->setBand(config.band, band);
+ chBandKey = {config.channel, config.band};
+ break;
+ }
+ case StageType::LIMITER: {
+ dp_fx::DPChannel* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::LimiterConfig>(anyConfig);
+ RETURN_VALUE_IF(!validateLimiterConfig(config, maxCh),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "limiterBandNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getChannel_l(config.channel)),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpChNotExist");
+ dp_fx::DPLimiter limiter;
+ limiter.init(mEngineArchitecture.limiterInUse, config.enable, config.linkGroup,
+ config.attackTimeMs, config.releaseTimeMs, config.ratio,
+ config.thresholdDb, config.postGainDb);
+ dp->setLimiter(limiter);
+ chBandKey = {config.channel, 0};
+ break;
+ }
+ case StageType::INPUTGAIN: {
+ dp_fx::DPChannel* dp;
+ const auto& config = std::any_cast<DynamicsProcessing::InputGain>(anyConfig);
+ RETURN_VALUE_IF(!validateInputGainConfig(config, maxCh),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "inputGainNotValid");
+ RETURN_VALUE_IF(nullptr == (dp = getChannel_l(config.channel)),
+ RetCode::ERROR_ILLEGAL_PARAMETER, "dpChNotExist");
+ dp->setInputGain(config.gainDb);
+ chBandKey = {config.channel, 0};
+ break;
+ }
+ }
+ RETURN_VALUE_IF(0 != chBandSet.count(chBandKey), RetCode::ERROR_ILLEGAL_PARAMETER,
+ "duplicatedBand");
+ chBandSet.insert(chBandKey);
+ return ret;
+}
+
+template <typename T /* BandConfig */>
+RetCode DynamicsProcessingContext::setBands_l(
+ const std::vector<T>& bands, int maxBand, StageType type) {
+ RetCode ret = RetCode::SUCCESS;
+ std::set<std::pair<int /* channel */, int /* band */>> bandSet;
+
+ for (const auto& it : bands) {
+ if (RetCode::SUCCESS !=
+ setDpChannelBand_l(std::make_any<T>(it), type, mChannelCount, maxBand, bandSet)) {
+ LOG(WARNING) << __func__ << " skipping band " << it.toString();
+ ret = RetCode::ERROR_ILLEGAL_PARAMETER;
+ continue;
+ }
+ LOG(INFO) << __func__ << it.toString();
+ }
+ return ret;
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
new file mode 100644
index 0000000..8be784e
--- /dev/null
+++ b/media/libeffects/dynamicsproc/aidl/DynamicsProcessingContext.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/thread_annotations.h>
+#include <audio_effects/effect_dynamicsprocessing.h>
+
+#include "effect-impl/EffectContext.h"
+
+#include <any>
+#include <cstddef>
+#include <dsp/DPBase.h>
+#include <dsp/DPFrequency.h>
+
+namespace aidl::android::hardware::audio::effect {
+
+enum DynamicsProcessingState {
+ DYNAMICS_PROCESSING_STATE_UNINITIALIZED,
+ DYNAMICS_PROCESSING_STATE_INITIALIZED,
+ DYNAMICS_PROCESSING_STATE_ACTIVE,
+};
+
+class DynamicsProcessingContext final : public EffectContext {
+ public:
+ DynamicsProcessingContext(int statusDepth, const Parameter::Common& common);
+ ~DynamicsProcessingContext();
+
+ RetCode enable();
+ RetCode disable();
+ void reset();
+
+ // override EffectContext::setCommon to update mChannelCount
+ RetCode setCommon(const Parameter::Common& common) override;
+
+ RetCode setEngineArchitecture(const DynamicsProcessing::EngineArchitecture& engineArchitecture);
+ RetCode setPreEq(const std::vector<DynamicsProcessing::ChannelConfig>& eqChannels);
+ RetCode setPostEq(const std::vector<DynamicsProcessing::ChannelConfig>& eqChannels);
+ RetCode setPreEqBand(const std::vector<DynamicsProcessing::EqBandConfig>& eqBands);
+ RetCode setPostEqBand(const std::vector<DynamicsProcessing::EqBandConfig>& eqBands);
+ RetCode setMbc(const std::vector<DynamicsProcessing::ChannelConfig>& mbcChannels);
+ RetCode setMbcBand(const std::vector<DynamicsProcessing::MbcBandConfig>& eqBands);
+ RetCode setLimiter(const std::vector<DynamicsProcessing::LimiterConfig>& limiters);
+ RetCode setInputGain(const std::vector<DynamicsProcessing::InputGain>& gain);
+
+ DynamicsProcessing::EngineArchitecture getEngineArchitecture();
+ std::vector<DynamicsProcessing::ChannelConfig> getPreEq();
+ std::vector<DynamicsProcessing::ChannelConfig> getPostEq();
+ std::vector<DynamicsProcessing::EqBandConfig> getPreEqBand();
+ std::vector<DynamicsProcessing::EqBandConfig> getPostEqBand();
+ std::vector<DynamicsProcessing::ChannelConfig> getMbc();
+ std::vector<DynamicsProcessing::MbcBandConfig> getMbcBand();
+ std::vector<DynamicsProcessing::LimiterConfig> getLimiter();
+ std::vector<DynamicsProcessing::InputGain> getInputGain();
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ static constexpr float kPreferredProcessingDurationMs = 10.0f;
+ static constexpr int kBandCount = 5;
+ std::mutex mMutex;
+ size_t mChannelCount GUARDED_BY(mMutex) = 0;
+ DynamicsProcessingState mState GUARDED_BY(mMutex) = DYNAMICS_PROCESSING_STATE_UNINITIALIZED;
+ std::unique_ptr<dp_fx::DPFrequency> mDpFreq GUARDED_BY(mMutex) = nullptr;
+ bool mEngineInited GUARDED_BY(mMutex) = false;
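+    // Default engine: frequency-domain variant, five bands per stage, all stages and the
+    // limiter in use.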
+ DynamicsProcessing::EngineArchitecture mEngineArchitecture GUARDED_BY(mMutex) = {
+ .resolutionPreference =
+ DynamicsProcessing::ResolutionPreference::FAVOR_FREQUENCY_RESOLUTION,
+ .preferredProcessingDurationMs = kPreferredProcessingDurationMs,
+ .preEqStage = {.inUse = true, .bandCount = kBandCount},
+ .postEqStage = {.inUse = true, .bandCount = kBandCount},
+ .mbcStage = {.inUse = true, .bandCount = kBandCount},
+ .limiterInUse = true,
+ };
+
+ enum class StageType { PREEQ, POSTEQ, MBC, LIMITER, INPUTGAIN };
+
+ void init();
+
+ void dpSetFreqDomainVariant_l(const DynamicsProcessing::EngineArchitecture& engine)
+ REQUIRES(mMutex);
+ dp_fx::DPChannel* getChannel_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getPreEq_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getPostEq_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPMbc* getMbc_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPLimiter* getLimiter_l(int ch) REQUIRES(mMutex);
+ dp_fx::DPBandStage* getStageWithType_l(StageType type, int ch) REQUIRES(mMutex);
+ dp_fx::DPEq* getEqWithType_l(StageType type, int ch) REQUIRES(mMutex);
+ template <typename D>
+ RetCode setDpChannels_l(const std::vector<DynamicsProcessing::ChannelConfig>& channels,
+ bool stageInUse, StageType type) REQUIRES(mMutex);
+ template <typename T /* BandConfig */>
+ RetCode setBands_l(const std::vector<T>& bands, int maxBand, StageType type) REQUIRES(mMutex);
+ RetCode setDpChannelBand_l(const std::any& anyConfig, StageType type, int maxCh, int maxBand,
+ std::set<std::pair<int, int>>& chBandSet) REQUIRES(mMutex);
+
+ std::vector<DynamicsProcessing::EqBandConfig> getEqBandConfigs(StageType type);
+ std::vector<DynamicsProcessing::ChannelConfig> getChannelConfig(StageType type);
+
+ bool validateStageEnablement(const DynamicsProcessing::StageEnablement& enablement);
+ bool validateEngineConfig(const DynamicsProcessing::EngineArchitecture& engine);
+ bool validateEqBandConfig(const DynamicsProcessing::EqBandConfig& band, int maxChannel,
+ int maxBand);
+ bool validateMbcBandConfig(const DynamicsProcessing::MbcBandConfig& band, int maxChannel,
+ int maxBand);
+ bool validateLimiterConfig(const DynamicsProcessing::LimiterConfig& limiter, int maxChannel);
+ bool validateInputGainConfig(const DynamicsProcessing::InputGain& gain, int maxChannel);
+
+ inline bool validateCutoffFrequency(float freq);
+ inline bool validateChannel(int ch, int maxCh) { return ch >= 0 && ch < maxCh; }
+ inline bool validateBand(int band, int maxBand) { return band >= 0 && band < maxBand; }
+ inline bool validateTime(int time) { return time >= 0; }
+ inline bool validateRatio(int ratio) { return ratio >= 0; }
+ inline bool validateBandDb(int db) { return db <= 0; }
+};
+
+} // namespace aidl::android::hardware::audio::effect
\ No newline at end of file
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index 07ba492..fc80211 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -37,6 +37,7 @@
header_libs: [
"libaudioeffects",
],
+ relative_install_path: "soundfx",
}
cc_library_shared {
@@ -62,8 +63,6 @@
// with/without `-ffast-math` for more context.
"-fvisibility=hidden",
],
-
- relative_install_path: "soundfx",
}
cc_library_shared {
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
index 64f51c3..8ed579b 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.cpp
@@ -67,7 +67,7 @@
}
RetCode HapticGeneratorContext::setHgHapticScales(
- const std::vector<HapticGenerator::HapticScale> hapticScales) {
+ const std::vector<HapticGenerator::HapticScale>& hapticScales) {
std::lock_guard lg(mMutex);
for (auto hapticScale : hapticScales) {
mParams.mHapticScales.insert_or_assign(hapticScale.id, hapticScale.scale);
diff --git a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
index dc43feb..a0a0a4c 100644
--- a/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
+++ b/media/libeffects/hapticgenerator/aidl/HapticGeneratorContext.h
@@ -69,7 +69,7 @@
RetCode disable();
void reset();
- RetCode setHgHapticScales(const std::vector<HapticGenerator::HapticScale> hapticScales);
+ RetCode setHgHapticScales(const std::vector<HapticGenerator::HapticScale>& hapticScales);
std::vector<HapticGenerator::HapticScale> getHgHapticScales();
RetCode setHgVibratorInformation(const HapticGenerator::VibratorInformation& vibratorInfo);
diff --git a/media/libeffects/loudness/Android.bp b/media/libeffects/loudness/Android.bp
index fc0217b..7acba11 100644
--- a/media/libeffects/loudness/Android.bp
+++ b/media/libeffects/loudness/Android.bp
@@ -69,6 +69,7 @@
"libcutils",
"liblog",
],
+ relative_install_path: "soundfx",
visibility: [
"//hardware/interfaces/audio/aidl/default",
],
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
index 3aee721..e303efd 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.cpp
@@ -24,8 +24,9 @@
namespace aidl::android::hardware::audio::effect {
-using aidl::android::media::audio::common::AudioDeviceDescription;
-using aidl::android::media::audio::common::AudioDeviceType;
+using ::aidl::android::media::audio::common::AudioChannelLayout;
+using ::aidl::android::media::audio::common::AudioDeviceDescription;
+using ::aidl::android::media::audio::common::AudioDeviceType;
RetCode BundleContext::init() {
std::lock_guard lg(mMutex);
@@ -287,32 +288,47 @@
}
bool BundleContext::isDeviceSupportedBassBoost(
- const aidl::android::media::audio::common::AudioDeviceDescription& device) {
- return (device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER, ""} ||
- device == AudioDeviceDescription{AudioDeviceType::OUT_CARKIT,
- AudioDeviceDescription::CONNECTION_BT_SCO} ||
- device == AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER,
- AudioDeviceDescription::CONNECTION_BT_A2DP});
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>& devices) {
+ for (const auto& device : devices) {
+ if (device != AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER, ""} &&
+ device != AudioDeviceDescription{AudioDeviceType::OUT_CARKIT,
+ AudioDeviceDescription::CONNECTION_BT_SCO} &&
+ device != AudioDeviceDescription{AudioDeviceType::OUT_SPEAKER,
+ AudioDeviceDescription::CONNECTION_BT_A2DP}) {
+ return false;
+ }
+ }
+ return true;
}
bool BundleContext::isDeviceSupportedVirtualizer(
- const aidl::android::media::audio::common::AudioDeviceDescription& device) {
- return (device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_ANALOG} ||
- device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
- AudioDeviceDescription::CONNECTION_ANALOG} ||
- device == AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
- AudioDeviceDescription::CONNECTION_BT_A2DP} ||
- device == AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_USB});
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>& devices) {
+ for (const auto& device : devices) {
+ if (device != AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_ANALOG} &&
+ device != AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_ANALOG} &&
+ device != AudioDeviceDescription{AudioDeviceType::OUT_HEADPHONE,
+ AudioDeviceDescription::CONNECTION_BT_A2DP} &&
+ device != AudioDeviceDescription{AudioDeviceType::OUT_HEADSET,
+ AudioDeviceDescription::CONNECTION_USB}) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool BundleContext::isConfigSupportedVirtualizer(size_t channelCount,
+ const AudioDeviceDescription& device) {
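+    // Virtualizer processing only supports mono and stereo content on a supported output device.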
+ return (channelCount >= 1 && channelCount <= FCC_2) && isDeviceSupportedVirtualizer({device});
}
RetCode BundleContext::setOutputDevice(
- const aidl::android::media::audio::common::AudioDeviceDescription& device) {
- mOutputDevice = device;
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>& devices) {
+ mOutputDevice = devices;
switch (mType) {
case lvm::BundleEffectType::BASS_BOOST:
- if (isDeviceSupportedBassBoost(device)) {
+ if (!isDeviceSupportedBassBoost(devices)) {
// If a device doesn't support bass boost, the effect must be temporarily disabled.
// The effect must still report its original state as this can only be changed by
// the start/stop commands.
@@ -330,7 +346,7 @@
}
break;
case lvm::BundleEffectType::VIRTUALIZER:
- if (isDeviceSupportedVirtualizer(device)) {
+ if (!isDeviceSupportedVirtualizer(devices)) {
if (mEnabled) {
disableOperatingMode();
}
@@ -459,6 +475,23 @@
return bandLevels;
}
+std::vector<int32_t> BundleContext::getEqualizerCenterFreqs() {
+ std::vector<int32_t> freqs;
+
+ LVM_ControlParams_t params;
+ {
+ std::lock_guard lg(mMutex);
+ /* Get the current settings */
+        RETURN_VALUE_IF(LVM_SUCCESS != LVM_GetControlParameters(mInstance, &params), freqs,
+ " getControlParamFailed");
+ for (std::size_t i = 0; i < lvm::MAX_NUM_BANDS; i++) {
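+            // LVM reports the band frequency in Hz; scale by 1000 to the milliHz expected by
+            // Equalizer::centerFreqMh.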
+ freqs.push_back((int32_t)params.pEQNB_BandDefinition[i].Frequency * 1000);
+ }
+ }
+
+ return freqs;
+}
+
bool BundleContext::isBandLevelIndexInRange(
const std::vector<Equalizer::BandLevel>& bandLevels) const {
const auto [min, max] =
@@ -573,6 +606,15 @@
return limitLevel();
}
+
+RetCode BundleContext::setForcedDevice(
+ const ::aidl::android::media::audio::common::AudioDeviceDescription& device) {
+ RETURN_VALUE_IF(true != isDeviceSupportedVirtualizer({device}), RetCode::ERROR_EFFECT_LIB_ERROR,
+ " deviceNotSupportVirtualizer");
+ mForceDevice = device;
+ return RetCode::SUCCESS;
+}
+
void BundleContext::initControlParameter(LVM_ControlParams_t& params) const {
/* General parameters */
params.OperatingMode = LVM_MODE_ON;
@@ -658,6 +700,28 @@
return HeadroomBandDef;
}
+std::vector<Virtualizer::ChannelAngle> BundleContext::getSpeakerAngles(
+ const Virtualizer::SpeakerAnglesPayload payload) {
+ std::vector<Virtualizer::ChannelAngle> angles;
+ auto chCount = ::android::hardware::audio::common::getChannelCount(payload.layout);
+ RETURN_VALUE_IF(!isConfigSupportedVirtualizer(chCount, payload.device), angles,
+ "payloadNotSupported");
+
+ if (chCount == 1) {
+ angles = {{.channel = (int32_t)AudioChannelLayout::CHANNEL_FRONT_LEFT,
+ .azimuthDegree = 0,
+ .elevationDegree = 0}};
+ } else {
+ angles = {{.channel = (int32_t)AudioChannelLayout::CHANNEL_FRONT_LEFT,
+ .azimuthDegree = -90,
+ .elevationDegree = 0},
+ {.channel = (int32_t)AudioChannelLayout::CHANNEL_FRONT_RIGHT,
+ .azimuthDegree = 90,
+ .elevationDegree = 0}};
+ }
+ return angles;
+}
+
IEffect::Status BundleContext::lvmProcess(float* in, float* out, int samples) {
IEffect::Status status = {EX_NULL_POINTER, 0, 0};
RETURN_VALUE_IF(!in, status, "nullInput");
diff --git a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
index be723f7..47d5e5a 100644
--- a/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
+++ b/media/libeffects/lvm/wrapper/Aidl/BundleContext.h
@@ -57,17 +57,26 @@
return mChMask;
}
bool isDeviceSupportedBassBoost(
- const aidl::android::media::audio::common::AudioDeviceDescription& device);
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>&
+ devices);
bool isDeviceSupportedVirtualizer(
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>&
+ devices);
+ bool isConfigSupportedVirtualizer(
+ size_t channelCount,
const aidl::android::media::audio::common::AudioDeviceDescription& device);
+
RetCode setOutputDevice(
- const aidl::android::media::audio::common::AudioDeviceDescription& device) override;
+ const std::vector<aidl::android::media::audio::common::AudioDeviceDescription>& devices)
+ override;
RetCode setEqualizerPreset(const std::size_t presetIdx);
int getEqualizerPreset() const { return mCurPresetIdx; }
RetCode setEqualizerBandLevels(const std::vector<Equalizer::BandLevel>& bandLevels);
std::vector<Equalizer::BandLevel> getEqualizerBandLevels() const;
+ std::vector<int32_t> getEqualizerCenterFreqs();
+
RetCode setBassBoostStrength(int strength);
int getBassBoostStrength() const { return mBassStrengthSaved; }
@@ -80,6 +89,14 @@
RetCode setVirtualizerStrength(int strength);
int getVirtualizerStrength() const { return mVirtStrengthSaved; }
+ RetCode setForcedDevice(
+ const ::aidl::android::media::audio::common::AudioDeviceDescription& device);
+ aidl::android::media::audio::common::AudioDeviceDescription getForcedDevice() const {
+ return mForceDevice;
+ }
+ std::vector<Virtualizer::ChannelAngle> getSpeakerAngles(
+ const Virtualizer::SpeakerAnglesPayload payload);
+
RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override;
Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
@@ -122,6 +139,7 @@
// Virtualizer
int mVirtStrengthSaved = 0; /* Conversion between Get/Set */
bool mVirtualizerTempDisabled = false;
+ ::aidl::android::media::audio::common::AudioDeviceDescription mForceDevice;
// Volume
int mLevelSaved = 0; /* for when mute is set, level must be saved */
int mVolume = 0;
diff --git a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
index 81b8aca..fd9f3dc 100644
--- a/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
+++ b/media/libeffects/lvm/wrapper/Aidl/EffectBundleAidl.cpp
@@ -218,10 +218,18 @@
EX_ILLEGAL_ARGUMENT, "setStrengthFailed");
return ndk::ScopedAStatus::ok();
}
- default:
- LOG(ERROR) << __func__ << " unsupported parameter " << specific.toString();
+ case Virtualizer::device: {
+ RETURN_IF(mContext->setForcedDevice(vr.get<Virtualizer::device>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDeviceFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case Virtualizer::speakerAngles:
+ FALLTHROUGH_INTENDED;
+ case Virtualizer::vendor: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(vrTag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
- "vrTagNotSupported");
+ "VirtualizerTagNotSupported");
+ }
}
}
@@ -283,6 +291,10 @@
eqParam.set<Equalizer::preset>(mContext->getEqualizerPreset());
break;
}
+ case Equalizer::centerFreqMh: {
+ eqParam.set<Equalizer::centerFreqMh>(mContext->getEqualizerCenterFreqs());
+ break;
+ }
default: {
LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
@@ -354,14 +366,27 @@
RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
Virtualizer vrParam;
+ if (id.getTag() == Virtualizer::Id::speakerAnglesPayload) {
+ auto angles = mContext->getSpeakerAngles(id.get<Virtualizer::Id::speakerAnglesPayload>());
+ Virtualizer param = Virtualizer::make<Virtualizer::speakerAngles>(angles);
+ specific->set<Parameter::Specific::virtualizer>(param);
+ return ndk::ScopedAStatus::ok();
+ }
+
auto tag = id.get<Virtualizer::Id::commonTag>();
switch (tag) {
case Virtualizer::strengthPm: {
vrParam.set<Virtualizer::strengthPm>(mContext->getVirtualizerStrength());
break;
}
- default: {
- LOG(ERROR) << __func__ << " not handled tag: " << toString(tag);
+ case Virtualizer::device: {
+ vrParam.set<Virtualizer::device>(mContext->getForcedDevice());
+ break;
+ }
+ case Virtualizer::speakerAngles:
+ FALLTHROUGH_INTENDED;
+ case Virtualizer::vendor: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
"VirtualizerTagNotSupported");
}
diff --git a/media/libeffects/lvm/wrapper/Android.bp b/media/libeffects/lvm/wrapper/Android.bp
index aef9295..fa300d2 100644
--- a/media/libeffects/lvm/wrapper/Android.bp
+++ b/media/libeffects/lvm/wrapper/Android.bp
@@ -125,7 +125,41 @@
cflags: [
"-Wthread-safety",
],
+ relative_install_path: "soundfx",
visibility: [
"//hardware/interfaces/audio/aidl/default",
],
-}
\ No newline at end of file
+}
+
+cc_library_shared {
+ name: "libreverbaidl",
+ srcs: [
+ "Reverb/aidl/ReverbContext.cpp",
+ "Reverb/aidl/EffectReverb.cpp",
+ ":effectCommonFile",
+ ],
+ static_libs: ["libreverb"],
+ defaults: [
+ "aidlaudioservice_defaults",
+ "latest_android_hardware_audio_effect_ndk_shared",
+ "latest_android_media_audio_common_types_ndk_shared",
+ ],
+ local_include_dirs: ["Reverb/aidl"],
+ header_libs: [
+ "libaudioeffects",
+ "libhardware_headers",
+ ],
+ shared_libs: [
+ "libbase",
+ "libaudioutils",
+ "libcutils",
+ "liblog",
+ ],
+ cflags: [
+ "-Wthread-safety",
+ ],
+ relative_install_path: "soundfx",
+ visibility: [
+ "//hardware/interfaces/audio/aidl/default",
+ ],
+}
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
new file mode 100644
index 0000000..51825ca
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.cpp
@@ -0,0 +1,373 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "EffectReverb"
+#include <Utils.h>
+#include <algorithm>
+#include <unordered_set>
+
+#include <android-base/logging.h>
+#include <fmq/AidlMessageQueue.h>
+#include <audio_effects/effect_bassboost.h>
+#include <audio_effects/effect_equalizer.h>
+#include <audio_effects/effect_virtualizer.h>
+
+#include "EffectReverb.h"
+#include <limits.h>
+
+using aidl::android::hardware::audio::effect::Descriptor;
+using aidl::android::hardware::audio::effect::EffectReverb;
+using aidl::android::hardware::audio::effect::IEffect;
+using aidl::android::hardware::audio::effect::kAuxEnvReverbImplUUID;
+using aidl::android::hardware::audio::effect::kAuxPresetReverbImplUUID;
+using aidl::android::hardware::audio::effect::kInsertEnvReverbImplUUID;
+using aidl::android::hardware::audio::effect::kInsertPresetReverbImplUUID;
+using aidl::android::hardware::audio::effect::State;
+using aidl::android::media::audio::common::AudioUuid;
+
+bool isReverbUuidSupported(const AudioUuid* uuid) {
+ return (*uuid == kAuxEnvReverbImplUUID || *uuid == kInsertEnvReverbImplUUID ||
+ *uuid == kAuxPresetReverbImplUUID || *uuid == kInsertPresetReverbImplUUID);
+}
+
+extern "C" binder_exception_t createEffect(const AudioUuid* uuid,
+ std::shared_ptr<IEffect>* instanceSpp) {
+ if (uuid == nullptr || !isReverbUuidSupported(uuid)) {
+        LOG(ERROR) << __func__ << " uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (instanceSpp) {
+ *instanceSpp = ndk::SharedRefBase::make<EffectReverb>(*uuid);
+ LOG(DEBUG) << __func__ << " instance " << instanceSpp->get() << " created";
+ return EX_NONE;
+ } else {
+ LOG(ERROR) << __func__ << " invalid input parameter!";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+}
+
+extern "C" binder_exception_t queryEffect(const AudioUuid* in_impl_uuid, Descriptor* _aidl_return) {
+ if (!in_impl_uuid || !isReverbUuidSupported(in_impl_uuid)) {
+        LOG(ERROR) << __func__ << " uuid not supported";
+ return EX_ILLEGAL_ARGUMENT;
+ }
+ if (*in_impl_uuid == kAuxEnvReverbImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kAuxEnvReverbDesc;
+ } else if (*in_impl_uuid == kInsertEnvReverbImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kInsertEnvReverbDesc;
+ } else if (*in_impl_uuid == kAuxPresetReverbImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kAuxPresetReverbDesc;
+ } else if (*in_impl_uuid == kInsertPresetReverbImplUUID) {
+ *_aidl_return = aidl::android::hardware::audio::effect::lvm::kInsertPresetReverbDesc;
+ }
+ return EX_NONE;
+}
+
+namespace aidl::android::hardware::audio::effect {
+
+EffectReverb::EffectReverb(const AudioUuid& uuid) {
+    LOG(DEBUG) << __func__ << " " << uuid.toString();
+ if (uuid == kAuxEnvReverbImplUUID) {
+ mType = lvm::ReverbEffectType::AUX_ENV;
+ mDescriptor = &lvm::kAuxEnvReverbDesc;
+ mEffectName = &lvm::kAuxEnvReverbEffectName;
+ } else if (uuid == kInsertEnvReverbImplUUID) {
+ mType = lvm::ReverbEffectType::INSERT_ENV;
+ mDescriptor = &lvm::kInsertEnvReverbDesc;
+ mEffectName = &lvm::kInsertEnvReverbEffectName;
+ } else if (uuid == kAuxPresetReverbImplUUID) {
+ mType = lvm::ReverbEffectType::AUX_PRESET;
+ mDescriptor = &lvm::kAuxPresetReverbDesc;
+ mEffectName = &lvm::kAuxPresetReverbEffectName;
+ } else if (uuid == kInsertPresetReverbImplUUID) {
+ mType = lvm::ReverbEffectType::INSERT_PRESET;
+ mDescriptor = &lvm::kInsertPresetReverbDesc;
+ mEffectName = &lvm::kInsertPresetReverbEffectName;
+ } else {
+ LOG(ERROR) << __func__ << uuid.toString() << " not supported!";
+ }
+}
+
+EffectReverb::~EffectReverb() {
+ cleanUp();
+ LOG(DEBUG) << __func__;
+}
+
+ndk::ScopedAStatus EffectReverb::getDescriptor(Descriptor* _aidl_return) {
+ RETURN_IF(!_aidl_return, EX_ILLEGAL_ARGUMENT, "Parameter:nullptr");
+ LOG(DEBUG) << _aidl_return->toString();
+ *_aidl_return = *mDescriptor;
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectReverb::setParameterSpecific(const Parameter::Specific& specific) {
+ LOG(DEBUG) << __func__ << " specific " << specific.toString();
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+
+ auto tag = specific.getTag();
+ switch (tag) {
+ case Parameter::Specific::presetReverb:
+ return setParameterPresetReverb(specific);
+ case Parameter::Specific::environmentalReverb:
+ return setParameterEnvironmentalReverb(specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "specificParamNotSupported");
+ }
+}
+
+ndk::ScopedAStatus EffectReverb::setParameterPresetReverb(const Parameter::Specific& specific) {
+ auto& prParam = specific.get<Parameter::Specific::presetReverb>();
+ auto tag = prParam.getTag();
+
+ switch (tag) {
+ case PresetReverb::preset: {
+ RETURN_IF(mContext->setPresetReverbPreset(prParam.get<PresetReverb::preset>()) !=
+ RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setPresetFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "PresetReverbTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectReverb::setParameterEnvironmentalReverb(
+ const Parameter::Specific& specific) {
+ auto& erParam = specific.get<Parameter::Specific::environmentalReverb>();
+ auto tag = erParam.getTag();
+
+ switch (tag) {
+ case EnvironmentalReverb::roomLevelMb: {
+ RETURN_IF(mContext->setEnvironmentalReverbRoomLevel(
+ erParam.get<EnvironmentalReverb::roomLevelMb>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setRoomLevelFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::roomHfLevelMb: {
+ RETURN_IF(
+ mContext->setEnvironmentalReverbRoomHfLevel(
+ erParam.get<EnvironmentalReverb::roomHfLevelMb>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setRoomHfLevelFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::decayTimeMs: {
+ RETURN_IF(mContext->setEnvironmentalReverbDecayTime(
+ erParam.get<EnvironmentalReverb::decayTimeMs>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDecayTimeFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::decayHfRatioPm: {
+ RETURN_IF(
+ mContext->setEnvironmentalReverbDecayHfRatio(
+ erParam.get<EnvironmentalReverb::decayHfRatioPm>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDecayHfRatioFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::levelMb: {
+ RETURN_IF(mContext->setEnvironmentalReverbLevel(
+ erParam.get<EnvironmentalReverb::levelMb>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setLevelFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::delayMs: {
+ RETURN_IF(mContext->setEnvironmentalReverbDelay(
+ erParam.get<EnvironmentalReverb::delayMs>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDelayFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::diffusionPm: {
+ RETURN_IF(mContext->setEnvironmentalReverbDiffusion(
+ erParam.get<EnvironmentalReverb::diffusionPm>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDiffusionFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::densityPm: {
+ RETURN_IF(mContext->setEnvironmentalReverbDensity(
+ erParam.get<EnvironmentalReverb::densityPm>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setDensityFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ case EnvironmentalReverb::bypass: {
+ RETURN_IF(mContext->setEnvironmentalReverbBypass(
+ erParam.get<EnvironmentalReverb::bypass>()) != RetCode::SUCCESS,
+ EX_ILLEGAL_ARGUMENT, "setBypassFailed");
+ return ndk::ScopedAStatus::ok();
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "EnvironmentalReverbTagNotSupported");
+ }
+ }
+}
+
+ndk::ScopedAStatus EffectReverb::getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(!specific, EX_NULL_POINTER, "nullPtr");
+ auto tag = id.getTag();
+
+ switch (tag) {
+ case Parameter::Id::presetReverbTag:
+ return getParameterPresetReverb(id.get<Parameter::Id::presetReverbTag>(), specific);
+ case Parameter::Id::environmentalReverbTag:
+ return getParameterEnvironmentalReverb(id.get<Parameter::Id::environmentalReverbTag>(),
+ specific);
+ default:
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "wrongIdTag");
+ }
+}
+
+ndk::ScopedAStatus EffectReverb::getParameterPresetReverb(const PresetReverb::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != PresetReverb::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "PresetReverbTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ PresetReverb prParam;
+ auto tag = id.get<PresetReverb::Id::commonTag>();
+ switch (tag) {
+ case PresetReverb::preset: {
+ prParam.set<PresetReverb::preset>(mContext->getPresetReverbPreset());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "PresetReverbTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::presetReverb>(prParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+ndk::ScopedAStatus EffectReverb::getParameterEnvironmentalReverb(const EnvironmentalReverb::Id& id,
+ Parameter::Specific* specific) {
+ RETURN_IF(id.getTag() != EnvironmentalReverb::Id::commonTag, EX_ILLEGAL_ARGUMENT,
+ "EnvironmentalReverbTagNotSupported");
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ EnvironmentalReverb erParam;
+
+ auto tag = id.get<EnvironmentalReverb::Id::commonTag>();
+ switch (tag) {
+ case EnvironmentalReverb::roomLevelMb: {
+ erParam.set<EnvironmentalReverb::roomLevelMb>(
+ mContext->getEnvironmentalReverbRoomLevel());
+ break;
+ }
+ case EnvironmentalReverb::roomHfLevelMb: {
+ erParam.set<EnvironmentalReverb::roomHfLevelMb>(
+ mContext->getEnvironmentalReverbRoomHfLevel());
+ break;
+ }
+ case EnvironmentalReverb::decayTimeMs: {
+ erParam.set<EnvironmentalReverb::decayTimeMs>(
+ mContext->getEnvironmentalReverbDecayTime());
+ break;
+ }
+ case EnvironmentalReverb::decayHfRatioPm: {
+ erParam.set<EnvironmentalReverb::decayHfRatioPm>(
+ mContext->getEnvironmentalReverbDecayHfRatio());
+ break;
+ }
+ case EnvironmentalReverb::levelMb: {
+ erParam.set<EnvironmentalReverb::levelMb>(mContext->getEnvironmentalReverbLevel());
+ break;
+ }
+ case EnvironmentalReverb::delayMs: {
+ erParam.set<EnvironmentalReverb::delayMs>(mContext->getEnvironmentalReverbDelay());
+ break;
+ }
+ case EnvironmentalReverb::diffusionPm: {
+ erParam.set<EnvironmentalReverb::diffusionPm>(
+ mContext->getEnvironmentalReverbDiffusion());
+ break;
+ }
+ case EnvironmentalReverb::densityPm: {
+ erParam.set<EnvironmentalReverb::densityPm>(mContext->getEnvironmentalReverbDensity());
+ break;
+ }
+ case EnvironmentalReverb::bypass: {
+ erParam.set<EnvironmentalReverb::bypass>(mContext->getEnvironmentalReverbBypass());
+ break;
+ }
+ default: {
+ LOG(ERROR) << __func__ << " unsupported tag: " << toString(tag);
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(
+ EX_ILLEGAL_ARGUMENT, "EnvironmentalReverbTagNotSupported");
+ }
+ }
+
+ specific->set<Parameter::Specific::environmentalReverb>(erParam);
+ return ndk::ScopedAStatus::ok();
+}
+
+std::shared_ptr<EffectContext> EffectReverb::createContext(const Parameter::Common& common) {
+ if (mContext) {
+ LOG(DEBUG) << __func__ << " context already exist";
+ } else {
+ mContext = std::make_shared<ReverbContext>(1 /* statusFmqDepth */, common, mType);
+ }
+
+ return mContext;
+}
+
+std::shared_ptr<EffectContext> EffectReverb::getContext() {
+ return mContext;
+}
+
+RetCode EffectReverb::releaseContext() {
+ if (mContext) {
+ mContext.reset();
+ }
+ return RetCode::SUCCESS;
+}
+
+ndk::ScopedAStatus EffectReverb::commandImpl(CommandId command) {
+ RETURN_IF(!mContext, EX_NULL_POINTER, "nullContext");
+ switch (command) {
+ case CommandId::START:
+ mContext->enable();
+ break;
+ case CommandId::STOP:
+ mContext->disable();
+ break;
+ case CommandId::RESET:
+ mContext->disable();
+ mContext->resetBuffer();
+ break;
+ default:
+ LOG(ERROR) << __func__ << " commandId " << toString(command) << " not supported";
+ return ndk::ScopedAStatus::fromExceptionCodeWithMessage(EX_ILLEGAL_ARGUMENT,
+ "commandIdNotSupported");
+ }
+ return ndk::ScopedAStatus::ok();
+}
+
+// Processing method running in EffectWorker thread.
+IEffect::Status EffectReverb::effectProcessImpl(float* in, float* out, int sampleToProcess) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!mContext, status, "nullContext");
+ return mContext->lvmProcess(in, out, sampleToProcess);
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.h b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.h
new file mode 100644
index 0000000..d7d2bbd
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/EffectReverb.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+
+#include "effect-impl/EffectImpl.h"
+#include "ReverbContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+class EffectReverb final : public EffectImpl {
+ public:
+ explicit EffectReverb(const AudioUuid& uuid);
+ ~EffectReverb() override;
+
+ ndk::ScopedAStatus getDescriptor(Descriptor* _aidl_return) override;
+
+ ndk::ScopedAStatus setParameterSpecific(const Parameter::Specific& specific) override;
+ ndk::ScopedAStatus getParameterSpecific(const Parameter::Id& id,
+ Parameter::Specific* specific) override;
+
+ std::shared_ptr<EffectContext> createContext(const Parameter::Common& common) override;
+ std::shared_ptr<EffectContext> getContext() override;
+ RetCode releaseContext() override;
+
+ IEffect::Status effectProcessImpl(float* in, float* out, int samples) override;
+
+ ndk::ScopedAStatus commandImpl(CommandId command) override;
+
+ std::string getEffectName() override { return *mEffectName; }
+
+ private:
+ std::shared_ptr<ReverbContext> mContext;
+ const Descriptor* mDescriptor;
+ const std::string* mEffectName;
+ lvm::ReverbEffectType mType;
+
+ IEffect::Status status(binder_status_t status, size_t consumed, size_t produced);
+
+ ndk::ScopedAStatus setParameterPresetReverb(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterPresetReverb(const PresetReverb::Id& id,
+ Parameter::Specific* specific);
+
+ ndk::ScopedAStatus setParameterEnvironmentalReverb(const Parameter::Specific& specific);
+ ndk::ScopedAStatus getParameterEnvironmentalReverb(const EnvironmentalReverb::Id& id,
+ Parameter::Specific* specific);
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
new file mode 100644
index 0000000..d35c22b
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.cpp
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <cstddef>
+#define LOG_TAG "ReverbContext"
+#include <Utils.h>
+
+#include "ReverbContext.h"
+#include "VectorArithmetic.h"
+#include "math.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+using aidl::android::media::audio::common::AudioDeviceDescription;
+using aidl::android::media::audio::common::AudioDeviceType;
+
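+// Log the LVREV return status and jump to the given cleanup label on failure.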
+#define GOTO_IF_LVREV_ERROR(status, tag, log) \
+ do { \
+ LVREV_ReturnStatus_en temp = (status); \
+ if (temp != LVREV_SUCCESS) { \
+ LOG(ERROR) << __func__ << " return status: " << temp << " " << (log); \
+ goto tag; \
+ } \
+ } while (0)
+
+RetCode ReverbContext::init() {
+ if (isPreset()) {
+ // force reloading preset at first call to process()
+ mPreset = PresetReverb::Presets::NONE;
+ mNextPreset = PresetReverb::Presets::NONE;
+ }
+
+ mVolume.left = kUnitVolume;
+ mVolume.right = kUnitVolume;
+ mPrevVolume.left = kUnitVolume;
+ mPrevVolume.right = kUnitVolume;
+ volumeMode = VOLUME_FLAT;
+
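+    // Number of samples over which the reverb tail decays once the effect is disabled.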
+ mSamplesToExitCount = kDefaultDecayTime * mCommon.input.base.sampleRate / 1000;
+
+    /* Saved parameter values are returned by the getters exactly as they were set,
+     * because mapping them into the narrower LVM ranges and back would introduce a
+     * quantisation-like error.
+     */
+ mRoomLevel = lvm::kMinLevel;
+ mRoomHfLevel = 0;
+ mEnabled = LVM_FALSE;
+ mDecayTime = kDefaultDecayTime;
+ mDecayHfRatio = kDefaultDamping * 20;
+ mDensity = kDefaultRoomSize * 10;
+ mDiffusion = kDefaultDensity * 10;
+ mLevel = lvm::kMinLevel;
+
+ // allocate lvm reverb instance
+ LVREV_ReturnStatus_en status = LVREV_SUCCESS;
+ {
+ std::lock_guard lg(mMutex);
+ LVREV_InstanceParams_st params = {
+ .MaxBlockSize = lvm::kMaxCallSize,
+ // Max format, could be mono during process
+ .SourceFormat = LVM_STEREO,
+ .NumDelays = LVREV_DELAYLINES_4,
+ };
+ /* Init sets the instance handle */
+        status = LVREV_GetInstanceHandle(&mInstance, &params);
+ GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_GetInstanceHandleFailed");
+
+ // set control
+ LVREV_ControlParams_st controlParams;
+ initControlParameter(controlParams);
+ status = LVREV_SetControlParameters(mInstance, &controlParams);
+ GOTO_IF_LVREV_ERROR(status, deinit, "LVREV_SetControlParametersFailed");
+ }
+
+ return RetCode::SUCCESS;
+
+deinit:
+ deInit();
+ return RetCode::ERROR_EFFECT_LIB_ERROR;
+}
+
+void ReverbContext::deInit() {
+ std::lock_guard lg(mMutex);
+ if (mInstance) {
+ LVREV_FreeInstance(mInstance);
+ mInstance = nullptr;
+ }
+}
+
+RetCode ReverbContext::enable() {
+ if (mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;
+ mEnabled = true;
+ mSamplesToExitCount = (mDecayTime * mCommon.input.base.sampleRate) / 1000;
+ // force no volume ramp for first buffer processed after enabling the effect
+ volumeMode = VOLUME_FLAT;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::disable() {
+ if (!mEnabled) return RetCode::ERROR_ILLEGAL_PARAMETER;
+ mEnabled = false;
+ return RetCode::SUCCESS;
+}
+
+bool ReverbContext::isAuxiliary() {
+ return (mType == lvm::ReverbEffectType::AUX_ENV || mType == lvm::ReverbEffectType::AUX_PRESET);
+}
+
+bool ReverbContext::isPreset() {
+ return (mType == lvm::ReverbEffectType::AUX_PRESET ||
+ mType == lvm::ReverbEffectType::INSERT_PRESET);
+}
+
+RetCode ReverbContext::setVolumeStereo(const Parameter::VolumeStereo& volume) {
+ if (volumeMode == VOLUME_OFF) {
+ // force no volume ramp for first buffer processed after getting volume control
+ volumeMode = VOLUME_FLAT;
+ }
+ mVolumeStereo = volume;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setPresetReverbPreset(const PresetReverb::Presets& preset) {
+ mNextPreset = preset;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbRoomLevel(int roomLevel) {
+ if (roomLevel < lvm::kEnvReverbCap.minRoomLevelMb ||
+ roomLevel > lvm::kEnvReverbCap.maxRoomLevelMb) {
+ LOG(ERROR) << __func__ << " invalid roomLevel: " << roomLevel;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ // Sum of room and reverb level controls
+ // needs to subtract max levels for both room level and reverb level
+ int combinedLevel = (roomLevel + mLevel) - lvm::kMaxReverbLevel;
+ params.Level = convertLevel(combinedLevel);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mRoomLevel = roomLevel;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbRoomHfLevel(int roomHfLevel) {
+ if (roomHfLevel < lvm::kEnvReverbCap.minRoomHfLevelMb ||
+ roomHfLevel > lvm::kEnvReverbCap.maxRoomHfLevelMb) {
+ LOG(ERROR) << __func__ << " invalid roomHfLevel: " << roomHfLevel;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.LPF = convertHfLevel(roomHfLevel);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mRoomHfLevel = roomHfLevel;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbDecayTime(int decayTime) {
+ if (decayTime < 0 || decayTime > lvm::kEnvReverbCap.maxDecayTimeMs) {
+ LOG(ERROR) << __func__ << " invalid decayTime: " << decayTime;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ int time = decayTime;
+ if (time > lvm::kMaxT60) {
+ time = lvm::kMaxT60;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.T60 = (LVM_UINT16)time;
+ mSamplesToExitCount = (params.T60 * mCommon.input.base.sampleRate) / 1000;
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mDecayTime = time;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbDecayHfRatio(int decayHfRatio) {
+ if (decayHfRatio < lvm::kEnvReverbCap.minDecayHfRatioPm ||
+ decayHfRatio > lvm::kEnvReverbCap.maxDecayHfRatioPm) {
+ LOG(ERROR) << __func__ << " invalid decayHfRatio: " << decayHfRatio;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.Damping = (LVM_INT16)(decayHfRatio / 20);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mDecayHfRatio = decayHfRatio;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbLevel(int level) {
+ if (level < lvm::kEnvReverbCap.minLevelMb || level > lvm::kEnvReverbCap.maxLevelMb) {
+ LOG(ERROR) << __func__ << " invalid level: " << level;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ // Sum of room and reverb level controls
+ // needs to subtract max levels for both room level and level
+ int combinedLevel = (level + mRoomLevel) - lvm::kMaxReverbLevel;
+ params.Level = convertLevel(combinedLevel);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mLevel = level;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbDelay(int delay) {
+ if (delay < 0 || delay > lvm::kEnvReverbCap.maxDelayMs) {
+ LOG(ERROR) << __func__ << " invalid delay: " << delay;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+ mDelay = delay;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbDiffusion(int diffusion) {
+ if (diffusion < 0 || diffusion > lvm::kEnvReverbCap.maxDiffusionPm) {
+ LOG(ERROR) << __func__ << " invalid diffusion: " << diffusion;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.Density = (LVM_INT16)(diffusion / 10);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mDiffusion = diffusion;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbDensity(int density) {
+ if (density < 0 || density > lvm::kEnvReverbCap.maxDensityPm) {
+ LOG(ERROR) << __func__ << " invalid density: " << density;
+ return RetCode::ERROR_ILLEGAL_PARAMETER;
+ }
+
+ // Update Control Parameter
+ LVREV_ControlParams_st params;
+ {
+ std::lock_guard lg(mMutex);
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_GetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " getControlParamFailed");
+
+ params.RoomSize = (LVM_INT16)(((density * 99) / 1000) + 1);
+
+        RETURN_VALUE_IF(LVREV_SUCCESS != LVREV_SetControlParameters(mInstance, &params),
+ RetCode::ERROR_EFFECT_LIB_ERROR, " setControlParamFailed");
+ }
+ mDensity = density;
+ return RetCode::SUCCESS;
+}
+
+RetCode ReverbContext::setEnvironmentalReverbBypass(bool bypass) {
+ mBypass = bypass;
+ return RetCode::SUCCESS;
+}
+
+void ReverbContext::loadPreset() {
+ // TODO: add delay when early reflections are implemented
+ mPreset = mNextPreset;
+
+ if (mPreset != PresetReverb::Presets::NONE) {
+ const t_reverb_settings preset = mReverbPresets[mPreset];
+ setEnvironmentalReverbRoomLevel(preset.roomLevel);
+ setEnvironmentalReverbRoomHfLevel(preset.roomHFLevel);
+ setEnvironmentalReverbDecayTime(preset.decayTime);
+ setEnvironmentalReverbDecayHfRatio(preset.decayHFRatio);
+ setEnvironmentalReverbLevel(preset.reverbLevel);
+ // reverbDelay
+ setEnvironmentalReverbDiffusion(preset.diffusion);
+ setEnvironmentalReverbDensity(preset.density);
+ }
+}
+
+void ReverbContext::initControlParameter(LVREV_ControlParams_st& params) {
+ /* Set the initial process parameters */
+ /* General parameters */
+ params.OperatingMode = LVM_MODE_ON;
+ params.SampleRate = LVM_FS_44100;
+ params.SourceFormat = (::android::hardware::audio::common::getChannelCount(
+ mCommon.input.base.channelMask) == 1
+ ? LVM_MONO
+ : LVM_STEREO);
+
+ if (!isAuxiliary() && params.SourceFormat == LVM_MONO) {
+ params.SourceFormat = LVM_STEREO;
+ }
+
+ /* Reverb parameters */
+ params.Level = kDefaultLevel;
+ params.LPF = kDefaultLPF;
+ params.HPF = kDefaultHPF;
+ params.T60 = kDefaultDecayTime;
+ params.Density = kDefaultDensity;
+ params.Damping = kDefaultDamping;
+ params.RoomSize = kDefaultRoomSize;
+}
+
+/*
+ * Convert level from OpenSL ES format to LVM format
+ *
+ * @param level : level to be applied
+ */
+
+int ReverbContext::convertLevel(int level) {
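+    // kLevelMapping is sorted ascending; the index of the first entry >= level is the LVREV Level value.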
+ for (int i = 0; i < kLevelMapping.size(); i++) {
+ if (level <= kLevelMapping[i]) {
+ return i;
+ }
+ }
+ return kDefaultLevel;
+}
+
+/*
+ * Convert level HF from OpenSL ES format to LVM format
+ *
+ * @param hfLevel : level to be applied
+ */
+
+int16_t ReverbContext::convertHfLevel(int hfLevel) {
+ for (auto lpfPair : kLPFMapping) {
+ if (hfLevel <= lpfPair.roomHf) {
+ return lpfPair.lpf;
+ }
+ }
+ return kDefaultLPF;
+}
+
+IEffect::Status ReverbContext::lvmProcess(float* in, float* out, int samples) {
+ IEffect::Status status = {EX_NULL_POINTER, 0, 0};
+ RETURN_VALUE_IF(!in, status, "nullInput");
+ RETURN_VALUE_IF(!out, status, "nullOutput");
+ status = {EX_ILLEGAL_STATE, 0, 0};
+ int64_t inputFrameCount = getCommon().input.frameCount;
+ int64_t outputFrameCount = getCommon().output.frameCount;
+ RETURN_VALUE_IF(inputFrameCount != outputFrameCount, status, "FrameCountMismatch");
+ RETURN_VALUE_IF(0 == getInputFrameSize(), status, "zeroFrameSize");
+
+ LOG(DEBUG) << __func__ << " start processing";
+ std::lock_guard lg(mMutex);
+
+ int channels =
+ ::android::hardware::audio::common::getChannelCount(mCommon.input.base.channelMask);
+ int outChannels =
+ ::android::hardware::audio::common::getChannelCount(mCommon.output.base.channelMask);
+ int frameCount = mCommon.input.frameCount;
+
+    // Reverb only affects the stereo channels of a multichannel source.
+ if (channels < 1 || channels > LVM_MAX_CHANNELS) {
+ LOG(ERROR) << __func__ << " process invalid PCM channels " << channels;
+ return status;
+ }
+
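+    // LVREV always produces a stereo result, so input/output are staged in temporary stereo buffers.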
+ std::vector<float> inFrames(samples);
+ std::vector<float> outFrames(frameCount * FCC_2);
+
+ if (isPreset() && mNextPreset != mPreset) {
+ loadPreset();
+ }
+
+ if (isAuxiliary()) {
+ inFrames.assign(in, in + samples);
+ } else {
+        // apply the send level; a mono input is duplicated into both channels of the stereo buffer
+ if (channels >= FCC_2) {
+ for (int i = 0; i < frameCount; i++) {
+ inFrames[FCC_2 * i] = in[channels * i] * kSendLevel;
+ inFrames[FCC_2 * i + 1] = in[channels * i + 1] * kSendLevel;
+ }
+ } else {
+ for (int i = 0; i < frameCount; i++) {
+ inFrames[FCC_2 * i] = inFrames[FCC_2 * i + 1] = in[i] * kSendLevel;
+ }
+ }
+ }
+
+ if (isPreset() && mPreset == PresetReverb::Presets::NONE) {
+ std::fill(outFrames.begin(), outFrames.end(), 0); // always stereo here
+ } else {
+ if (!mEnabled && mSamplesToExitCount > 0) {
+ std::fill(outFrames.begin(), outFrames.end(), 0);
+            LOG(VERBOSE) << "Zeroing " << channels << " samples per frame at the end of call";
+ }
+
+ /* Process the samples, producing a stereo output */
+ LVREV_ReturnStatus_en lvrevStatus =
+ LVREV_Process(mInstance, /* Instance handle */
+ inFrames.data(), /* Input buffer */
+ outFrames.data(), /* Output buffer */
+ frameCount); /* Number of samples to read */
+ if (lvrevStatus != LVREV_SUCCESS) {
+            LOG(ERROR) << __func__ << " LVREV_Process failed with status " << lvrevStatus;
+ return {EX_UNSUPPORTED_OPERATION, 0, 0};
+ }
+ }
+ // Convert to 16 bits
+ if (isAuxiliary()) {
+ // nothing to do here
+ } else {
+ if (channels >= FCC_2) {
+ for (int i = 0; i < frameCount; i++) {
+ // Mix with dry input
+ outFrames[FCC_2 * i] += in[channels * i];
+ outFrames[FCC_2 * i + 1] += in[channels * i + 1];
+ }
+ } else {
+ for (int i = 0; i < frameCount; i++) {
+ // Mix with dry input
+ outFrames[FCC_2 * i] += in[i];
+ outFrames[FCC_2 * i + 1] += in[i];
+ }
+ }
+
+ // apply volume with ramp if needed
+ if (mVolume != mPrevVolume && volumeMode == VOLUME_RAMP) {
+ float vl = mPrevVolume.left;
+ float incl = (mVolume.left - vl) / frameCount;
+ float vr = mPrevVolume.right;
+ float incr = (mVolume.right - vr) / frameCount;
+
+ for (int i = 0; i < frameCount; i++) {
+ outFrames[FCC_2 * i] *= vl;
+ outFrames[FCC_2 * i + 1] *= vr;
+
+ vl += incl;
+ vr += incr;
+ }
+ mPrevVolume = mVolume;
+ } else if (volumeMode != VOLUME_OFF) {
+ if (mVolume.left != kUnitVolume || mVolume.right != kUnitVolume) {
+ for (int i = 0; i < frameCount; i++) {
+ outFrames[FCC_2 * i] *= mVolume.left;
+ outFrames[FCC_2 * i + 1] *= mVolume.right;
+ }
+ }
+ mPrevVolume = mVolume;
+ volumeMode = VOLUME_RAMP;
+ }
+ }
+
+ bool accumulate = false;
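+    // This wrapper always overwrites the output buffer, so accumulation stays disabled.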
+ if (outChannels > 2) {
+ // Accumulate if required
+ if (accumulate) {
+ for (int i = 0; i < frameCount; i++) {
+ out[outChannels * i] += outFrames[FCC_2 * i];
+ out[outChannels * i + 1] += outFrames[FCC_2 * i + 1];
+ }
+ } else {
+ for (int i = 0; i < frameCount; i++) {
+ out[outChannels * i] = outFrames[FCC_2 * i];
+ out[outChannels * i + 1] = outFrames[FCC_2 * i + 1];
+ }
+ }
+ if (!isAuxiliary()) {
+ for (int i = 0; i < frameCount; i++) {
+ // channels and outChannels are expected to be same.
+ for (int j = FCC_2; j < outChannels; j++) {
+ out[outChannels * i + j] = in[outChannels * i + j];
+ }
+ }
+ }
+ } else {
+ if (accumulate) {
+ if (outChannels == FCC_1) {
+ for (int i = 0; i < frameCount; i++) {
+ out[i] += ((outFrames[i * FCC_2] + outFrames[i * FCC_2 + 1]) * 0.5f);
+ }
+ } else {
+ for (int i = 0; i < frameCount * FCC_2; i++) {
+ out[i] += outFrames[i];
+ }
+ }
+ } else {
+ if (outChannels == FCC_1) {
+ From2iToMono_Float(outFrames.data(), out, frameCount);
+ } else {
+ for (int i = 0; i < frameCount * FCC_2; i++) {
+ out[i] = outFrames[i];
+ }
+ }
+ }
+ }
+
+ LOG(DEBUG) << __func__ << " done processing";
+
+ if (!mEnabled && mSamplesToExitCount > 0) {
+ // signed - unsigned will trigger integer overflow if result becomes negative.
+ mSamplesToExitCount -= samples;
+ }
+
+ return {STATUS_OK, samples, outChannels * frameCount};
+}
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h
new file mode 100644
index 0000000..af49a25
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbContext.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <android-base/logging.h>
+#include <android-base/thread_annotations.h>
+#include <unordered_map>
+
+#include "ReverbTypes.h"
+#include "effect-impl/EffectContext.h"
+
+namespace aidl::android::hardware::audio::effect {
+
+enum VolumeMode {
+ VOLUME_OFF,
+ VOLUME_FLAT,
+ VOLUME_RAMP,
+};
+
+struct LPFPair {
+ int roomHf;
+ int lpf;
+};
+
+class ReverbContext final : public EffectContext {
+ public:
+ ReverbContext(int statusDepth, const Parameter::Common& common,
+ const lvm::ReverbEffectType& type)
+ : EffectContext(statusDepth, common), mType(type) {
+ LOG(DEBUG) << __func__ << type;
+ init();
+ }
+ ~ReverbContext() override {
+ LOG(DEBUG) << __func__;
+ deInit();
+ }
+
+ RetCode init();
+ void deInit();
+
+ RetCode enable();
+ RetCode disable();
+
+ bool isAuxiliary();
+ bool isPreset();
+
+ RetCode setPresetReverbPreset(const PresetReverb::Presets& preset);
+ PresetReverb::Presets getPresetReverbPreset() const { return mNextPreset; }
+
+ RetCode setEnvironmentalReverbRoomLevel(int roomLevel);
+ int getEnvironmentalReverbRoomLevel() const { return mRoomLevel; }
+ RetCode setEnvironmentalReverbRoomHfLevel(int roomHfLevel);
+ int getEnvironmentalReverbRoomHfLevel() const { return mRoomHfLevel; }
+ RetCode setEnvironmentalReverbDecayTime(int decayTime);
+ int getEnvironmentalReverbDecayTime() const { return mDecayTime; }
+ RetCode setEnvironmentalReverbDecayHfRatio(int decayHfRatio);
+ int getEnvironmentalReverbDecayHfRatio() const { return mDecayHfRatio; }
+ RetCode setEnvironmentalReverbLevel(int level);
+ int getEnvironmentalReverbLevel() const { return mLevel; }
+ RetCode setEnvironmentalReverbDelay(int delay);
+ int getEnvironmentalReverbDelay() const { return mDelay; }
+ RetCode setEnvironmentalReverbDiffusion(int diffusion);
+ int getEnvironmentalReverbDiffusion() const { return mDiffusion; }
+ RetCode setEnvironmentalReverbDensity(int density);
+ int getEnvironmentalReverbDensity() const { return mDensity; }
+ RetCode setEnvironmentalReverbBypass(bool bypass);
+ bool getEnvironmentalReverbBypass() const { return mBypass; }
+
+ RetCode setVolumeStereo(const Parameter::VolumeStereo& volumeStereo) override;
+ Parameter::VolumeStereo getVolumeStereo() override { return mVolumeStereo; }
+
+ IEffect::Status lvmProcess(float* in, float* out, int samples);
+
+ private:
+ static constexpr inline float kUnitVolume = 1;
+ static constexpr inline float kSendLevel = 0.75f;
+ static constexpr inline int kDefaultLevel = 0;
+ static constexpr inline int kDefaultLPF = 23999; /* Default low pass filter, in Hz */
+ static constexpr inline int kDefaultHPF = 50; /* Default high pass filter, in Hz */
+ static constexpr inline int kDefaultDecayTime = 1490; /* Default Decay time, in ms */
+ static constexpr inline int kDefaultDensity = 100; /* Default Echo density */
+ static constexpr inline int kDefaultDamping = 21;
+ static constexpr inline int kDefaultRoomSize = 100;
+
+ static inline const std::vector<LPFPair> kLPFMapping = {
+ // Limit range to 50 for LVREV parameter range
+ {-10000, 50}, {-5000, 50}, {-4000, 50}, {-3000, 158}, {-2000, 502}, {-1000, 1666},
+ {-900, 1897}, {-800, 2169}, {-700, 2496}, {-600, 2895}, {-500, 3400}, {-400, 4066},
+ {-300, 5011}, {-200, 6537}, {-100, 9826}, {-99, 9881}, {-98, 9937}, {-97, 9994},
+ {-96, 10052}, {-95, 10111}, {-94, 10171}, {-93, 10231}, {-92, 10293}, {-91, 10356},
+ {-90, 10419}, {-89, 10484}, {-88, 10549}, {-87, 10616}, {-86, 10684}, {-85, 10753},
+ {-84, 10823}, {-83, 10895}, {-82, 10968}, {-81, 11042}, {-80, 11117}, {-79, 11194},
+ {-78, 11272}, {-77, 11352}, {-76, 11433}, {-75, 11516}, {-74, 11600}, {-73, 11686},
+ {-72, 11774}, {-71, 11864}, {-70, 11955}, {-69, 12049}, {-68, 12144}, {-67, 12242},
+ {-66, 12341}, {-65, 12443}, {-64, 12548}, {-63, 12654}, {-62, 12763}, {-61, 12875},
+ {-60, 12990}, {-59, 13107}, {-58, 13227}, {-57, 13351}, {-56, 13477}, {-55, 13607},
+ {-54, 13741}, {-53, 13878}, {-52, 14019}, {-51, 14164}, {-50, 14313}, {-49, 14467},
+ {-48, 14626}, {-47, 14789}, {-46, 14958}, {-45, 15132}, {-44, 15312}, {-43, 15498},
+ {-42, 15691}, {-41, 15890}, {-40, 16097}, {-39, 16311}, {-38, 16534}, {-37, 16766},
+ {-36, 17007}, {-35, 17259}, {-34, 17521}, {-33, 17795}, {-32, 18081}, {-31, 18381},
+ {-30, 18696}, {-29, 19027}, {-28, 19375}, {-27, 19742}, {-26, 20129}, {-25, 20540},
+ {-24, 20976}, {-23, 21439}, {-22, 21934}, {-21, 22463}, {-20, 23031}, {-19, 23643},
+ {-18, 23999}};
+
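+    // Reverb output level mapping, in millibels, indexed by the LVREV Level parameter (0-100).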
+ static inline const std::vector<int> kLevelMapping = {
+ -12000, -4000, -3398, -3046, -2796, -2603, -2444, -2310, -2194, -2092, -2000, -1918,
+ -1842, -1773, -1708, -1648, -1592, -1540, -1490, -1443, -1398, -1356, -1316, -1277,
+ -1240, -1205, -1171, -1138, -1106, -1076, -1046, -1018, -990, -963, -938, -912,
+ -888, -864, -841, -818, -796, -775, -754, -734, -714, -694, -675, -656,
+ -638, -620, -603, -585, -568, -552, -536, -520, -504, -489, -474, -459,
+ -444, -430, -416, -402, -388, -375, -361, -348, -335, -323, -310, -298,
+ -286, -274, -262, -250, -239, -228, -216, -205, -194, -184, -173, -162,
+ -152, -142, -132, -121, -112, -102, -92, -82, -73, -64, -54, -45,
+ -36, -27, -18, -9, 0};
+
+ static inline std::unordered_map<PresetReverb::Presets, t_reverb_settings> mReverbPresets = {
+ {PresetReverb::Presets::NONE, {0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ {PresetReverb::Presets::SMALLROOM,
+ {-400, -600, 1100, 830, -400, 5, 500, 10, 1000, 1000}},
+ {PresetReverb::Presets::MEDIUMROOM,
+ {-400, -600, 1300, 830, -1000, 20, -200, 20, 1000, 1000}},
+ {PresetReverb::Presets::LARGEROOM,
+ {-400, -600, 1500, 830, -1600, 5, -1000, 40, 1000, 1000}},
+ {PresetReverb::Presets::MEDIUMHALL,
+ {-400, -600, 1800, 700, -1300, 15, -800, 30, 1000, 1000}},
+ {PresetReverb::Presets::LARGEHALL,
+ {-400, -600, 1800, 700, -2000, 30, -1400, 60, 1000, 1000}},
+ {PresetReverb::Presets::PLATE, {-400, -200, 1300, 900, 0, 2, 0, 10, 1000, 750}}};
+
+ std::mutex mMutex;
+ const lvm::ReverbEffectType mType;
+ bool mEnabled = false;
+ LVREV_Handle_t mInstance GUARDED_BY(mMutex);
+
+ int mRoomLevel;
+ int mRoomHfLevel;
+ int mDecayTime;
+ int mDecayHfRatio;
+ int mLevel;
+ int mDelay;
+ int mDiffusion;
+ int mDensity;
+ bool mBypass;
+
+ PresetReverb::Presets mPreset;
+ PresetReverb::Presets mNextPreset;
+
+ int mSamplesToExitCount;
+
+ Parameter::VolumeStereo mVolume;
+ Parameter::VolumeStereo mPrevVolume;
+ VolumeMode volumeMode;
+
+ void initControlParameter(LVREV_ControlParams_st& params);
+ int16_t convertHfLevel(int hfLevel);
+ int convertLevel(int level);
+ void loadPreset();
+};
+
+} // namespace aidl::android::hardware::audio::effect
diff --git a/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbTypes.h b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbTypes.h
new file mode 100644
index 0000000..e37602c
--- /dev/null
+++ b/media/libeffects/lvm/wrapper/Reverb/aidl/ReverbTypes.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <aidl/android/hardware/audio/effect/BnEffect.h>
+#include <android/binder_enums.h>
+#include <audio_effects/effect_environmentalreverb.h>
+#include <audio_effects/effect_presetreverb.h>
+#include "effect-impl/EffectUUID.h"
+// from Reverb/lib
+#include "LVREV.h"
+
+namespace aidl::android::hardware::audio::effect {
+namespace lvm {
+
+constexpr inline int kMaxCallSize = 256;
+constexpr inline int kMinLevel = -6000;
+constexpr inline int kMaxT60 = 7000; /* Maximum decay time */
+constexpr inline int kMaxReverbLevel = 2000;
+constexpr inline int kMaxFrameSize = 2560;
+constexpr inline int kCpuLoadARM9E = 470; // Expressed in 0.1 MIPS
+constexpr inline int kMemUsage = (71 + (kMaxFrameSize >> 7)); // Expressed in kB
+
+static const EnvironmentalReverb::Capability kEnvReverbCap = {.minRoomLevelMb = lvm::kMinLevel,
+ .maxRoomLevelMb = 0,
+ .minRoomHfLevelMb = -4000,
+ .maxRoomHfLevelMb = 0,
+ .maxDecayTimeMs = lvm::kMaxT60,
+ .minDecayHfRatioPm = 100,
+ .maxDecayHfRatioPm = 2000,
+ .minLevelMb = lvm::kMinLevel,
+ .maxLevelMb = 0,
+ .maxDelayMs = 65,
+ .maxDiffusionPm = 1000,
+ .maxDensityPm = 1000};
+
+// NXP SW auxiliary environmental reverb
+static const std::string kAuxEnvReverbEffectName = "Auxiliary Environmental Reverb";
+static const Descriptor kAuxEnvReverbDesc = {
+ .common = {.id = {.type = kEnvReverbTypeUUID,
+ .uuid = kAuxEnvReverbImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::AUXILIARY},
+ .cpuLoad = kCpuLoadARM9E,
+ .memoryUsage = kMemUsage,
+ .name = kAuxEnvReverbEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::environmentalReverb>(kEnvReverbCap)};
+
+// NXP SW insert environmental reverb
+static const std::string kInsertEnvReverbEffectName = "Insert Environmental Reverb";
+static const Descriptor kInsertEnvReverbDesc = {
+ .common = {.id = {.type = kEnvReverbTypeUUID,
+ .uuid = kInsertEnvReverbImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::FIRST,
+ .volume = Flags::Volume::CTRL},
+ .cpuLoad = kCpuLoadARM9E,
+ .memoryUsage = kMemUsage,
+ .name = kInsertEnvReverbEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::environmentalReverb>(kEnvReverbCap)};
+
+static const std::vector<PresetReverb::Presets> kSupportedPresets{
+ ndk::enum_range<PresetReverb::Presets>().begin(),
+ ndk::enum_range<PresetReverb::Presets>().end()};
+static const PresetReverb::Capability kPresetReverbCap = {.supportedPresets = kSupportedPresets};
+
+// NXP SW auxiliary preset reverb
+static const std::string kAuxPresetReverbEffectName = "Auxiliary Preset Reverb";
+static const Descriptor kAuxPresetReverbDesc = {
+ .common = {.id = {.type = kPresetReverbTypeUUID,
+ .uuid = kAuxPresetReverbImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::AUXILIARY},
+ .cpuLoad = kCpuLoadARM9E,
+ .memoryUsage = kMemUsage,
+ .name = kAuxPresetReverbEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::presetReverb>(kPresetReverbCap)};
+
+// NXP SW insert preset reverb
+static const std::string kInsertPresetReverbEffectName = "Insert Preset Reverb";
+static const Descriptor kInsertPresetReverbDesc = {
+ .common = {.id = {.type = kPresetReverbTypeUUID,
+ .uuid = kInsertPresetReverbImplUUID,
+ .proxy = std::nullopt},
+ .flags = {.type = Flags::Type::INSERT,
+ .insert = Flags::Insert::FIRST,
+ .volume = Flags::Volume::CTRL},
+ .cpuLoad = kCpuLoadARM9E,
+ .memoryUsage = kMemUsage,
+ .name = kInsertPresetReverbEffectName,
+ .implementor = "NXP Software Ltd."},
+ .capability = Capability::make<Capability::presetReverb>(kPresetReverbCap)};
+
+enum class ReverbEffectType {
+ AUX_ENV,
+ INSERT_ENV,
+ AUX_PRESET,
+ INSERT_PRESET,
+};
+
+inline std::ostream& operator<<(std::ostream& out, const ReverbEffectType& type) {
+ switch (type) {
+ case ReverbEffectType::AUX_ENV:
+ return out << kAuxEnvReverbEffectName;
+ case ReverbEffectType::INSERT_ENV:
+ return out << kInsertEnvReverbEffectName;
+ case ReverbEffectType::AUX_PRESET:
+ return out << kAuxPresetReverbEffectName;
+ case ReverbEffectType::INSERT_PRESET:
+ return out << kInsertPresetReverbEffectName;
+ }
+ return out << "EnumReverbEffectTypeError";
+}
+
+inline std::ostream& operator<<(std::ostream& out, const LVREV_ReturnStatus_en& status) {
+ switch (status) {
+ case LVREV_SUCCESS:
+ return out << "LVREV_SUCCESS";
+ case LVREV_NULLADDRESS:
+ return out << "LVREV_NULLADDRESS";
+ case LVREV_OUTOFRANGE:
+ return out << "LVREV_OUTOFRANGE";
+ case LVREV_INVALIDNUMSAMPLES:
+ return out << "LVREV_INVALIDNUMSAMPLES";
+ case LVREV_RETURNSTATUS_DUMMY:
+ return out << "LVREV_RETURNSTATUS_DUMMY";
+ }
+ return out << "EnumLvrevRetStatusError";
+}
+
+} // namespace lvm
+} // namespace aidl::android::hardware::audio::effect
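
Because ReverbTypes.h provides operator<< overloads for both ReverbEffectType and LVREV_ReturnStatus_en, call sites can stringify them for logging. A small sketch, assuming <sstream> is available and placing the helper inside the lvm namespace so the overloads are found:

```cpp
#include <sstream>

namespace aidl::android::hardware::audio::effect::lvm {

// Sketch only: build a log string such as "Insert Preset Reverb returned LVREV_SUCCESS".
inline std::string toLogString(ReverbEffectType type, LVREV_ReturnStatus_en status) {
    std::ostringstream os;
    os << type << " returned " << status;  // uses the operator<< overloads defined above
    return os.str();
}

}  // namespace aidl::android::hardware::audio::effect::lvm
```
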
diff --git a/media/libeffects/visualizer/Android.bp b/media/libeffects/visualizer/Android.bp
index fb9d7b7..cf782f7 100644
--- a/media/libeffects/visualizer/Android.bp
+++ b/media/libeffects/visualizer/Android.bp
@@ -70,6 +70,7 @@
shared_libs: [
"libcutils",
],
+ relative_install_path: "soundfx",
visibility: [
"//hardware/interfaces/audio/aidl/default",
],
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
index 101b825..6b62004 100644
--- a/media/libheadtracking/HeadTrackingProcessor.cpp
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -97,7 +97,7 @@
mModeSelector.setScreenStable(mWorldToScreenTimestamp.value(), screenStable);
// Whenever the screen is unstable, recenter the head pose.
if (!screenStable) {
- recenter(true, false);
+ recenter(true, false, "calculate: screen movement");
}
mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
worldToLogicalScreen);
@@ -109,7 +109,7 @@
// Auto-recenter.
bool headStable = mHeadStillnessDetector.calculate(timestamp);
if (headStable || !screenStable) {
- recenter(true, false);
+ recenter(true, false, "calculate: head movement");
worldToHead = mHeadPoseBias.getOutput();
}
@@ -139,16 +139,16 @@
HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
- void recenter(bool recenterHead, bool recenterScreen) override {
+ void recenter(bool recenterHead, bool recenterScreen, std::string source) override {
if (recenterHead) {
mHeadPoseBias.recenter();
mHeadStillnessDetector.reset();
- mLocalLog.log("recenter Head");
+ mLocalLog.log("recenter Head from %s", source.c_str());
}
if (recenterScreen) {
mScreenPoseBias.recenter();
mScreenStillnessDetector.reset();
- mLocalLog.log("recenter Screen");
+ mLocalLog.log("recenter Screen from %s", source.c_str());
}
// If a sensor being recentered is included in the current mode, apply rate limiting to
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
index 8ef8ab0..b4c78a0 100644
--- a/media/libheadtracking/include/media/HeadTrackingProcessor.h
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -95,7 +95,8 @@
/**
* This causes the current poses for both the head and/or screen to be considered "center".
*/
- virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+ virtual void recenter(
+ bool recenterHead = true, bool recenterScreen = true, std::string source = "") = 0;
/**
* Dump HeadTrackingProcessor parameters under caller lock.
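
With the added source argument, each recenter request is expected to carry a short tag that the processor's local log records. An illustrative call site; the function name and reason string are assumptions, not part of this change:

```cpp
// Illustrative caller: recenter both poses and tag the trigger, which the local log
// then prints as "recenter Head from user double-tap" / "recenter Screen from ...".
void onUserDoubleTap(media::HeadTrackingProcessor& processor) {
    processor.recenter(true /* recenterHead */, true /* recenterScreen */, "user double-tap");
}
```
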
diff --git a/media/libmedia/Android.bp b/media/libmedia/Android.bp
index ab1cf69..590a7b7 100644
--- a/media/libmedia/Android.bp
+++ b/media/libmedia/Android.bp
@@ -384,7 +384,6 @@
export_shared_lib_headers: [
"libaudioclient",
"libbinder",
- "libandroidicu",
//"libsonivox",
"libmedia_omx",
"framework-permission-aidl-cpp",
diff --git a/media/libmedia/include/media/CharacterEncodingDetector.h b/media/libmedia/include/media/CharacterEncodingDetector.h
index 62564b1..2acc868 100644
--- a/media/libmedia/include/media/CharacterEncodingDetector.h
+++ b/media/libmedia/include/media/CharacterEncodingDetector.h
@@ -21,9 +21,12 @@
#include "StringArray.h"
-#include "unicode/ucnv.h"
-#include "unicode/ucsdet.h"
-#include "unicode/ustring.h"
+/** Declare opaque structures from ICU4C. */
+struct UConverter;
+typedef struct UConverter UConverter;
+
+struct UCharsetMatch;
+typedef struct UCharsetMatch UCharsetMatch;
namespace android {
diff --git a/media/libmedia/include/media/mediaplayer.h b/media/libmedia/include/media/mediaplayer.h
index b45dae5..2f9b85e 100644
--- a/media/libmedia/include/media/mediaplayer.h
+++ b/media/libmedia/include/media/mediaplayer.h
@@ -214,7 +214,8 @@
{
public:
explicit MediaPlayer(const android::content::AttributionSourceState& mAttributionSource =
- android::content::AttributionSourceState());
+ android::content::AttributionSourceState(),
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE);
~MediaPlayer();
void died();
void disconnect();
diff --git a/media/libmedia/mediaplayer.cpp b/media/libmedia/mediaplayer.cpp
index 5215c1b..b5c75b3 100644
--- a/media/libmedia/mediaplayer.cpp
+++ b/media/libmedia/mediaplayer.cpp
@@ -42,8 +42,8 @@
using media::VolumeShaper;
using content::AttributionSourceState;
-MediaPlayer::MediaPlayer(const AttributionSourceState& attributionSource)
- : mAttributionSource(attributionSource)
+MediaPlayer::MediaPlayer(const AttributionSourceState& attributionSource,
+ const audio_session_t sessionId) : mAttributionSource(attributionSource)
{
ALOGV("constructor");
mListener = NULL;
@@ -61,7 +61,12 @@
mLeftVolume = mRightVolume = 1.0;
mVideoWidth = mVideoHeight = 0;
mLockThreadId = 0;
- mAudioSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
+ if (sessionId == AUDIO_SESSION_ALLOCATE) {
+ mAudioSessionId = static_cast<audio_session_t>(
+ AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION));
+ } else {
+ mAudioSessionId = sessionId;
+ }
AudioSystem::acquireAudioSessionId(mAudioSessionId, (pid_t)-1, (uid_t)-1); // always in client.
mSendLevel = 0;
mRetransmitEndpointValid = false;
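
The new constructor parameter lets a client attach a MediaPlayer to an audio session it already owns rather than always allocating one. A short sketch of both paths, with the attribution source assumed to exist:

```cpp
// Default behaviour is unchanged: omitting the second argument still allocates a session.
sp<MediaPlayer> player = new MediaPlayer(attributionSource);

// New path: reuse a session id obtained earlier (e.g. one shared with an AudioTrack).
const audio_session_t existingSession = static_cast<audio_session_t>(
        AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION));
sp<MediaPlayer> playerInSession = new MediaPlayer(attributionSource, existingSession);
```
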
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index ba3df59..f80a467 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -122,6 +122,7 @@
#define AMEDIAMETRICS_PROP_BURSTFRAMES "burstFrames" // int32
#define AMEDIAMETRICS_PROP_CALLERNAME "callerName" // string, eg. "aaudio"
#define AMEDIAMETRICS_PROP_CHANNELCOUNT "channelCount" // int32
+#define AMEDIAMETRICS_PROP_CHANNELCOUNTHARDWARE "channelCountHardware" // int32
#define AMEDIAMETRICS_PROP_CHANNELMASK "channelMask" // int32
#define AMEDIAMETRICS_PROP_CHANNELMASKS "channelMasks" // string with channelMask values
// separated by |.
@@ -147,6 +148,7 @@
#define AMEDIAMETRICS_PROP_DURATIONNS "durationNs" // int64 duration time span
#define AMEDIAMETRICS_PROP_ENABLED "enabled" // string true/false.
#define AMEDIAMETRICS_PROP_ENCODING "encoding" // string value of format
+#define AMEDIAMETRICS_PROP_ENCODINGHARDWARE "encodingHardware" // string value of hardware format
#define AMEDIAMETRICS_PROP_EVENT "event#" // string value (often func name)
#define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs" // time to execute the event
@@ -182,6 +184,7 @@
#define AMEDIAMETRICS_PROP_PLAYERIID "playerIId" // int32 (-1 invalid/unset IID)
#define AMEDIAMETRICS_PROP_ROUTEDDEVICEID "routedDeviceId" // int32
#define AMEDIAMETRICS_PROP_SAMPLERATE "sampleRate" // int32
+#define AMEDIAMETRICS_PROP_SAMPLERATEHARDWARE "sampleRateHardware" // int32
#define AMEDIAMETRICS_PROP_SELECTEDDEVICEID "selectedDeviceId" // int32
#define AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION "selectedMicDirection" // int32
#define AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION "selectedMicFieldDimension" // double
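
The three new *HARDWARE keys let a stream report the device-side configuration next to the client-side one. A hedged sketch of a writer emitting them, assuming the mediametrics::LogItem helper from MediaMetricsItem.h and illustrative values:

```cpp
#include <media/MediaMetricsItem.h>

// Sketch only: report client vs. hardware configuration for one audio stream.
void reportStreamMetrics(const std::string& metricsId) {
    mediametrics::LogItem(metricsId)
            .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)48000)
            .set(AMEDIAMETRICS_PROP_SAMPLERATEHARDWARE, (int32_t)96000)
            .set(AMEDIAMETRICS_PROP_CHANNELCOUNT, (int32_t)2)
            .set(AMEDIAMETRICS_PROP_CHANNELCOUNTHARDWARE, (int32_t)8)
            .set(AMEDIAMETRICS_PROP_ENCODING, "AUDIO_FORMAT_PCM_16_BIT")
            .set(AMEDIAMETRICS_PROP_ENCODINGHARDWARE, "AUDIO_FORMAT_PCM_32_BIT")
            .record();
}
```
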
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
index 727d68d..5c6c5fd 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayer.cpp
@@ -1227,7 +1227,11 @@
notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
} else {
// Only audio track has error. Video track could be still good to play.
- notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_AUDIO_ERROR, err);
+ if (mVideoEOS) {
+ notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
+ } else {
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_AUDIO_ERROR, err);
+ }
}
mAudioDecoderError = true;
} else {
@@ -1238,7 +1242,11 @@
notifyListener(MEDIA_ERROR, MEDIA_ERROR_UNKNOWN, err);
} else {
// Only video track has error. Audio track could be still good to play.
- notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_VIDEO_ERROR, err);
+ if (mAudioEOS) {
+ notifyListener(MEDIA_PLAYBACK_COMPLETE, 0, 0);
+ } else {
+ notifyListener(MEDIA_INFO, MEDIA_INFO_PLAY_VIDEO_ERROR, err);
+ }
}
mVideoDecoderError = true;
}
diff --git a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
index 5e29b3f..a964d4f 100644
--- a/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
+++ b/media/libmediaplayerservice/nuplayer/NuPlayerDrm.cpp
@@ -22,14 +22,13 @@
#include <mediadrm/DrmUtils.h>
#include <utils/Log.h>
-
namespace android {
// static helpers - internal
sp<IDrm> NuPlayerDrm::CreateDrm(status_t *pstatus)
{
- return DrmUtils::MakeDrm(pstatus);
+ return DrmUtils::MakeDrm(IDRM_NUPLAYER, pstatus);
}
sp<ICrypto> NuPlayerDrm::createCrypto(status_t *pstatus)
diff --git a/media/libstagefright/Android.bp b/media/libstagefright/Android.bp
index 171be08..ddc0f2f 100644
--- a/media/libstagefright/Android.bp
+++ b/media/libstagefright/Android.bp
@@ -155,6 +155,7 @@
"libEGL",
"libGLESv1_CM",
"libGLESv2",
+ "libvulkan",
"libgui",
"liblog",
"libprocessgroup",
@@ -301,7 +302,6 @@
"libstagefright_codecbase",
"libstagefright_foundation",
"libstagefright_omx_utils",
- "libRScpp",
"libhidlallocatorutils",
"libhidlbase",
"libhidlmemory",
@@ -318,7 +318,6 @@
"libstagefright_esds",
"libstagefright_color_conversion",
"libyuv_static",
- "libstagefright_mediafilter",
"libstagefright_webm",
"libstagefright_timedtext",
"libogg",
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index c73679b..a60c04f 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -76,7 +76,6 @@
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaFilter.h>
#include <media/stagefright/OMXClient.h>
#include <media/stagefright/PersistentSurface.h>
#include <media/stagefright/SurfaceUtils.h>
@@ -1616,8 +1615,6 @@
} else if (name.startsWithIgnoreCase("omx.")) {
// at this time only ACodec specifies a mime type.
return new ACodec;
- } else if (name.startsWithIgnoreCase("android.filter.")) {
- return new MediaFilter;
} else {
return NULL;
}
diff --git a/media/libstagefright/OggWriter.cpp b/media/libstagefright/OggWriter.cpp
index d2a65d0..cff37a3 100644
--- a/media/libstagefright/OggWriter.cpp
+++ b/media/libstagefright/OggWriter.cpp
@@ -96,6 +96,7 @@
return ERROR_UNSUPPORTED;
}
+ // NOLINTNEXTLINE(clang-analyzer-unix.MallocSizeof)
mOs = (OggStreamState*) malloc(sizeof(ogg_stream_state));
if (ogg_stream_init((ogg_stream_state*)mOs, rand()) == -1) {
ALOGE("ogg stream init failed");
diff --git a/media/libstagefright/SurfaceUtils.cpp b/media/libstagefright/SurfaceUtils.cpp
index 1f569ef..291b892 100644
--- a/media/libstagefright/SurfaceUtils.cpp
+++ b/media/libstagefright/SurfaceUtils.cpp
@@ -193,10 +193,38 @@
status_t pushBlankBuffersToNativeWindow(ANativeWindow *nativeWindow /* nonnull */) {
status_t err = NO_ERROR;
- ANativeWindowBuffer* anb = NULL;
+ ANativeWindowBuffer* anb = nullptr;
int numBufs = 0;
int minUndequeuedBufs = 0;
+ auto handleError = [](ANativeWindow *nativeWindow, ANativeWindowBuffer* anb, status_t err)
+ {
+ if (anb != nullptr) {
+ nativeWindow->cancelBuffer(nativeWindow, anb, -1);
+ anb = nullptr;
+ }
+
+ // Clean up after success or error.
+ status_t err2 = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_CPU);
+ if (err2 != NO_ERROR) {
+ ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)",
+ strerror(-err2), -err2);
+ if (err == NO_ERROR) {
+ err = err2;
+ }
+ }
+
+ err2 = nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err2)");
+ if (err2 != NO_ERROR) {
+ ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
+ if (err == NO_ERROR) {
+ err = err2;
+ }
+ }
+
+ return err;
+ };
+
// We need to reconnect to the ANativeWindow as a CPU client to ensure that
// no frames get dropped by SurfaceFlinger assuming that these are video
// frames.
@@ -217,24 +245,29 @@
nativeWindow, 1, 1, HAL_PIXEL_FORMAT_RGBX_8888, 0, GRALLOC_USAGE_SW_WRITE_OFTEN,
false /* reconnect */);
if (err != NO_ERROR) {
- goto error;
+ return handleError(nativeWindow, anb, err);
}
static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->allowAllocation(true);
+ // In non-blocking mode (timeout = 0), native_window_dequeue_buffer_and_wait()
+ // can fail with a timeout. Changing to blocking mode ensures that dequeue
+ // does not time out.
+ static_cast<Surface*>(nativeWindow)->getIGraphicBufferProducer()->setDequeueTimeout(-1);
+
err = nativeWindow->query(nativeWindow,
NATIVE_WINDOW_MIN_UNDEQUEUED_BUFFERS, &minUndequeuedBufs);
if (err != NO_ERROR) {
ALOGE("error pushing blank frames: MIN_UNDEQUEUED_BUFFERS query "
"failed: %s (%d)", strerror(-err), -err);
- goto error;
+ return handleError(nativeWindow, anb, err);
}
numBufs = minUndequeuedBufs + 1;
err = native_window_set_buffer_count(nativeWindow, numBufs);
if (err != NO_ERROR) {
ALOGE("error pushing blank frames: set_buffer_count failed: %s (%d)", strerror(-err), -err);
- goto error;
+ return handleError(nativeWindow, anb, err);
}
// We push numBufs + 1 buffers to ensure that we've drawn into the same
@@ -252,7 +285,7 @@
sp<GraphicBuffer> buf(GraphicBuffer::from(anb));
// Fill the buffer with the a 1x1 checkerboard pattern ;)
- uint32_t *img = NULL;
+ uint32_t *img = nullptr;
err = buf->lock(GRALLOC_USAGE_SW_WRITE_OFTEN, (void**)(&img));
if (err != NO_ERROR) {
ALOGE("error pushing blank frames: lock failed: %s (%d)", strerror(-err), -err);
@@ -273,34 +306,10 @@
break;
}
- anb = NULL;
+ anb = nullptr;
}
-error:
-
- if (anb != NULL) {
- nativeWindow->cancelBuffer(nativeWindow, anb, -1);
- anb = NULL;
- }
-
- // Clean up after success or error.
- status_t err2 = native_window_api_disconnect(nativeWindow, NATIVE_WINDOW_API_CPU);
- if (err2 != NO_ERROR) {
- ALOGE("error pushing blank frames: api_disconnect failed: %s (%d)", strerror(-err2), -err2);
- if (err == NO_ERROR) {
- err = err2;
- }
- }
-
- err2 = nativeWindowConnect(nativeWindow, "pushBlankBuffersToNativeWindow(err2)");
- if (err2 != NO_ERROR) {
- ALOGE("error pushing blank frames: api_connect failed: %s (%d)", strerror(-err), -err);
- if (err == NO_ERROR) {
- err = err2;
- }
- }
-
- return err;
+ return handleError(nativeWindow, anb, err);
}
status_t nativeWindowConnect(ANativeWindow *surface, const char *reason) {
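
The rewrite above replaces the goto error label with a handleError lambda so every early return performs the same disconnect/reconnect cleanup and keeps the first error. A generic sketch of that pattern against the same window.h API, with illustrative names:

```cpp
// Generic sketch of the cleanup-lambda pattern: failures funnel through one lambda
// that always disconnects the CPU client and preserves the first error seen.
status_t withCpuConnection(ANativeWindow* window) {
    status_t err = native_window_api_connect(window, NATIVE_WINDOW_API_CPU);
    if (err != NO_ERROR) return err;

    auto finish = [window](status_t e) {
        const status_t e2 = native_window_api_disconnect(window, NATIVE_WINDOW_API_CPU);
        return (e != NO_ERROR) ? e : e2;
    };

    err = native_window_set_buffer_count(window, 2);
    if (err != NO_ERROR) return finish(err);

    // ... further work that may fail goes here ...
    return finish(NO_ERROR);
}
```
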
diff --git a/media/libstagefright/data/media_codecs_google_video.xml b/media/libstagefright/data/media_codecs_google_video.xml
index 2c258e4..b1d7ff4 100644
--- a/media/libstagefright/data/media_codecs_google_video.xml
+++ b/media/libstagefright/data/media_codecs_google_video.xml
@@ -124,7 +124,8 @@
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-3600" />
<Limit name="bitrate" range="1-40000000" />
- <Feature name="bitrate-modes" value="VBR,CBR" />
+ <Limit name="quality" range="0-100" default="80" />
+ <Feature name="bitrate-modes" value="VBR,CBR,CQ" />
</MediaCodec>
</Encoders>
</Included>
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index 05f2760..5f113c5 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -357,7 +357,8 @@
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-8200" />
<Limit name="bitrate" range="1-40000000" />
- <Feature name="bitrate-modes" value="VBR,CBR" />
+ <Limit name="quality" range="0-100" default="80" />
+ <Feature name="bitrate-modes" value="VBR,CBR,CQ" />
<Attribute name="software-codec" />
</MediaCodec>
</Encoders>
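
Both codec XML entries now advertise the CQ bitrate mode and a 0-100 quality limit for the VP8 software encoder. A hedged framework-side sketch of requesting it, assuming BITRATE_MODE_CQ from MediaCodecConstants.h and an already created MediaCodec named codec:

```cpp
// Sketch only: configure the VP8 software encoder for constant-quality encoding.
sp<AMessage> format = new AMessage;
format->setString("mime", "video/x-vnd.on2.vp8");
format->setInt32("width", 1280);
format->setInt32("height", 720);
format->setFloat("frame-rate", 30.0f);
format->setInt32("bitrate-mode", BITRATE_MODE_CQ);  // now listed in "bitrate-modes"
format->setInt32("quality", 80);                     // within the new 0-100 "quality" limit
codec->configure(format, nullptr /* surface */, nullptr /* crypto */,
                 MediaCodec::CONFIGURE_FLAG_ENCODE);
```
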
diff --git a/media/libstagefright/filters/Android.bp b/media/libstagefright/filters/Android.bp
deleted file mode 100644
index e6d59ad..0000000
--- a/media/libstagefright/filters/Android.bp
+++ /dev/null
@@ -1,52 +0,0 @@
-package {
- // See: http://go/android-license-faq
- // A large-scale-change added 'default_applicable_licenses' to import
- // all of the 'license_kinds' from "frameworks_av_media_libstagefright_license"
- // to get the below license kinds:
- // SPDX-license-identifier-Apache-2.0
- default_applicable_licenses: ["frameworks_av_media_libstagefright_license"],
-}
-
-cc_library_static {
- name: "libstagefright_mediafilter",
-
- srcs: [
- "ColorConvert.cpp",
- "GraphicBufferListener.cpp",
- "IntrinsicBlurFilter.cpp",
- "MediaFilter.cpp",
- "RSFilter.cpp",
- "SaturationFilter.cpp",
- "saturationARGB.rscript",
- "SimpleFilter.cpp",
- "ZeroFilter.cpp",
- ],
-
- export_include_dirs: [
- "include",
- ],
-
- local_include_dirs: [
- "include/filters",
- ],
-
- cflags: [
- "-Wno-multichar",
- "-Werror",
- "-Wall",
- ],
-
- header_libs: [
- "libmediadrm_headers",
- ],
-
- shared_libs: [
- "libgui",
- "libmedia",
- "libhidlmemory",
- ],
-
- sanitize: {
- cfi: true,
- },
-}
diff --git a/media/libstagefright/filters/ColorConvert.cpp b/media/libstagefright/filters/ColorConvert.cpp
deleted file mode 100644
index a8d5dd2..0000000
--- a/media/libstagefright/filters/ColorConvert.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "ColorConvert.h"
-
-#ifndef max
-#define max(a,b) ((a) > (b) ? (a) : (b))
-#endif
-#ifndef min
-#define min(a,b) ((a) < (b) ? (a) : (b))
-#endif
-
-namespace android {
-
-void YUVToRGB(
- int32_t y, int32_t u, int32_t v,
- int32_t* r, int32_t* g, int32_t* b) {
- y -= 16;
- u -= 128;
- v -= 128;
-
- *b = 1192 * y + 2066 * u;
- *g = 1192 * y - 833 * v - 400 * u;
- *r = 1192 * y + 1634 * v;
-
- *r = min(262143, max(0, *r));
- *g = min(262143, max(0, *g));
- *b = min(262143, max(0, *b));
-
- *r >>= 10;
- *g >>= 10;
- *b >>= 10;
-}
-
-void convertYUV420spToARGB(
- uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
- uint8_t *dest) {
- const int32_t bytes_per_pixel = 2;
-
- for (int32_t i = 0; i < height; i++) {
- for (int32_t j = 0; j < width; j++) {
- int32_t y = *(pY + i * width + j);
- int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
- int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
-
- int32_t r, g, b;
- YUVToRGB(y, u, v, &r, &g, &b);
-
- *dest++ = 0xFF;
- *dest++ = r;
- *dest++ = g;
- *dest++ = b;
- }
- }
-}
-
-void convertYUV420spToRGB888(
- uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
- uint8_t *dest) {
- const int32_t bytes_per_pixel = 2;
-
- for (int32_t i = 0; i < height; i++) {
- for (int32_t j = 0; j < width; j++) {
- int32_t y = *(pY + i * width + j);
- int32_t u = *(pUV + (i/2) * width + bytes_per_pixel * (j/2));
- int32_t v = *(pUV + (i/2) * width + bytes_per_pixel * (j/2) + 1);
-
- int32_t r, g, b;
- YUVToRGB(y, u, v, &r, &g, &b);
-
- *dest++ = r;
- *dest++ = g;
- *dest++ = b;
- }
- }
-}
-
-// HACK - not even slightly optimized
-// TODO: remove when RGBA support is added to SoftwareRenderer
-void convertRGBAToARGB(
- uint8_t *src, int32_t width, int32_t height, uint32_t stride,
- uint8_t *dest) {
- for (int32_t i = 0; i < height; ++i) {
- for (int32_t j = 0; j < width; ++j) {
- uint8_t r = *src++;
- uint8_t g = *src++;
- uint8_t b = *src++;
- uint8_t a = *src++;
- *dest++ = a;
- *dest++ = r;
- *dest++ = g;
- *dest++ = b;
- }
- src += (stride - width) * 4;
- }
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/GraphicBufferListener.cpp b/media/libstagefright/filters/GraphicBufferListener.cpp
deleted file mode 100644
index db061c1..0000000
--- a/media/libstagefright/filters/GraphicBufferListener.cpp
+++ /dev/null
@@ -1,155 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "GraphicBufferListener"
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-#include <media/stagefright/MediaErrors.h>
-
-#include <gui/BufferItem.h>
-#include <utils/String8.h>
-
-#include "GraphicBufferListener.h"
-
-namespace android {
-
-status_t GraphicBufferListener::init(
- const sp<AMessage> ¬ify,
- size_t bufferWidth, size_t bufferHeight, size_t bufferCount) {
- mNotify = notify;
-
- String8 name("GraphicBufferListener");
- BufferQueue::createBufferQueue(&mProducer, &mConsumer);
- mConsumer->setConsumerName(name);
- mConsumer->setDefaultBufferSize(bufferWidth, bufferHeight);
- mConsumer->setConsumerUsageBits(GRALLOC_USAGE_SW_READ_OFTEN);
-
- status_t err = mConsumer->setMaxAcquiredBufferCount(bufferCount);
- if (err != NO_ERROR) {
- ALOGE("Unable to set BQ max acquired buffer count to %zu: %d",
- bufferCount, err);
- return err;
- }
-
- wp<BufferQueue::ConsumerListener> listener =
- static_cast<BufferQueue::ConsumerListener*>(this);
- sp<BufferQueue::ProxyConsumerListener> proxy =
- new BufferQueue::ProxyConsumerListener(listener);
-
- err = mConsumer->consumerConnect(proxy, false);
- if (err != NO_ERROR) {
- ALOGE("Error connecting to BufferQueue: %s (%d)",
- strerror(-err), err);
- return err;
- }
-
- ALOGV("init() successful.");
-
- return OK;
-}
-
-void GraphicBufferListener::onFrameAvailable(const BufferItem& /* item */) {
- ALOGV("onFrameAvailable() called");
-
- {
- Mutex::Autolock autoLock(mMutex);
- mNumFramesAvailable++;
- }
-
- sp<AMessage> notify = mNotify->dup();
- mNotify->setWhat(kWhatFrameAvailable);
- mNotify->post();
-}
-
-void GraphicBufferListener::onBuffersReleased() {
- ALOGV("onBuffersReleased() called");
- // nothing to do
-}
-
-void GraphicBufferListener::onSidebandStreamChanged() {
- ALOGW("GraphicBufferListener cannot consume sideband streams.");
- // nothing to do
-}
-
-BufferItem GraphicBufferListener::getBufferItem() {
- BufferItem item;
-
- {
- Mutex::Autolock autoLock(mMutex);
- if (mNumFramesAvailable <= 0) {
- ALOGE("getBuffer() called with no frames available");
- return item;
- }
- mNumFramesAvailable--;
- }
-
- status_t err = mConsumer->acquireBuffer(&item, 0);
- if (err == BufferQueue::NO_BUFFER_AVAILABLE) {
- // shouldn't happen, since we track num frames available
- ALOGE("frame was not available");
- item.mSlot = -1;
- return item;
- } else if (err != OK) {
- ALOGE("acquireBuffer returned err=%d", err);
- item.mSlot = -1;
- return item;
- }
-
- // Wait for it to become available.
- err = item.mFence->waitForever("GraphicBufferListener::getBufferItem");
- if (err != OK) {
- ALOGW("failed to wait for buffer fence: %d", err);
- // keep going
- }
-
- // If this is the first time we're seeing this buffer, add it to our
- // slot table.
- if (item.mGraphicBuffer != NULL) {
- ALOGV("setting mBufferSlot %d", item.mSlot);
- mBufferSlot[item.mSlot] = item.mGraphicBuffer;
- }
-
- return item;
-}
-
-sp<GraphicBuffer> GraphicBufferListener::getBuffer(BufferItem item) {
- sp<GraphicBuffer> buf;
- if (item.mSlot < 0 || item.mSlot >= BufferQueue::NUM_BUFFER_SLOTS) {
- ALOGE("getBuffer() received invalid BufferItem: mSlot==%d", item.mSlot);
- return buf;
- }
-
- buf = mBufferSlot[item.mSlot];
- CHECK(buf.get() != NULL);
-
- return buf;
-}
-
-status_t GraphicBufferListener::releaseBuffer(BufferItem item) {
- if (item.mSlot < 0 || item.mSlot >= BufferQueue::NUM_BUFFER_SLOTS) {
- ALOGE("getBuffer() received invalid BufferItem: mSlot==%d", item.mSlot);
- return ERROR_OUT_OF_RANGE;
- }
-
- mConsumer->releaseBuffer(item.mSlot, item.mFrameNumber,
- EGL_NO_DISPLAY, EGL_NO_SYNC_KHR, Fence::NO_FENCE);
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/IntrinsicBlurFilter.cpp b/media/libstagefright/filters/IntrinsicBlurFilter.cpp
deleted file mode 100644
index e00afd9..0000000
--- a/media/libstagefright/filters/IntrinsicBlurFilter.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "IntrinsicBlurFilter"
-
-#include <utils/Log.h>
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include "IntrinsicBlurFilter.h"
-
-namespace android {
-
-status_t IntrinsicBlurFilter::configure(const sp<AMessage> &msg) {
- status_t err = SimpleFilter::configure(msg);
- if (err != OK) {
- return err;
- }
-
- if (!msg->findString("cacheDir", &mCacheDir)) {
- ALOGE("Failed to find cache directory in config message.");
- return NAME_NOT_FOUND;
- }
-
- return OK;
-}
-
-status_t IntrinsicBlurFilter::start() {
- // TODO: use a single RS context object for entire application
- mRS = new RSC::RS();
-
- if (!mRS->init(mCacheDir.c_str())) {
- ALOGE("Failed to initialize RenderScript context.");
- return NO_INIT;
- }
-
- // 32-bit elements for ARGB8888
- RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
-
- RSC::Type::Builder tb(mRS, e);
- tb.setX(mWidth);
- tb.setY(mHeight);
- RSC::sp<const RSC::Type> t = tb.create();
-
- mAllocIn = RSC::Allocation::createTyped(mRS, t);
- mAllocOut = RSC::Allocation::createTyped(mRS, t);
-
- mBlur = RSC::ScriptIntrinsicBlur::create(mRS, e);
- mBlur->setRadius(mBlurRadius);
- mBlur->setInput(mAllocIn);
-
- return OK;
-}
-
-void IntrinsicBlurFilter::reset() {
- mBlur.clear();
- mAllocOut.clear();
- mAllocIn.clear();
- mRS.clear();
-}
-
-status_t IntrinsicBlurFilter::setParameters(const sp<AMessage> &msg) {
- sp<AMessage> params;
- CHECK(msg->findMessage("params", ¶ms));
-
- float blurRadius;
- if (params->findFloat("blur-radius", &blurRadius)) {
- mBlurRadius = blurRadius;
- }
-
- return OK;
-}
-
-status_t IntrinsicBlurFilter::processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
- mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
- mBlur->forEach(mAllocOut);
- mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/MediaFilter.cpp b/media/libstagefright/filters/MediaFilter.cpp
deleted file mode 100644
index c7baa73..0000000
--- a/media/libstagefright/filters/MediaFilter.cpp
+++ /dev/null
@@ -1,840 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "MediaFilter"
-
-#include <inttypes.h>
-#include <utils/Trace.h>
-
-#include <media/stagefright/foundation/ABuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include <media/stagefright/BufferProducerWrapper.h>
-#include <media/stagefright/MediaCodecConstants.h>
-#include <media/stagefright/MediaDefs.h>
-#include <media/stagefright/MediaErrors.h>
-#include <media/stagefright/MediaFilter.h>
-
-#include <media/MediaCodecBuffer.h>
-
-#include <gui/BufferItem.h>
-
-#include "ColorConvert.h"
-#include "GraphicBufferListener.h"
-#include "IntrinsicBlurFilter.h"
-#include "RSFilter.h"
-#include "SaturationFilter.h"
-#include "ZeroFilter.h"
-
-namespace android {
-
-class MediaFilter::BufferChannel : public BufferChannelBase {
-public:
- BufferChannel(const sp<AMessage> &in, const sp<AMessage> &out)
- : mInputBufferFilled(in), mOutputBufferDrained(out) {
- }
-
- ~BufferChannel() override = default;
-
- // BufferChannelBase
-
- status_t queueInputBuffer(const sp<MediaCodecBuffer> &buffer) override {
- sp<AMessage> msg = mInputBufferFilled->dup();
- msg->setObject("buffer", buffer);
- msg->post();
- return OK;
- }
-
- status_t queueSecureInputBuffer(
- const sp<MediaCodecBuffer> &,
- bool,
- const uint8_t *,
- const uint8_t *,
- CryptoPlugin::Mode,
- CryptoPlugin::Pattern,
- const CryptoPlugin::SubSample *,
- size_t,
- AString *) override {
- return INVALID_OPERATION;
- }
-
- status_t renderOutputBuffer(
- const sp<MediaCodecBuffer> &buffer, int64_t /* timestampNs */) override {
- sp<AMessage> msg = mOutputBufferDrained->dup();
- msg->setObject("buffer", buffer);
- msg->post();
- return OK;
- }
-
- status_t discardBuffer(const sp<MediaCodecBuffer> &buffer) override {
- if (FindBufferIndex(&mInputBuffers, buffer) >= 0) {
- sp<AMessage> msg = mInputBufferFilled->dup();
- msg->setObject("buffer", buffer);
- msg->post();
- return OK;
- }
- sp<AMessage> msg = mOutputBufferDrained->dup();
- msg->setObject("buffer", buffer);
- msg->post();
- return OK;
- }
-
- void getInputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
- if (!array) {
- return;
- }
- array->clear();
- array->appendVector(mInputBuffers);
- }
-
- void getOutputBufferArray(Vector<sp<MediaCodecBuffer>> *array) {
- if (!array) {
- return;
- }
- array->clear();
- array->appendVector(mOutputBuffers);
- }
-
- // For MediaFilter
-
- void fillThisBuffer(const sp<MediaCodecBuffer> &buffer) {
- ssize_t index = FindBufferIndex(&mInputBuffers, buffer);
- mCallback->onInputBufferAvailable(index, buffer);
- }
-
- void drainThisBuffer(const sp<MediaCodecBuffer> &buffer, int flags) {
- ssize_t index = FindBufferIndex(&mOutputBuffers, buffer);
- buffer->meta()->setInt32("flags", flags);
- mCallback->onOutputBufferAvailable(index, buffer);
- }
-
- template <class T>
- void setInputBuffers(T begin, T end) {
- mInputBuffers.clear();
- for (T it = begin; it != end; ++it) {
- mInputBuffers.push_back(it->mData);
- }
- }
-
- template <class T>
- void setOutputBuffers(T begin, T end) {
- mOutputBuffers.clear();
- for (T it = begin; it != end; ++it) {
- mOutputBuffers.push_back(it->mData);
- }
- }
-
-private:
- sp<AMessage> mInputBufferFilled;
- sp<AMessage> mOutputBufferDrained;
- Vector<sp<MediaCodecBuffer>> mInputBuffers;
- Vector<sp<MediaCodecBuffer>> mOutputBuffers;
-
- static ssize_t FindBufferIndex(
- Vector<sp<MediaCodecBuffer>> *array, const sp<MediaCodecBuffer> &buffer) {
- for (size_t i = 0; i < array->size(); ++i) {
- if (array->itemAt(i) == buffer) {
- return i;
- }
- }
- return -1;
- }
-};
-
-// parameter: number of input and output buffers
-static const size_t kBufferCountActual = 4;
-
-MediaFilter::MediaFilter()
- : mState(UNINITIALIZED),
- mGeneration(0),
- mGraphicBufferListener(NULL) {
-}
-
-MediaFilter::~MediaFilter() {
-}
-
-//////////////////// PUBLIC FUNCTIONS //////////////////////////////////////////
-
-std::shared_ptr<BufferChannelBase> MediaFilter::getBufferChannel() {
- if (!mBufferChannel) {
- mBufferChannel = std::make_shared<BufferChannel>(
- new AMessage(kWhatInputBufferFilled, this),
- new AMessage(kWhatOutputBufferDrained, this));
- }
- return mBufferChannel;
-}
-
-void MediaFilter::initiateAllocateComponent(const sp<AMessage> &msg) {
- msg->setWhat(kWhatAllocateComponent);
- msg->setTarget(this);
- msg->post();
-}
-
-void MediaFilter::initiateConfigureComponent(const sp<AMessage> &msg) {
- msg->setWhat(kWhatConfigureComponent);
- msg->setTarget(this);
- msg->post();
-}
-
-void MediaFilter::initiateCreateInputSurface() {
- (new AMessage(kWhatCreateInputSurface, this))->post();
-}
-
-void MediaFilter::initiateSetInputSurface(
- const sp<PersistentSurface> & /* surface */) {
- ALOGW("initiateSetInputSurface() unsupported");
-}
-
-void MediaFilter::initiateStart() {
- (new AMessage(kWhatStart, this))->post();
-}
-
-void MediaFilter::initiateShutdown(bool keepComponentAllocated) {
- sp<AMessage> msg = new AMessage(kWhatShutdown, this);
- msg->setInt32("keepComponentAllocated", keepComponentAllocated);
- msg->post();
-}
-
-void MediaFilter::signalFlush() {
- (new AMessage(kWhatFlush, this))->post();
-}
-
-void MediaFilter::signalResume() {
- (new AMessage(kWhatResume, this))->post();
-}
-
-// nothing to do
-void MediaFilter::signalRequestIDRFrame() {
- return;
-}
-
-void MediaFilter::signalSetParameters(const sp<AMessage> ¶ms) {
- sp<AMessage> msg = new AMessage(kWhatSetParameters, this);
- msg->setMessage("params", params);
- msg->post();
-}
-
-void MediaFilter::signalEndOfInputStream() {
- (new AMessage(kWhatSignalEndOfInputStream, this))->post();
-}
-
-void MediaFilter::onMessageReceived(const sp<AMessage> &msg) {
- switch (msg->what()) {
- case kWhatAllocateComponent:
- {
- onAllocateComponent(msg);
- break;
- }
- case kWhatConfigureComponent:
- {
- onConfigureComponent(msg);
- break;
- }
- case kWhatStart:
- {
- onStart();
- break;
- }
- case kWhatProcessBuffers:
- {
- processBuffers();
- break;
- }
- case kWhatInputBufferFilled:
- {
- onInputBufferFilled(msg);
- break;
- }
- case kWhatOutputBufferDrained:
- {
- onOutputBufferDrained(msg);
- break;
- }
- case kWhatShutdown:
- {
- onShutdown(msg);
- break;
- }
- case kWhatFlush:
- {
- onFlush();
- break;
- }
- case kWhatResume:
- {
- // nothing to do
- break;
- }
- case kWhatSetParameters:
- {
- onSetParameters(msg);
- break;
- }
- case kWhatCreateInputSurface:
- {
- onCreateInputSurface();
- break;
- }
- case GraphicBufferListener::kWhatFrameAvailable:
- {
- onInputFrameAvailable();
- break;
- }
- case kWhatSignalEndOfInputStream:
- {
- onSignalEndOfInputStream();
- break;
- }
- default:
- {
- ALOGE("Message not handled:\n%s", msg->debugString().c_str());
- break;
- }
- }
-}
-
-//////////////////// HELPER FUNCTIONS //////////////////////////////////////////
-
-void MediaFilter::signalProcessBuffers() {
- (new AMessage(kWhatProcessBuffers, this))->post();
-}
-
-void MediaFilter::signalError(status_t error) {
- mCallback->onError(error, ACTION_CODE_FATAL);
-}
-
-status_t MediaFilter::allocateBuffersOnPort(OMX_U32 portIndex) {
- CHECK(portIndex == kPortIndexInput || portIndex == kPortIndexOutput);
- const bool isInput = portIndex == kPortIndexInput;
- const size_t bufferSize = isInput ? mMaxInputSize : mMaxOutputSize;
-
- CHECK(mBuffers[portIndex].isEmpty());
-
- ALOGV("Allocating %zu buffers of size %zu on %s port",
- kBufferCountActual, bufferSize,
- isInput ? "input" : "output");
-
- // trigger output format change
- sp<AMessage> outputFormat = mOutputFormat->dup();
- for (size_t i = 0; i < kBufferCountActual; ++i) {
- BufferInfo info;
- info.mStatus = BufferInfo::OWNED_BY_US;
- info.mBufferID = i;
- info.mGeneration = mGeneration;
- info.mOutputFlags = 0;
- info.mData = new MediaCodecBuffer(
- isInput ? mInputFormat : outputFormat,
- new ABuffer(bufferSize));
- info.mData->meta()->setInt64("timeUs", 0);
-
- mBuffers[portIndex].push_back(info);
-
- if (!isInput) {
- mAvailableOutputBuffers.push(
- &mBuffers[portIndex].editItemAt(i));
- }
- }
- if (isInput) {
- mBufferChannel->setInputBuffers(
- mBuffers[portIndex].begin(), mBuffers[portIndex].end());
- } else {
- mBufferChannel->setOutputBuffers(
- mBuffers[portIndex].begin(), mBuffers[portIndex].end());
- }
-
- return OK;
-}
-
-MediaFilter::BufferInfo* MediaFilter::findBuffer(
- uint32_t portIndex, const sp<MediaCodecBuffer> &buffer,
- ssize_t *index) {
- for (size_t i = 0; i < mBuffers[portIndex].size(); ++i) {
- BufferInfo *info = &mBuffers[portIndex].editItemAt(i);
-
- if (info->mData == buffer) {
- if (index != NULL) {
- *index = i;
- }
- return info;
- }
- }
-
- TRESPASS();
-
- return NULL;
-}
-
-void MediaFilter::postFillThisBuffer(BufferInfo *info) {
- ALOGV("postFillThisBuffer on buffer %d", info->mBufferID);
- if (mPortEOS[kPortIndexInput]) {
- return;
- }
-
- CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
-
- info->mGeneration = mGeneration;
-
- info->mData->meta()->clear();
-
- sp<AMessage> reply = new AMessage(kWhatInputBufferFilled, this);
- reply->setInt32("buffer-id", info->mBufferID);
-
- info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
-
- mBufferChannel->fillThisBuffer(info->mData);
-}
-
-void MediaFilter::postDrainThisBuffer(BufferInfo *info) {
- CHECK_EQ((int)info->mStatus, (int)BufferInfo::OWNED_BY_US);
-
- info->mGeneration = mGeneration;
-
- sp<AMessage> reply = new AMessage(kWhatOutputBufferDrained, this);
- reply->setInt32("buffer-id", info->mBufferID);
-
- mBufferChannel->drainThisBuffer(info->mData, info->mOutputFlags);
-
- info->mStatus = BufferInfo::OWNED_BY_UPSTREAM;
-}
-
-void MediaFilter::postEOS() {
- mCallback->onEos(ERROR_END_OF_STREAM);
-
- ALOGV("Sent kWhatEOS.");
-}
-
-void MediaFilter::requestFillEmptyInput() {
- if (mPortEOS[kPortIndexInput]) {
- return;
- }
-
- for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
- BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
-
- if (info->mStatus == BufferInfo::OWNED_BY_US) {
- postFillThisBuffer(info);
- }
- }
-}
-
-void MediaFilter::processBuffers() {
- if (mAvailableInputBuffers.empty() || mAvailableOutputBuffers.empty()) {
- ALOGV("Skipping process (buffers unavailable)");
- return;
- }
-
- if (mPortEOS[kPortIndexOutput]) {
- // TODO notify caller of queueInput error when it is supported
- // in MediaCodec
- ALOGW("Tried to process a buffer after EOS.");
- return;
- }
-
- BufferInfo *inputInfo = mAvailableInputBuffers[0];
- mAvailableInputBuffers.removeAt(0);
- BufferInfo *outputInfo = mAvailableOutputBuffers[0];
- mAvailableOutputBuffers.removeAt(0);
-
- status_t err;
- err = mFilter->processBuffers(inputInfo->mData, outputInfo->mData);
- if (err != (status_t)OK) {
- outputInfo->mData->meta()->setInt32("err", err);
- }
-
- int64_t timeUs;
- CHECK(inputInfo->mData->meta()->findInt64("timeUs", &timeUs));
- outputInfo->mData->meta()->setInt64("timeUs", timeUs);
- outputInfo->mOutputFlags = 0;
- int32_t eos = 0;
- if (inputInfo->mData->meta()->findInt32("eos", &eos) && eos != 0) {
- outputInfo->mOutputFlags |= BUFFER_FLAG_END_OF_STREAM;
- mPortEOS[kPortIndexOutput] = true;
- outputInfo->mData->meta()->setInt32("eos", eos);
- postEOS();
- ALOGV("Output stream saw EOS.");
- }
-
- ALOGV("Processed input buffer %u [%zu], output buffer %u [%zu]",
- inputInfo->mBufferID, inputInfo->mData->size(),
- outputInfo->mBufferID, outputInfo->mData->size());
-
- if (mGraphicBufferListener != NULL) {
- delete inputInfo;
- } else {
- postFillThisBuffer(inputInfo);
- }
- postDrainThisBuffer(outputInfo);
-
- // prevent any corner case where buffers could get stuck in queue
- signalProcessBuffers();
-}
-
-void MediaFilter::onAllocateComponent(const sp<AMessage> &msg) {
- CHECK_EQ(mState, UNINITIALIZED);
-
- CHECK(msg->findString("componentName", &mComponentName));
- const char* name = mComponentName.c_str();
- if (!strcasecmp(name, "android.filter.zerofilter")) {
- mFilter = new ZeroFilter;
- } else if (!strcasecmp(name, "android.filter.saturation")) {
- mFilter = new SaturationFilter;
- } else if (!strcasecmp(name, "android.filter.intrinsicblur")) {
- mFilter = new IntrinsicBlurFilter;
- } else if (!strcasecmp(name, "android.filter.RenderScript")) {
- mFilter = new RSFilter;
- } else {
- ALOGE("Unrecognized filter name: %s", name);
- signalError(NAME_NOT_FOUND);
- return;
- }
-
- mCallback->onComponentAllocated(mComponentName.c_str());
- mState = INITIALIZED;
- ALOGV("Handled kWhatAllocateComponent.");
-}
-
-void MediaFilter::onConfigureComponent(const sp<AMessage> &msg) {
- // TODO: generalize to allow audio filters as well as video
-
- CHECK_EQ(mState, INITIALIZED);
-
- // get params - at least mime, width & height
- AString mime;
- CHECK(msg->findString("mime", &mime));
- if (strcasecmp(mime.c_str(), MEDIA_MIMETYPE_VIDEO_RAW)) {
- ALOGE("Bad mime: %s", mime.c_str());
- signalError(BAD_VALUE);
- return;
- }
-
- CHECK(msg->findInt32("width", &mWidth));
- CHECK(msg->findInt32("height", &mHeight));
- if (!msg->findInt32("stride", &mStride)) {
- mStride = mWidth;
- }
- if (!msg->findInt32("slice-height", &mSliceHeight)) {
- mSliceHeight = mHeight;
- }
-
- mMaxInputSize = mWidth * mHeight * 4; // room for ARGB8888
- int32_t maxInputSize;
- if (msg->findInt32("max-input-size", &maxInputSize)
- && (size_t)maxInputSize > mMaxInputSize) {
- mMaxInputSize = maxInputSize;
- }
-
- if (!msg->findInt32("color-format", &mColorFormatIn)) {
- // default to OMX_COLOR_Format32bitARGB8888
- mColorFormatIn = OMX_COLOR_Format32bitARGB8888;
- msg->setInt32("color-format", mColorFormatIn);
- }
- mColorFormatOut = mColorFormatIn;
-
- mMaxOutputSize = mWidth * mHeight * 4; // room for ARGB8888
-
- AString cacheDir;
- if (!msg->findString("cacheDir", &cacheDir)) {
- ALOGE("Failed to find cache directory in config message.");
- signalError(NAME_NOT_FOUND);
- return;
- }
-
- status_t err;
- err = mFilter->configure(msg);
- if (err != (status_t)OK) {
- ALOGE("Failed to configure filter component, err %d", err);
- signalError(err);
- return;
- }
-
- mInputFormat = new AMessage();
- mInputFormat->setString("mime", mime.c_str());
- mInputFormat->setInt32("stride", mStride);
- mInputFormat->setInt32("slice-height", mSliceHeight);
- mInputFormat->setInt32("color-format", mColorFormatIn);
- mInputFormat->setRect("crop", 0, 0, mStride, mSliceHeight);
- mInputFormat->setInt32("width", mWidth);
- mInputFormat->setInt32("height", mHeight);
-
- mOutputFormat = new AMessage();
- mOutputFormat->setString("mime", mime.c_str());
- mOutputFormat->setInt32("stride", mStride);
- mOutputFormat->setInt32("slice-height", mSliceHeight);
- mOutputFormat->setInt32("color-format", mColorFormatOut);
- mOutputFormat->setRect("crop", 0, 0, mStride, mSliceHeight);
- mOutputFormat->setInt32("width", mWidth);
- mOutputFormat->setInt32("height", mHeight);
- mOutputFormat->setInt32("using-sw-renderer", 1);
-
- mCallback->onComponentConfigured(mInputFormat, mOutputFormat);
- mState = CONFIGURED;
- ALOGV("Handled kWhatConfigureComponent.");
-}
-
-void MediaFilter::onStart() {
- CHECK_EQ(mState, CONFIGURED);
-
- allocateBuffersOnPort(kPortIndexInput);
-
- allocateBuffersOnPort(kPortIndexOutput);
-
- mCallback->onStartCompleted();
-
- status_t err = mFilter->start();
- if (err != (status_t)OK) {
- ALOGE("Failed to start filter component, err %d", err);
- signalError(err);
- return;
- }
-
- mPortEOS[kPortIndexInput] = false;
- mPortEOS[kPortIndexOutput] = false;
- mInputEOSResult = OK;
- mState = STARTED;
-
- requestFillEmptyInput();
- ALOGV("Handled kWhatStart.");
-}
-
-void MediaFilter::onInputBufferFilled(const sp<AMessage> &msg) {
- sp<RefBase> obj;
- CHECK(msg->findObject("buffer", &obj));
- sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
- ssize_t index = -1;
- BufferInfo *info = findBuffer(kPortIndexInput, buffer, &index);
-
- if (mState != STARTED) {
- // we're not running, so we'll just keep that buffer...
- info->mStatus = BufferInfo::OWNED_BY_US;
- return;
- }
-
- if (info->mGeneration != mGeneration) {
- ALOGV("Caught a stale input buffer [index %zd]", index);
- // buffer is stale (taken before a flush/shutdown) - repost it
- CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
- postFillThisBuffer(info);
- return;
- }
-
- CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
- info->mStatus = BufferInfo::OWNED_BY_US;
-
- int32_t err = OK;
- bool eos = false;
-
- int32_t isCSD;
- if (buffer != NULL && buffer->meta()->findInt32("csd", &isCSD)
- && isCSD != 0) {
- // ignore codec-specific data buffers
- ALOGW("MediaFilter received a codec-specific data buffer");
- postFillThisBuffer(info);
- return;
- }
-
- int32_t tmp;
- if (buffer != NULL && buffer->meta()->findInt32("eos", &tmp) && tmp) {
- eos = true;
- err = ERROR_END_OF_STREAM;
- }
-
- mAvailableInputBuffers.push_back(info);
- processBuffers();
-
- if (eos) {
- mPortEOS[kPortIndexInput] = true;
- mInputEOSResult = err;
- }
-
- ALOGV("Handled kWhatInputBufferFilled. [index %zd]", index);
-}
-
-void MediaFilter::onOutputBufferDrained(const sp<AMessage> &msg) {
- sp<RefBase> obj;
- CHECK(msg->findObject("buffer", &obj));
- sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
- ssize_t index = -1;
- BufferInfo *info = findBuffer(kPortIndexOutput, buffer, &index);
-
- if (mState != STARTED) {
- // we're not running, so we'll just keep that buffer...
- info->mStatus = BufferInfo::OWNED_BY_US;
- return;
- }
-
- if (info->mGeneration != mGeneration) {
- ALOGV("Caught a stale output buffer [index %zd]", index);
- // buffer is stale (taken before a flush/shutdown) - keep it
- CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_US);
- return;
- }
-
- CHECK_EQ(info->mStatus, BufferInfo::OWNED_BY_UPSTREAM);
- info->mStatus = BufferInfo::OWNED_BY_US;
-
- mAvailableOutputBuffers.push_back(info);
-
- processBuffers();
-
- ALOGV("Handled kWhatOutputBufferDrained. [index %zd]", index);
-}
-
-void MediaFilter::onShutdown(const sp<AMessage> &msg) {
- mGeneration++;
-
- if (mState != UNINITIALIZED) {
- mFilter->reset();
- }
-
- int32_t keepComponentAllocated;
- CHECK(msg->findInt32("keepComponentAllocated", &keepComponentAllocated));
- if (!keepComponentAllocated || mState == UNINITIALIZED) {
- mState = UNINITIALIZED;
- } else {
- mState = INITIALIZED;
- }
-
- if (keepComponentAllocated) {
- mCallback->onStopCompleted();
- } else {
- mCallback->onReleaseCompleted();
- }
-}
-
-void MediaFilter::onFlush() {
- mGeneration++;
-
- mAvailableInputBuffers.clear();
- for (size_t i = 0; i < mBuffers[kPortIndexInput].size(); ++i) {
- BufferInfo *info = &mBuffers[kPortIndexInput].editItemAt(i);
- info->mStatus = BufferInfo::OWNED_BY_US;
- }
- mAvailableOutputBuffers.clear();
- for (size_t i = 0; i < mBuffers[kPortIndexOutput].size(); ++i) {
- BufferInfo *info = &mBuffers[kPortIndexOutput].editItemAt(i);
- info->mStatus = BufferInfo::OWNED_BY_US;
- mAvailableOutputBuffers.push_back(info);
- }
-
- mPortEOS[kPortIndexInput] = false;
- mPortEOS[kPortIndexOutput] = false;
- mInputEOSResult = OK;
-
- mCallback->onFlushCompleted();
- ALOGV("Posted kWhatFlushCompleted");
-
- // MediaCodec returns all input buffers after flush, so in
- // onInputBufferFilled we call postFillThisBuffer on them
-}
-
-void MediaFilter::onSetParameters(const sp<AMessage> &msg) {
- CHECK(mState != STARTED);
-
- status_t err = mFilter->setParameters(msg);
- if (err != (status_t)OK) {
- ALOGE("setParameters returned err %d", err);
- }
-}
-
-void MediaFilter::onCreateInputSurface() {
- CHECK(mState == CONFIGURED);
-
- mGraphicBufferListener = new GraphicBufferListener;
-
- sp<AMessage> notify = new AMessage();
- notify->setTarget(this);
- status_t err = mGraphicBufferListener->init(
- notify, mStride, mSliceHeight, kBufferCountActual);
-
- if (err != OK) {
- ALOGE("Failed to init mGraphicBufferListener: %d", err);
- signalError(err);
- return;
- }
-
- mCallback->onInputSurfaceCreated(
- nullptr, nullptr,
- new BufferProducerWrapper(
- mGraphicBufferListener->getIGraphicBufferProducer()));
-}
-
-void MediaFilter::onInputFrameAvailable() {
- BufferItem item = mGraphicBufferListener->getBufferItem();
- sp<GraphicBuffer> buf = mGraphicBufferListener->getBuffer(item);
-
- // get pointer to graphic buffer
- void* bufPtr;
- buf->lock(GraphicBuffer::USAGE_SW_READ_OFTEN, &bufPtr);
-
- // HACK - there is no OMX_COLOR_FORMATTYPE value for RGBA, so the format
- // conversion is hardcoded until we add this.
- // TODO: check input format and convert only if necessary
- // copy RGBA graphic buffer into temporary ARGB input buffer
- BufferInfo *inputInfo = new BufferInfo;
- inputInfo->mData = new MediaCodecBuffer(
- mInputFormat, new ABuffer(buf->getWidth() * buf->getHeight() * 4));
- ALOGV("Copying surface data into temp buffer.");
- convertRGBAToARGB(
- (uint8_t*)bufPtr, buf->getWidth(), buf->getHeight(),
- buf->getStride(), inputInfo->mData->data());
- inputInfo->mBufferID = item.mSlot;
- inputInfo->mGeneration = mGeneration;
- inputInfo->mOutputFlags = 0;
- inputInfo->mStatus = BufferInfo::OWNED_BY_US;
- inputInfo->mData->meta()->setInt64("timeUs", item.mTimestamp / 1000);
-
- mAvailableInputBuffers.push_back(inputInfo);
-
- mGraphicBufferListener->releaseBuffer(item);
-
- signalProcessBuffers();
-}
-
-void MediaFilter::onSignalEndOfInputStream() {
- // if using input surface, need to send an EOS output buffer
- if (mGraphicBufferListener != NULL) {
- Vector<BufferInfo> *outputBufs = &mBuffers[kPortIndexOutput];
- BufferInfo* eosBuf;
- bool foundBuf = false;
- for (size_t i = 0; i < kBufferCountActual; i++) {
- eosBuf = &outputBufs->editItemAt(i);
- if (eosBuf->mStatus == BufferInfo::OWNED_BY_US) {
- foundBuf = true;
- break;
- }
- }
-
- if (!foundBuf) {
- ALOGE("onSignalEndOfInputStream failed to find an output buffer");
- return;
- }
-
- eosBuf->mOutputFlags = BUFFER_FLAG_END_OF_STREAM;
- eosBuf->mGeneration = mGeneration;
- eosBuf->mData->setRange(0, 0);
- postDrainThisBuffer(eosBuf);
- ALOGV("Posted EOS on output buffer %u", eosBuf->mBufferID);
- }
-
- mPortEOS[kPortIndexOutput] = true;
- mCallback->onSignaledInputEOS(OK);
-
- ALOGV("Output stream saw EOS.");
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/RSFilter.cpp b/media/libstagefright/filters/RSFilter.cpp
deleted file mode 100644
index 225a375..0000000
--- a/media/libstagefright/filters/RSFilter.cpp
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "RSFilter"
-
-#include <utils/Log.h>
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include "RSFilter.h"
-
-namespace android {
-
-RSFilter::RSFilter() {
-
-}
-
-RSFilter::~RSFilter() {
-
-}
-
-status_t RSFilter::configure(const sp<AMessage> &msg) {
- status_t err = SimpleFilter::configure(msg);
- if (err != OK) {
- return err;
- }
-
- if (!msg->findString("cacheDir", &mCacheDir)) {
- ALOGE("Failed to find cache directory in config message.");
- return NAME_NOT_FOUND;
- }
-
- sp<RenderScriptWrapper> wrapper;
- if (!msg->findObject("rs-wrapper", (sp<RefBase>*)&wrapper)) {
- ALOGE("Failed to find RenderScriptWrapper in config message.");
- return NAME_NOT_FOUND;
- }
-
- mRS = wrapper->mContext;
- mCallback = wrapper->mCallback;
-
- return OK;
-}
-
-status_t RSFilter::start() {
- // 32-bit elements for ARGB8888
- RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
-
- RSC::Type::Builder tb(mRS, e);
- tb.setX(mWidth);
- tb.setY(mHeight);
- RSC::sp<const RSC::Type> t = tb.create();
-
- mAllocIn = RSC::Allocation::createTyped(mRS, t);
- mAllocOut = RSC::Allocation::createTyped(mRS, t);
-
- return OK;
-}
-
-void RSFilter::reset() {
- mCallback.clear();
- mAllocOut.clear();
- mAllocIn.clear();
- mRS.clear();
-}
-
-status_t RSFilter::setParameters(const sp<AMessage> &msg) {
- return mCallback->handleSetParameters(msg);
-}
-
-status_t RSFilter::processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
- mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
- mCallback->processBuffers(mAllocIn.get(), mAllocOut.get());
- mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/SaturationFilter.cpp b/media/libstagefright/filters/SaturationFilter.cpp
deleted file mode 100644
index 0a1df05..0000000
--- a/media/libstagefright/filters/SaturationFilter.cpp
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "SaturationFilter"
-
-#include <utils/Log.h>
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include "SaturationFilter.h"
-
-namespace android {
-
-status_t SaturationFilter::configure(const sp<AMessage> &msg) {
- status_t err = SimpleFilter::configure(msg);
- if (err != OK) {
- return err;
- }
-
- if (!msg->findString("cacheDir", &mCacheDir)) {
- ALOGE("Failed to find cache directory in config message.");
- return NAME_NOT_FOUND;
- }
-
- return OK;
-}
-
-status_t SaturationFilter::start() {
- // TODO: use a single RS context object for entire application
- mRS = new RSC::RS();
-
- if (!mRS->init(mCacheDir.c_str())) {
- ALOGE("Failed to initialize RenderScript context.");
- return NO_INIT;
- }
-
- // 32-bit elements for ARGB8888
- RSC::sp<const RSC::Element> e = RSC::Element::U8_4(mRS);
-
- RSC::Type::Builder tb(mRS, e);
- tb.setX(mWidth);
- tb.setY(mHeight);
- RSC::sp<const RSC::Type> t = tb.create();
-
- mAllocIn = RSC::Allocation::createTyped(mRS, t);
- mAllocOut = RSC::Allocation::createTyped(mRS, t);
-
- mScript = new ScriptC_saturationARGB(mRS);
-
- mScript->set_gSaturation(mSaturation);
-
- return OK;
-}
-
-void SaturationFilter::reset() {
- mScript.clear();
- mAllocOut.clear();
- mAllocIn.clear();
- mRS.clear();
-}
-
-status_t SaturationFilter::setParameters(const sp<AMessage> &msg) {
- sp<AMessage> params;
- CHECK(msg->findMessage("params", &params));
-
- float saturation;
- if (params->findFloat("saturation", &saturation)) {
- mSaturation = saturation;
- }
-
- return OK;
-}
-
-status_t SaturationFilter::processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
- mAllocIn->copy1DRangeFrom(0, mWidth * mHeight, srcBuffer->data());
- mScript->forEach_root(mAllocIn, mAllocOut);
- mAllocOut->copy1DRangeTo(0, mWidth * mHeight, outBuffer->data());
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/SimpleFilter.cpp b/media/libstagefright/filters/SimpleFilter.cpp
deleted file mode 100644
index 6c1ca2c..0000000
--- a/media/libstagefright/filters/SimpleFilter.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include "SimpleFilter.h"
-
-namespace android {
-
-status_t SimpleFilter::configure(const sp<AMessage> &msg) {
- CHECK(msg->findInt32("width", &mWidth));
- CHECK(msg->findInt32("height", &mHeight));
- if (!msg->findInt32("stride", &mStride)) {
- mStride = mWidth;
- }
- if (!msg->findInt32("slice-height", &mSliceHeight)) {
- mSliceHeight = mHeight;
- }
- CHECK(msg->findInt32("color-format", &mColorFormatIn));
- mColorFormatOut = mColorFormatIn;
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/ZeroFilter.cpp b/media/libstagefright/filters/ZeroFilter.cpp
deleted file mode 100644
index 74b94b7..0000000
--- a/media/libstagefright/filters/ZeroFilter.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-//#define LOG_NDEBUG 0
-#define LOG_TAG "ZeroFilter"
-
-#include <media/MediaCodecBuffer.h>
-#include <media/stagefright/foundation/ADebug.h>
-#include <media/stagefright/foundation/AMessage.h>
-
-#include "ZeroFilter.h"
-
-namespace android {
-
-status_t ZeroFilter::setParameters(const sp<AMessage> &msg) {
- sp<AMessage> params;
- CHECK(msg->findMessage("params", &params));
-
- int32_t invert;
- if (params->findInt32("invert", &invert)) {
- mInvertData = (invert != 0);
- }
-
- return OK;
-}
-
-status_t ZeroFilter::processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) {
- // assuming identical input & output buffers, since we're a copy filter
- if (mInvertData) {
- uint32_t* src = (uint32_t*)srcBuffer->data();
- uint32_t* dest = (uint32_t*)outBuffer->data();
- for (size_t i = 0; i < srcBuffer->size() / 4; ++i) {
- *(dest++) = *(src++) ^ 0xFFFFFFFF;
- }
- } else {
- memcpy(outBuffer->data(), srcBuffer->data(), srcBuffer->size());
- }
- outBuffer->setRange(0, srcBuffer->size());
-
- return OK;
-}
-
-} // namespace android
diff --git a/media/libstagefright/filters/include/filters/ColorConvert.h b/media/libstagefright/filters/include/filters/ColorConvert.h
deleted file mode 100644
index 13faa02..0000000
--- a/media/libstagefright/filters/include/filters/ColorConvert.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef COLOR_CONVERT_H_
-#define COLOR_CONVERT_H_
-
-#include <inttypes.h>
-
-namespace android {
-
-void YUVToRGB(
- int32_t y, int32_t u, int32_t v,
- int32_t* r, int32_t* g, int32_t* b);
-
-void convertYUV420spToARGB(
- uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
- uint8_t *dest);
-
-void convertYUV420spToRGB888(
- uint8_t *pY, uint8_t *pUV, int32_t width, int32_t height,
- uint8_t *dest);
-
-// TODO: remove when RGBA support is added to SoftwareRenderer
-void convertRGBAToARGB(
- uint8_t *src, int32_t width, int32_t height, uint32_t stride,
- uint8_t *dest);
-
-} // namespace android
-
-#endif // COLOR_CONVERT_H_
diff --git a/media/libstagefright/filters/include/filters/GraphicBufferListener.h b/media/libstagefright/filters/include/filters/GraphicBufferListener.h
deleted file mode 100644
index 586bf65..0000000
--- a/media/libstagefright/filters/include/filters/GraphicBufferListener.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef GRAPHIC_BUFFER_LISTENER_H_
-#define GRAPHIC_BUFFER_LISTENER_H_
-
-#include <gui/BufferQueue.h>
-
-namespace android {
-
-struct AMessage;
-
-struct GraphicBufferListener : public BufferQueue::ConsumerListener {
-public:
- GraphicBufferListener() {};
-
- status_t init(
- const sp<AMessage> &notify,
- size_t bufferWidth, size_t bufferHeight, size_t bufferCount);
-
- virtual void onFrameAvailable(const BufferItem& item);
- virtual void onBuffersReleased();
- virtual void onSidebandStreamChanged();
-
- // Returns the handle to the producer side of the BufferQueue. Buffers
- // queued on this will be received by GraphicBufferListener.
- sp<IGraphicBufferProducer> getIGraphicBufferProducer() const {
- return mProducer;
- }
-
- BufferItem getBufferItem();
- sp<GraphicBuffer> getBuffer(BufferItem item);
- status_t releaseBuffer(BufferItem item);
-
- enum {
- kWhatFrameAvailable = 'frav',
- };
-
-private:
- sp<AMessage> mNotify;
- size_t mNumFramesAvailable;
-
- mutable Mutex mMutex;
-
- // Our BufferQueue interfaces. mProducer is passed to the producer through
- // getIGraphicBufferProducer, and mConsumer is used internally to retrieve
- // the buffers queued by the producer.
- sp<IGraphicBufferProducer> mProducer;
- sp<IGraphicBufferConsumer> mConsumer;
-
- // Cache of GraphicBuffers from the buffer queue.
- sp<GraphicBuffer> mBufferSlot[BufferQueue::NUM_BUFFER_SLOTS];
-};
-
-} // namespace android
-
-#endif // GRAPHIC_BUFFER_LISTENER_H
diff --git a/media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h b/media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
deleted file mode 100644
index a2aabfa..0000000
--- a/media/libstagefright/filters/include/filters/IntrinsicBlurFilter.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef INTRINSIC_BLUR_FILTER_H_
-#define INTRINSIC_BLUR_FILTER_H_
-
-#include "RenderScript.h"
-#include "SimpleFilter.h"
-
-namespace android {
-
-struct IntrinsicBlurFilter : public SimpleFilter {
-public:
- IntrinsicBlurFilter() : mBlurRadius(1.f) {};
-
- virtual status_t configure(const sp<AMessage> &msg);
- virtual status_t start();
- virtual void reset();
- virtual status_t setParameters(const sp<AMessage> &msg);
- virtual status_t processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
-
-protected:
- virtual ~IntrinsicBlurFilter() {};
-
-private:
- AString mCacheDir;
- RSC::sp<RSC::RS> mRS;
- RSC::sp<RSC::Allocation> mAllocIn;
- RSC::sp<RSC::Allocation> mAllocOut;
- RSC::sp<RSC::ScriptIntrinsicBlur> mBlur;
- float mBlurRadius;
-};
-
-} // namespace android
-
-#endif // INTRINSIC_BLUR_FILTER_H_
diff --git a/media/libstagefright/filters/include/filters/RSFilter.h b/media/libstagefright/filters/include/filters/RSFilter.h
deleted file mode 100644
index 3326284..0000000
--- a/media/libstagefright/filters/include/filters/RSFilter.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef RS_FILTER_H_
-#define RS_FILTER_H_
-
-#include <media/stagefright/RenderScriptWrapper.h>
-#include <RenderScript.h>
-
-#include "SimpleFilter.h"
-
-namespace android {
-
-struct AString;
-
-struct RSFilter : public SimpleFilter {
-public:
- RSFilter();
-
- virtual status_t configure(const sp<AMessage> &msg);
- virtual status_t start();
- virtual void reset();
- virtual status_t setParameters(const sp<AMessage> &msg);
- virtual status_t processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
-
-protected:
- virtual ~RSFilter();
-
-private:
- AString mCacheDir;
- sp<RenderScriptWrapper::RSFilterCallback> mCallback;
- RSC::sp<RSC::RS> mRS;
- RSC::sp<RSC::Allocation> mAllocIn;
- RSC::sp<RSC::Allocation> mAllocOut;
-};
-
-} // namespace android
-
-#endif // RS_FILTER_H_
diff --git a/media/libstagefright/filters/include/filters/SaturationFilter.h b/media/libstagefright/filters/include/filters/SaturationFilter.h
deleted file mode 100644
index 317e469..0000000
--- a/media/libstagefright/filters/include/filters/SaturationFilter.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SATURATION_FILTER_H_
-#define SATURATION_FILTER_H_
-
-#include <RenderScript.h>
-
-#include "ScriptC_saturationARGB.h"
-#include "SimpleFilter.h"
-
-namespace android {
-
-struct SaturationFilter : public SimpleFilter {
-public:
- SaturationFilter() : mSaturation(1.f) {};
-
- virtual status_t configure(const sp<AMessage> &msg);
- virtual status_t start();
- virtual void reset();
- virtual status_t setParameters(const sp<AMessage> &msg);
- virtual status_t processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
-
-protected:
- virtual ~SaturationFilter() {};
-
-private:
- AString mCacheDir;
- RSC::sp<RSC::RS> mRS;
- RSC::sp<RSC::Allocation> mAllocIn;
- RSC::sp<RSC::Allocation> mAllocOut;
- RSC::sp<ScriptC_saturationARGB> mScript;
- float mSaturation;
-};
-
-} // namespace android
-
-#endif // SATURATION_FILTER_H_
diff --git a/media/libstagefright/filters/include/filters/SimpleFilter.h b/media/libstagefright/filters/include/filters/SimpleFilter.h
deleted file mode 100644
index a3c2d76..0000000
--- a/media/libstagefright/filters/include/filters/SimpleFilter.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef SIMPLE_FILTER_H_
-#define SIMPLE_FILTER_H_
-
-#include <stdint.h>
-#include <utils/Errors.h>
-#include <utils/RefBase.h>
-
-namespace android {
-
-struct AMessage;
-class MediaCodecBuffer;
-
-struct SimpleFilter : public RefBase {
-public:
- SimpleFilter() : mWidth(0), mHeight(0), mStride(0), mSliceHeight(0),
- mColorFormatIn(0), mColorFormatOut(0) {};
-
- virtual status_t configure(const sp<AMessage> &msg);
-
- virtual status_t start() = 0;
- virtual void reset() = 0;
- virtual status_t setParameters(const sp<AMessage> &msg) = 0;
- virtual status_t processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer) = 0;
-
-protected:
- int32_t mWidth, mHeight;
- int32_t mStride, mSliceHeight;
- int32_t mColorFormatIn, mColorFormatOut;
-
- virtual ~SimpleFilter() {};
-};
-
-} // namespace android
-
-#endif // SIMPLE_FILTER_H_
diff --git a/media/libstagefright/filters/include/filters/ZeroFilter.h b/media/libstagefright/filters/include/filters/ZeroFilter.h
deleted file mode 100644
index f941cc8..0000000
--- a/media/libstagefright/filters/include/filters/ZeroFilter.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ZERO_FILTER_H_
-#define ZERO_FILTER_H_
-
-#include "SimpleFilter.h"
-
-namespace android {
-
-struct ZeroFilter : public SimpleFilter {
-public:
- ZeroFilter() : mInvertData(false) {};
-
- virtual status_t start() { return OK; };
- virtual void reset() {};
- virtual status_t setParameters(const sp<AMessage> &msg);
- virtual status_t processBuffers(
- const sp<MediaCodecBuffer> &srcBuffer, const sp<MediaCodecBuffer> &outBuffer);
-
-protected:
- virtual ~ZeroFilter() {};
-
-private:
- bool mInvertData;
-};
-
-} // namespace android
-
-#endif // ZERO_FILTER_H_
diff --git a/media/libstagefright/filters/saturation.rscript b/media/libstagefright/filters/saturation.rscript
deleted file mode 100644
index 2c867ac..0000000
--- a/media/libstagefright/filters/saturation.rscript
+++ /dev/null
@@ -1,40 +0,0 @@
-// Sample script for RGB888 support (compare to saturationARGB.rs)
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma version(1)
-#pragma rs java_package_name(com.android.rs.cppbasic)
-#pragma rs_fp_relaxed
-
-const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
-
-// global variables (parameters accessible to application code)
-float gSaturation = 1.0f;
-
-void root(const uchar3 *v_in, uchar3 *v_out) {
- // scale 0-255 uchar to 0-1.0 float
- float3 in = {v_in->r * 0.003921569f, v_in->g * 0.003921569f,
- v_in->b * 0.003921569f};
-
- // apply saturation filter
- float3 result = dot(in, gMonoMult);
- result = mix(result, in, gSaturation);
-
- // convert to uchar, copied from rsPackColorTo8888
- v_out->x = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
- v_out->y = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
- v_out->z = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
-}
diff --git a/media/libstagefright/filters/saturationARGB.rscript b/media/libstagefright/filters/saturationARGB.rscript
deleted file mode 100644
index 1de9dd8..0000000
--- a/media/libstagefright/filters/saturationARGB.rscript
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#pragma version(1)
-#pragma rs java_package_name(com.android.rs.cppbasic)
-#pragma rs_fp_relaxed
-
-const static float3 gMonoMult = {0.299f, 0.587f, 0.114f};
-
-// global variables (parameters accessible to application code)
-float gSaturation = 1.0f;
-
-void root(const uchar4 *v_in, uchar4 *v_out) {
- v_out->x = v_in->x; // don't modify A
-
- // get RGB, scale 0-255 uchar to 0-1.0 float
- float3 rgb = {v_in->y * 0.003921569f, v_in->z * 0.003921569f,
- v_in->w * 0.003921569f};
-
- // apply saturation filter
- float3 result = dot(rgb, gMonoMult);
- result = mix(result, rgb, gSaturation);
-
- v_out->y = (uchar)clamp((result.r * 255.f + 0.5f), 0.f, 255.f);
- v_out->z = (uchar)clamp((result.g * 255.f + 0.5f), 0.f, 255.f);
- v_out->w = (uchar)clamp((result.b * 255.f + 0.5f), 0.f, 255.f);
-}
diff --git a/media/libstagefright/include/media/stagefright/MediaFilter.h b/media/libstagefright/include/media/stagefright/MediaFilter.h
deleted file mode 100644
index 1255e0f..0000000
--- a/media/libstagefright/include/media/stagefright/MediaFilter.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef MEDIA_FILTER_H_
-#define MEDIA_FILTER_H_
-
-#include <media/stagefright/CodecBase.h>
-
-namespace android {
-
-struct GraphicBufferListener;
-struct SimpleFilter;
-
-struct MediaFilter : public CodecBase {
- MediaFilter();
-
- virtual std::shared_ptr<BufferChannelBase> getBufferChannel() override;
- virtual void initiateAllocateComponent(const sp<AMessage> &msg);
- virtual void initiateConfigureComponent(const sp<AMessage> &msg);
- virtual void initiateCreateInputSurface();
- virtual void initiateSetInputSurface(const sp<PersistentSurface> &surface);
-
- virtual void initiateStart();
- virtual void initiateShutdown(bool keepComponentAllocated = false);
-
- virtual void signalFlush();
- virtual void signalResume();
-
- virtual void signalRequestIDRFrame();
- virtual void signalSetParameters(const sp<AMessage> &msg);
- virtual void signalEndOfInputStream();
-
- virtual void onMessageReceived(const sp<AMessage> &msg);
-
-protected:
- virtual ~MediaFilter();
-
-private:
- struct BufferInfo {
- enum Status {
- OWNED_BY_US,
- OWNED_BY_UPSTREAM,
- };
-
- uint32_t mBufferID;
- int32_t mGeneration;
- int32_t mOutputFlags;
- Status mStatus;
-
- sp<MediaCodecBuffer> mData;
- };
-
- class BufferChannel;
-
- enum State {
- UNINITIALIZED,
- INITIALIZED,
- CONFIGURED,
- STARTED,
- };
-
- enum {
- kWhatInputBufferFilled = 'inpF',
- kWhatOutputBufferDrained = 'outD',
- kWhatShutdown = 'shut',
- kWhatFlush = 'flus',
- kWhatResume = 'resm',
- kWhatAllocateComponent = 'allo',
- kWhatConfigureComponent = 'conf',
- kWhatCreateInputSurface = 'cisf',
- kWhatSignalEndOfInputStream = 'eois',
- kWhatStart = 'star',
- kWhatSetParameters = 'setP',
- kWhatProcessBuffers = 'proc',
- };
-
- enum {
- kPortIndexInput = 0,
- kPortIndexOutput = 1
- };
-
- // member variables
- AString mComponentName;
- State mState;
- status_t mInputEOSResult;
- int32_t mWidth, mHeight;
- int32_t mStride, mSliceHeight;
- int32_t mColorFormatIn, mColorFormatOut;
- size_t mMaxInputSize, mMaxOutputSize;
- int32_t mGeneration;
- sp<AMessage> mInputFormat;
- sp<AMessage> mOutputFormat;
-
- Vector<BufferInfo> mBuffers[2];
- Vector<BufferInfo*> mAvailableInputBuffers;
- Vector<BufferInfo*> mAvailableOutputBuffers;
- bool mPortEOS[2];
-
- sp<SimpleFilter> mFilter;
- sp<GraphicBufferListener> mGraphicBufferListener;
-
- std::shared_ptr<BufferChannel> mBufferChannel;
-
- // helper functions
- void signalProcessBuffers();
- void signalError(status_t error);
-
- status_t allocateBuffersOnPort(OMX_U32 portIndex);
- BufferInfo *findBuffer(
- uint32_t portIndex, const sp<MediaCodecBuffer> &buffer,
- ssize_t *index = NULL);
- void postFillThisBuffer(BufferInfo *info);
- void postDrainThisBuffer(BufferInfo *info);
- void postEOS();
- void requestFillEmptyInput();
- void processBuffers();
-
- void onAllocateComponent(const sp<AMessage> &msg);
- void onConfigureComponent(const sp<AMessage> &msg);
- void onStart();
- void onInputBufferFilled(const sp<AMessage> &msg);
- void onOutputBufferDrained(const sp<AMessage> &msg);
- void onShutdown(const sp<AMessage> &msg);
- void onFlush();
- void onSetParameters(const sp<AMessage> &msg);
- void onCreateInputSurface();
- void onInputFrameAvailable();
- void onSignalEndOfInputStream();
-
- DISALLOW_EVIL_CONSTRUCTORS(MediaFilter);
-};
-
-} // namespace android
-
-#endif // MEDIA_FILTER_H_
diff --git a/media/libstagefright/include/media/stagefright/RenderScriptWrapper.h b/media/libstagefright/include/media/stagefright/RenderScriptWrapper.h
deleted file mode 100644
index b42649e..0000000
--- a/media/libstagefright/include/media/stagefright/RenderScriptWrapper.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef RENDERSCRIPT_WRAPPER_H_
-#define RENDERSCRIPT_WRAPPER_H_
-
-#include <RenderScript.h>
-
-namespace android {
-
-struct RenderScriptWrapper : public RefBase {
-public:
- struct RSFilterCallback : public RefBase {
- public:
- // called by RSFilter to process each input buffer
- virtual status_t processBuffers(
- RSC::Allocation* inBuffer,
- RSC::Allocation* outBuffer) = 0;
-
- virtual status_t handleSetParameters(const sp<AMessage> &msg) = 0;
- };
-
- sp<RSFilterCallback> mCallback;
- RSC::sp<RSC::RS> mContext;
-};
-
-} // namespace android
-
-#endif // RENDERSCRIPT_WRAPPER_H_
diff --git a/media/libstagefright/renderfright/Android.bp b/media/libstagefright/renderfright/Android.bp
index 3c00a1c..3598e8d 100644
--- a/media/libstagefright/renderfright/Android.bp
+++ b/media/libstagefright/renderfright/Android.bp
@@ -32,6 +32,7 @@
"libEGL",
"libGLESv1_CM",
"libGLESv2",
+ "libvulkan",
"liblog",
"libnativewindow",
"libprocessgroup",
diff --git a/media/module/extractors/aac/AACExtractor.cpp b/media/module/extractors/aac/AACExtractor.cpp
index 2fc4584..a44fb61 100644
--- a/media/module/extractors/aac/AACExtractor.cpp
+++ b/media/module/extractors/aac/AACExtractor.cpp
@@ -310,9 +310,9 @@
return AMEDIA_ERROR_END_OF_STREAM;
}
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
status_t err = mBufferGroup->acquire_buffer(&buffer);
- if (err != OK) {
+ if (err != OK || buffer == nullptr) {
return AMEDIA_ERROR_UNKNOWN;
}
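
The hardening above is applied uniformly to the extractors that follow (AMR, FLAC, MIDI, Matroska, MP3, MP4, MPEG-2 PS/TS, Ogg, WAV): the out-pointer is initialized to nullptr and both a non-OK status and a null buffer are treated as failure. A minimal stand-alone sketch of the idea, using simplified stand-in types rather than the real MediaBufferHelper API:

```cpp
#include <cstdio>

enum status_t { OK = 0, ERROR_UNKNOWN = -1 };
struct MediaBuffer { char data[4096]; };

// Stand-in for MediaBufferGroupHelper::acquire_buffer(): it may fail, or a
// misbehaving implementation could report OK without producing a buffer.
static status_t acquire_buffer(MediaBuffer** out) {
    static MediaBuffer sBuffer;
    *out = &sBuffer;
    return OK;
}

static status_t readSample() {
    MediaBuffer* buffer = nullptr;             // never left uninitialized
    status_t err = acquire_buffer(&buffer);
    if (err != OK || buffer == nullptr) {      // guard both failure modes
        return ERROR_UNKNOWN;
    }
    std::printf("got a %zu-byte buffer\n", sizeof(buffer->data));
    return OK;
}

int main() { return readSample() == OK ? 0 : 1; }
```
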
diff --git a/media/module/extractors/amr/AMRExtractor.cpp b/media/module/extractors/amr/AMRExtractor.cpp
index e26ff0a..b0f69ce 100644
--- a/media/module/extractors/amr/AMRExtractor.cpp
+++ b/media/module/extractors/amr/AMRExtractor.cpp
@@ -341,9 +341,9 @@
return AMEDIA_ERROR_MALFORMED;
}
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
status_t err = mBufferGroup->acquire_buffer(&buffer);
- if (err != OK) {
+ if (err != OK || buffer == nullptr) {
return AMEDIA_ERROR_UNKNOWN;
}
diff --git a/media/module/extractors/flac/FLACExtractor.cpp b/media/module/extractors/flac/FLACExtractor.cpp
index ec7cb24..2434e41 100644
--- a/media/module/extractors/flac/FLACExtractor.cpp
+++ b/media/module/extractors/flac/FLACExtractor.cpp
@@ -614,9 +614,9 @@
}
// acquire a media buffer
CHECK(mGroup != NULL);
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
+ if (err != OK || buffer == nullptr) {
return NULL;
}
const size_t bufferSize = blocksize * getChannels() * getOutputSampleSize();
diff --git a/media/module/extractors/midi/MidiExtractor.cpp b/media/module/extractors/midi/MidiExtractor.cpp
index d0efb2f..167cc40 100644
--- a/media/module/extractors/midi/MidiExtractor.cpp
+++ b/media/module/extractors/midi/MidiExtractor.cpp
@@ -240,9 +240,9 @@
if ((state == EAS_STATE_STOPPED) || (state == EAS_STATE_ERROR)) {
return NULL;
}
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
status_t err = mGroup->acquire_buffer(&buffer);
- if (err != OK) {
+ if (err != OK || buffer == nullptr) {
ALOGE("readBuffer: no buffer");
return NULL;
}
diff --git a/media/module/extractors/mkv/MatroskaExtractor.cpp b/media/module/extractors/mkv/MatroskaExtractor.cpp
index 443e26c..2b72387 100644
--- a/media/module/extractors/mkv/MatroskaExtractor.cpp
+++ b/media/module/extractors/mkv/MatroskaExtractor.cpp
@@ -790,6 +790,7 @@
int64_t timeUs = mBlockIter.blockTimeUs();
for (int i = 0; i < block->GetFrameCount(); ++i) {
+ status_t err;
MatroskaExtractor::TrackInfo *trackInfo = &mExtractor->mTracks.editItemAt(mTrackIndex);
const mkvparser::Block::Frame &frame = block->GetFrame(i);
size_t len = frame.len;
@@ -798,8 +799,13 @@
}
len += trackInfo->mHeaderLen;
- MediaBufferHelper *mbuf;
- mBufferGroup->acquire_buffer(&mbuf, false /* nonblocking */, len /* requested size */);
+ MediaBufferHelper *mbuf = nullptr;
+ err = mBufferGroup->acquire_buffer(&mbuf, false /* nonblocking */,
+ len /* requested size */);
+ if (err != OK || mbuf == nullptr) {
+ ALOGE("readBlock: no buffer");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
mbuf->set_range(0, len);
uint8_t *data = static_cast<uint8_t *>(mbuf->data());
if (trackInfo->mHeader) {
@@ -832,7 +838,7 @@
}
}
- status_t err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
+ err = frame.Read(mExtractor->mReader, data + trackInfo->mHeaderLen);
if (err == OK
&& mExtractor->mIsWebm
&& trackInfo->mEncrypted) {
diff --git a/media/module/extractors/mp3/MP3Extractor.cpp b/media/module/extractors/mp3/MP3Extractor.cpp
index 248a39c..328b790 100644
--- a/media/module/extractors/mp3/MP3Extractor.cpp
+++ b/media/module/extractors/mp3/MP3Extractor.cpp
@@ -521,9 +521,9 @@
mSamplesRead = 0;
}
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
status_t err = mBufferGroup->acquire_buffer(&buffer);
- if (err != OK) {
+ if (err != OK || buffer == nullptr) {
return AMEDIA_ERROR_UNKNOWN;
}
diff --git a/media/module/extractors/mp4/MPEG4Extractor.cpp b/media/module/extractors/mp4/MPEG4Extractor.cpp
index 3a5a869..1d88785 100644
--- a/media/module/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/module/extractors/mp4/MPEG4Extractor.cpp
@@ -6337,7 +6337,7 @@
err = mBufferGroup->acquire_buffer(&mBuffer);
- if (err != OK) {
+ if (err != OK || mBuffer == nullptr) {
CHECK(mBuffer == NULL);
return AMEDIA_ERROR_UNKNOWN;
}
diff --git a/media/module/extractors/mpeg2/MPEG2PSExtractor.cpp b/media/module/extractors/mpeg2/MPEG2PSExtractor.cpp
index afd28ef..44c8937 100644
--- a/media/module/extractors/mpeg2/MPEG2PSExtractor.cpp
+++ b/media/module/extractors/mpeg2/MPEG2PSExtractor.cpp
@@ -699,11 +699,26 @@
}
}
- MediaBufferBase *mbuf;
- mSource->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ MediaBufferBase *mbuf = nullptr;
+ status_t err_read = mSource->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ if (mbuf == nullptr) {
+ ALOGE("Track::read: null buffer read from source");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ if (err_read != OK) {
+ ALOGE("Track::read: no buffer read from source");
+ mbuf->release();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
size_t length = mbuf->range_length();
- MediaBufferHelper *outbuf;
- mBufferGroup->acquire_buffer(&outbuf, false, length);
+ MediaBufferHelper *outbuf = nullptr;
+ status_t err = mBufferGroup->acquire_buffer(&outbuf, false, length);
+ if (err != OK || outbuf == nullptr) {
+ ALOGE("Track::read: no buffer");
+ mbuf->release();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
memcpy(outbuf->data(), mbuf->data(), length);
outbuf->set_range(0, length);
*buffer = outbuf;
diff --git a/media/module/extractors/mpeg2/MPEG2TSExtractor.cpp b/media/module/extractors/mpeg2/MPEG2TSExtractor.cpp
index 9a3cd92..736b817 100644
--- a/media/module/extractors/mpeg2/MPEG2TSExtractor.cpp
+++ b/media/module/extractors/mpeg2/MPEG2TSExtractor.cpp
@@ -182,11 +182,26 @@
return AMEDIA_ERROR_END_OF_STREAM;
}
- MediaBufferBase *mbuf;
- mImpl->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ MediaBufferBase *mbuf = nullptr;
+ status_t err_read = mImpl->read(&mbuf, (MediaTrack::ReadOptions*) options);
+ if (mbuf == nullptr) {
+ ALOGE("Track::read: null buffer read from source");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+ if (err_read != OK) {
+ ALOGE("Track::read: no buffer read from source");
+ mbuf->release();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
+
size_t length = mbuf->range_length();
- MediaBufferHelper *outbuf;
- mBufferGroup->acquire_buffer(&outbuf, false, length);
+ MediaBufferHelper *outbuf = nullptr;
+ status_t err = mBufferGroup->acquire_buffer(&outbuf, false, length);
+ if (err != OK || outbuf == nullptr) {
+ ALOGE("read: no buffer");
+ mbuf->release();
+ return AMEDIA_ERROR_UNKNOWN;
+ }
memcpy(outbuf->data(), mbuf->data(), length);
outbuf->set_range(0, length);
*out = outbuf;
diff --git a/media/module/extractors/ogg/OggExtractor.cpp b/media/module/extractors/ogg/OggExtractor.cpp
index eb2246d..1c6f516 100644
--- a/media/module/extractors/ogg/OggExtractor.cpp
+++ b/media/module/extractors/ogg/OggExtractor.cpp
@@ -790,7 +790,8 @@
}
MediaBufferHelper *tmp;
if (mBufferGroup) {
- mBufferGroup->acquire_buffer(&tmp, false, fullSize);
+ // ignore return code here. instead, check tmp below.
+ (void) mBufferGroup->acquire_buffer(&tmp, false, fullSize);
ALOGV("acquired buffer %p from group", tmp);
} else {
tmp = new StandAloneMediaBuffer(fullSize);
@@ -924,13 +925,16 @@
status_t MyOggExtractor::init() {
AMediaFormat_setString(mMeta, AMEDIAFORMAT_KEY_MIME, mMimeType);
- media_status_t err;
- MediaBufferHelper *packet;
for (size_t i = 0; i < mNumHeaders; ++i) {
+ media_status_t err;
+ MediaBufferHelper *packet = nullptr;
// ignore timestamp for configuration packets
if ((err = _readNextPacket(&packet, /* calcVorbisTimestamp = */ false)) != AMEDIA_OK) {
return err;
}
+ if (packet == nullptr) {
+ return AMEDIA_ERROR_UNKNOWN;
+ }
ALOGV("read packet of size %zu\n", packet->range_length());
err = verifyHeader(packet, /* type = */ i * 2 + 1);
packet->release();
diff --git a/media/module/extractors/wav/WAVExtractor.cpp b/media/module/extractors/wav/WAVExtractor.cpp
index 9e94587..9c3bac6 100644
--- a/media/module/extractors/wav/WAVExtractor.cpp
+++ b/media/module/extractors/wav/WAVExtractor.cpp
@@ -459,11 +459,15 @@
mCurrentPos = pos + mOffset;
}
- MediaBufferHelper *buffer;
+ MediaBufferHelper *buffer = nullptr;
media_status_t err = mBufferGroup->acquire_buffer(&buffer);
if (err != OK) {
return err;
}
+ if (buffer == nullptr) {
+ ALOGE("acquire_buffer OK, but no buffer");
+ return AMEDIA_ERROR_UNKNOWN;
+ }
// maxBytesToRead may be reduced so that in-place data conversion will fit in buffer size.
const size_t bufferSize = std::min(buffer->size(), kMaxFrameSize);
diff --git a/media/module/libmediatranscoding/tests/TranscodingSessionController_tests.cpp b/media/module/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
index ef9c4f8..fdd327f 100644
--- a/media/module/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
+++ b/media/module/libmediatranscoding/tests/TranscodingSessionController_tests.cpp
@@ -337,6 +337,8 @@
// Should have created new transcoder.
EXPECT_EQ(mTranscoder->getGeneration(), generation);
EXPECT_EQ(mTranscoder.use_count(), 2);
+ // b/240537336: Allow extra time to finish onError call
+ sleep(1);
}
void testPacerHelper(int numSubmits, int sessionDurationMs, int expectedSuccess) {
diff --git a/media/ndk/NdkMediaDrm.cpp b/media/ndk/NdkMediaDrm.cpp
index f4674de..5005365 100644
--- a/media/ndk/NdkMediaDrm.cpp
+++ b/media/ndk/NdkMediaDrm.cpp
@@ -253,7 +253,7 @@
}
static sp<IDrm> CreateDrm() {
- return DrmUtils::MakeDrm();
+ return DrmUtils::MakeDrm(IDRM_NDK);
}
diff --git a/media/ndk/include/media/NdkMediaDrm.h b/media/ndk/include/media/NdkMediaDrm.h
index 4eca3d7..8044140 100644
--- a/media/ndk/include/media/NdkMediaDrm.h
+++ b/media/ndk/include/media/NdkMediaDrm.h
@@ -261,8 +261,8 @@
/**
* Open a new session with the MediaDrm object. A session ID is returned.
*
- * Returns MEDIADRM_NOT_PROVISIONED_ERROR if provisioning is needed.
- * Returns MEDIADRM_RESOURCE_BUSY_ERROR if required resources are in use.
+ * Returns AMEDIA_DRM_NOT_PROVISIONED if provisioning is needed.
+ * Returns AMEDIA_DRM_RESOURCE_BUSY if required resources are in use.
*
* Available since API level 21.
*/
@@ -327,7 +327,7 @@
* 2. keyRequestSize will be set to the size of the request
* If this does not return AMEDIA_OK, value of these parameters should not be used.
*
- * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * Returns AMEDIA_DRM_NOT_PROVISIONED if reprovisioning is needed, due to a
* problem with the device certificate.
*
* Available since API level 21.
@@ -390,7 +390,7 @@
* 4. keyRequestType will be set to the key request type. Passing in NULL means
* you don't need it to be reported.
*
- * Returns MEDIADRM_NOT_PROVISIONED_ERROR if reprovisioning is needed, due to a
+ * Returns AMEDIA_DRM_NOT_PROVISIONED if reprovisioning is needed, due to a
* problem with the device certificate.
*
* Available since API level 33.
@@ -457,7 +457,7 @@
* On entry, numPairs should be set by the caller to the maximum number of pairs
* that can be returned (the size of the array). On exit, numPairs will be set
* to the number of entries written to the array. If the number of {key, value} pairs
- * to be returned is greater than *numPairs, MEDIADRM_SHORT_BUFFER will be returned
+ * to be returned is greater than *numPairs, AMEDIA_DRM_SHORT_BUFFER will be returned
* and numPairs will be set to the number of pairs available.
*
* Available since API level 21.
@@ -495,7 +495,7 @@
* DRM engine plugin.
* responseSize is the length of the provisioning response in bytes.
*
- * Returns MEDIADRM_DEVICE_REVOKED_ERROR if the response indicates that the
+ * Returns AMEDIA_DRM_DEVICE_REVOKED if the response indicates that the
* server rejected the request
*
* Available since API level 21.
@@ -522,7 +522,7 @@
* numSecureStops is set by the caller to the maximum number of secure stops to
* return. On exit, *numSecureStops will be set to the number actually returned.
* If *numSecureStops is too small for the number of secure stops available,
- * MEDIADRM_SHORT_BUFFER will be returned and *numSecureStops will be set to the
+ * AMEDIA_DRM_SHORT_BUFFER will be returned and *numSecureStops will be set to the
* number required.
*
* Available since API level 21.
@@ -657,7 +657,7 @@
* Generate a signature using the specified macAlgorithm over the message data
* referenced by message of size messageSize and store the signature in the
* buffer referenced signature of max size *signatureSize. If the buffer is not
- * large enough to hold the signature, MEDIADRM_SHORT_BUFFER is returned and
+ * large enough to hold the signature, AMEDIA_DRM_SHORT_BUFFER is returned and
* *signatureSize is set to the buffer size required. The key to use is identified
* by the 16 byte keyId. The key must have been loaded into the session using
* provideKeyResponse.
@@ -670,7 +670,7 @@
/*
* Perform a signature verification using the specified macAlgorithm over the message
- * data referenced by the message parameter of size messageSize. Returns MEDIADRM_OK
+ * data referenced by the message parameter of size messageSize. Returns AMEDIA_OK
* if the signature matches, otherwise MEDAIDRM_VERIFY_FAILED is returned. The key to
* use is identified by the 16 byte keyId. The key must have been loaded into the
* session using provideKeyResponse.
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index bac4b22..2b5bacf 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -183,11 +183,11 @@
AMediaCodecCryptoInfo_setPattern; # introduced=24
AMediaCodec_configure;
AMediaCodec_createCodecByName;
- AMediaCodec_createCodecByNameForClient; # apex # introduced=31
+ AMediaCodec_createCodecByNameForClient; # systemapi # introduced=31
AMediaCodec_createDecoderByType;
- AMediaCodec_createDecoderByTypeForClient; # apex # introduced=31
+ AMediaCodec_createDecoderByTypeForClient; # systemapi # introduced=31
AMediaCodec_createEncoderByType;
- AMediaCodec_createEncoderByTypeForClient; # apex # introduced=31
+ AMediaCodec_createEncoderByTypeForClient; # systemapi # introduced=31
AMediaCodec_delete;
AMediaCodec_dequeueInputBuffer;
AMediaCodec_dequeueOutputBuffer;
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
index a4ba36a..9e0d5e4 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/Decoder.java
@@ -48,6 +48,9 @@
private boolean mSawOutputEOS;
private boolean mSignalledError;
+ private int mNumInFramesProvided;
+ private int mNumInFramesRequired;
+
private int mNumOutputFrame;
private int mIndex;
@@ -87,6 +90,10 @@
}
public void setupDecoder(Surface surface, boolean render,
boolean useFrameReleaseQueue, int frameRate) {
+ setupDecoder(surface, render, useFrameReleaseQueue, frameRate, -1);
+ }
+ public void setupDecoder(Surface surface, boolean render,
+ boolean useFrameReleaseQueue, int frameRate, int numInFramesRequired) {
mSignalledError = false;
mOutputStream = null;
mSurface = surface;
@@ -95,6 +102,8 @@
Log.i(TAG, "Using FrameReleaseQueue with frameRate " + frameRate);
mFrameReleaseQueue = new FrameReleaseQueue(mRender, frameRate);
}
+ mNumInFramesRequired = numInFramesRequired;
+ Log.i(TAG, "Decoding " + mNumInFramesRequired + " frames");
}
private MediaCodec createCodec(String codecName, MediaFormat format) throws IOException {
@@ -147,6 +156,10 @@
mSawOutputEOS = false;
mNumOutputFrame = 0;
mIndex = 0;
+ mNumInFramesProvided = 0;
+ if (mNumInFramesRequired < 0) {
+ mNumInFramesRequired = mInputBuffer.size();
+ }
long sTime = mStats.getCurTime();
mCodec = createCodec(codecName, format);
if (mCodec == null) {
@@ -305,12 +318,22 @@
}
private void onInputAvailable(int inputBufferId, MediaCodec mediaCodec) {
- if ((inputBufferId >= 0) && !mSawInputEOS) {
+ if (inputBufferId >= 0) {
ByteBuffer inputCodecBuffer = mediaCodec.getInputBuffer(inputBufferId);
- BufferInfo bufInfo = mInputBufferInfo.get(mIndex);
- inputCodecBuffer.put(mInputBuffer.get(mIndex).array());
- mIndex++;
+ BufferInfo bufInfo;
+ if (mNumInFramesProvided >= mNumInFramesRequired) {
+ Log.i(TAG, "Input frame limit reached");
+ mIndex = mInputBufferInfo.size() - 1;
+ bufInfo = mInputBufferInfo.get(mIndex);
+ if ((bufInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) == 0) {
+ Log.e(TAG, "Error in EOS flag for Decoder");
+ }
+ }
+ bufInfo = mInputBufferInfo.get(mIndex);
mSawInputEOS = (bufInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0;
+ inputCodecBuffer.put(mInputBuffer.get(mIndex).array());
+ mNumInFramesProvided++;
+ mIndex = mNumInFramesProvided % (mInputBufferInfo.size() - 1);
if (mSawInputEOS) {
Log.i(TAG, "Saw input EOS");
}
diff --git a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/FrameReleaseQueue.java b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/FrameReleaseQueue.java
index 4b9b505..84554d3 100644
--- a/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/FrameReleaseQueue.java
+++ b/media/tests/benchmark/MediaBenchmarkTest/src/main/java/com/android/media/benchmark/library/FrameReleaseQueue.java
@@ -31,6 +31,8 @@
private boolean doFrameRelease = false;
private boolean mRender = false;
private int mWaitTime = 40; // milliseconds per frame
+ private int mWaitTimeCorrection = 0;
+ private int mCorrectionLoopCount;
private int firstReleaseTime = -1;
private int THRESHOLD_TIME = 5;
@@ -48,29 +50,40 @@
private class ReleaseThread extends Thread {
public void run() {
int nextReleaseTime = 0;
+ int loopCount = 0;
while (doFrameRelease || mFrameInfoQueue.size() > 0) {
FrameInfo curFrameInfo = mFrameInfoQueue.peek();
if (curFrameInfo == null) {
nextReleaseTime += mWaitTime;
} else {
- if (firstReleaseTime == -1) {
+ if (curFrameInfo.displayTime == 0) {
+ // first frame of loop
firstReleaseTime = getCurSysTime();
nextReleaseTime = firstReleaseTime + mWaitTime;
- popAndRelease(curFrameInfo);
+ popAndRelease(curFrameInfo, true);
+ } else if (!doFrameRelease && mFrameInfoQueue.size() == 1) {
+ // EOS
+ Log.i(TAG, "EOS");
+ popAndRelease(curFrameInfo, false);
} else {
nextReleaseTime += mWaitTime;
int curSysTime = getCurSysTime();
int curMediaTime = curSysTime - firstReleaseTime;
- while (curFrameInfo != null && curFrameInfo.displayTime <= curMediaTime) {
+ while (curFrameInfo != null && curFrameInfo.displayTime > 0 &&
+ curFrameInfo.displayTime <= curMediaTime) {
if (!((curMediaTime - curFrameInfo.displayTime) < THRESHOLD_TIME)) {
- Log.d(TAG, "Dropping expired frame " + curFrameInfo.number);
+ Log.d(TAG, "Dropping expired frame " + curFrameInfo.number +
+ " display time " + curFrameInfo.displayTime +
+ " current time " + curMediaTime);
+ popAndRelease(curFrameInfo, false);
+ } else {
+ popAndRelease(curFrameInfo, true);
}
- popAndRelease(curFrameInfo);
curFrameInfo = mFrameInfoQueue.peek();
}
if (curFrameInfo != null && curFrameInfo.displayTime > curMediaTime) {
if ((curFrameInfo.displayTime - curMediaTime) < THRESHOLD_TIME) {
- popAndRelease(curFrameInfo);
+ popAndRelease(curFrameInfo, true);
}
}
}
@@ -85,6 +98,10 @@
} else {
Log.d(TAG, "Thread sleep time less than 1");
}
+ if (loopCount % mCorrectionLoopCount == 0) {
+ nextReleaseTime += mWaitTimeCorrection;
+ }
+ loopCount += 1;
}
}
}
@@ -94,10 +111,18 @@
this.mReleaseThread = new ReleaseThread();
this.doFrameRelease = true;
this.mRender = render;
- this.mWaitTime = (int)(1.0f/frameRate * 1000); // wait time in milliseconds per frame
+ this.mWaitTime = 1000 / frameRate; // wait time in milliseconds per frame
+ int waitTimeRemainder = 1000 % frameRate;
+ int gcd = gcd(frameRate, waitTimeRemainder);
+ this.mCorrectionLoopCount = frameRate / gcd;
+ this.mWaitTimeCorrection = waitTimeRemainder / gcd;
Log.i(TAG, "Constructed FrameReleaseQueue with wait time " + this.mWaitTime + " ms");
}
+ private static int gcd(int a, int b) {
+ return b == 0 ? a : gcd(b, a % b);
+ }
+
public void setMediaCodec(MediaCodec mediaCodec) {
this.mCodec = mediaCodec;
}
@@ -121,14 +146,15 @@
return (int)(System.nanoTime()/1000000);
}
- private void popAndRelease(FrameInfo curFrameInfo) {
+ private void popAndRelease(FrameInfo curFrameInfo, boolean renderThisFrame) {
try {
curFrameInfo = mFrameInfoQueue.take();
} catch (InterruptedException e) {
Log.e(TAG, "Threw InterruptedException on take");
}
+ boolean actualRender = (renderThisFrame && mRender);
try {
- mCodec.releaseOutputBuffer(curFrameInfo.bufferId, mRender);
+ mCodec.releaseOutputBuffer(curFrameInfo.bufferId, actualRender);
} catch (IllegalStateException e) {
Log.e(TAG,
"Threw IllegalStateException on releaseOutputBuffer for frame "
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index 698752f..7abb0b6 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -103,6 +103,13 @@
logtags: ["EventLogTags.logtags"],
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ "-Wthread-safety",
+ ],
+
export_shared_lib_headers: [
"libpermission",
],
diff --git a/media/utils/TimeCheck.cpp b/media/utils/TimeCheck.cpp
index 0cf5bd9..a5378e6 100644
--- a/media/utils/TimeCheck.cpp
+++ b/media/utils/TimeCheck.cpp
@@ -312,7 +312,7 @@
}
// Automatically create a TimeCheck class for a class and method.
-// This is used for Audio HIDL support.
+// This is used for Audio HAL support.
mediautils::TimeCheck makeTimeCheckStatsForClassMethod(
std::string_view className, std::string_view methodName) {
std::shared_ptr<MethodStatistics<std::string>> statistics =
diff --git a/media/utils/TimerThread.cpp b/media/utils/TimerThread.cpp
index b760ee2..3966103 100644
--- a/media/utils/TimerThread.cpp
+++ b/media/utils/TimerThread.cpp
@@ -288,6 +288,7 @@
void TimerThread::MonitorThread::threadFunc() {
std::unique_lock _l(mMutex);
+ ::android::base::ScopedLockAssertion lock_assertion(mMutex);
while (!mShouldExit) {
Handle nextDeadline = INVALID_HANDLE;
Handle now = INVALID_HANDLE;
@@ -381,6 +382,7 @@
std::shared_ptr<const TimerThread::Request> TimerThread::MonitorThread::remove(Handle handle) {
std::pair<std::shared_ptr<const Request>, TimerCallback> data;
std::unique_lock ul(mMutex);
+ ::android::base::ScopedLockAssertion lock_assertion(mMutex);
if (const auto it = mMonitorRequests.find(handle);
it != mMonitorRequests.end()) {
data = std::move(it->second);
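
The ScopedLockAssertion lines added above exist because -Wthread-safety (enabled for libmediautils in this change) cannot see that a std::unique_lock holds mMutex, so accesses to GUARDED_BY members would otherwise be flagged. A self-contained sketch of the mechanism, using local stand-ins rather than the android-base helpers:

```cpp
#include <mutex>

#define CAPABILITY(x)        __attribute__((capability(x)))
#define SCOPED_CAPABILITY    __attribute__((scoped_lockable))
#define GUARDED_BY(x)        __attribute__((guarded_by(x)))
#define ACQUIRE()            __attribute__((acquire_capability()))
#define RELEASE()            __attribute__((release_capability()))
#define ASSERT_CAPABILITY(x) __attribute__((assert_capability(x)))

// Minimal annotated mutex so the example does not depend on Android headers.
class CAPABILITY("mutex") Mutex {
  public:
    void lock() ACQUIRE() { mImpl.lock(); }
    void unlock() RELEASE() { mImpl.unlock(); }
  private:
    std::mutex mImpl;
};

// Equivalent in spirit to ::android::base::ScopedLockAssertion: tells the
// analysis the capability is already held, without taking the lock itself.
class SCOPED_CAPABILITY LockAssertion {
  public:
    explicit LockAssertion(Mutex& m) ASSERT_CAPABILITY(m) {}
};

struct Monitor {
    Mutex mMutex;
    int mPending GUARDED_BY(mMutex) = 0;

    void poll() {
        std::unique_lock _l(mMutex);   // analysis cannot track unique_lock
        LockAssertion held(mMutex);    // re-assert that mMutex is held
        ++mPending;                    // no -Wthread-safety warning
    }
};

int main() { Monitor m; m.poll(); return 0; }
```
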
diff --git a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
index 51e8d7a..15f043a 100644
--- a/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
+++ b/media/utils/fuzzers/ServiceUtilitiesFuzz.cpp
@@ -25,6 +25,7 @@
static constexpr int kMaxOperations = 50;
static constexpr int kMaxStringLen = 256;
+static constexpr int kMaxSpaces = 1000;
using android::content::AttributionSourceState;
@@ -35,7 +36,9 @@
pm.allowPlaybackCapture(uid);
},
[](FuzzedDataProvider* data_provider, android::MediaPackageManager pm) -> void {
- int spaces = data_provider->ConsumeIntegral<int>();
+ /* Large 'spaces' values made the file write operation slow enough to
+ * time out the fuzzer, so restrict the value to a bounded range. */

+ int spaces = data_provider->ConsumeIntegralInRange<int>(0, kMaxSpaces);
// Dump everything into /dev/null
int fd = open("/dev/null", O_WRONLY);
diff --git a/media/utils/include/mediautils/TimeCheck.h b/media/utils/include/mediautils/TimeCheck.h
index f9ea50c..f1d572f 100644
--- a/media/utils/include/mediautils/TimeCheck.h
+++ b/media/utils/include/mediautils/TimeCheck.h
@@ -147,4 +147,9 @@
TimeCheck makeTimeCheckStatsForClassMethod(
std::string_view className, std::string_view methodName);
+// A handy statement-like macro to put at the beginning of almost every method
+// which calls into HAL. Note that it requires the class to implement 'getClassName'.
+#define TIME_CHECK() auto timeCheck = \
+ mediautils::makeTimeCheckStatsForClassMethod(getClassName(), __func__)
+
} // namespace android::mediautils
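
A hypothetical usage sketch for the TIME_CHECK() macro introduced above; the wrapper class and method are invented for illustration, and the snippet assumes it is compiled inside the Android tree where <mediautils/TimeCheck.h> and libmediautils are available:

```cpp
#include <chrono>
#include <string>
#include <thread>

#include <mediautils/TimeCheck.h>

using namespace android;

class FakeHalWrapper {
  public:
    // TIME_CHECK() calls getClassName() to tag the measurement, so every
    // class using the macro must provide it.
    std::string getClassName() const { return "FakeHalWrapper"; }

    void setParameter() {
        TIME_CHECK();  // flags "FakeHalWrapper::setParameter" if it runs too long
        // Stand-in for the actual HAL call being monitored.
        std::this_thread::sleep_for(std::chrono::milliseconds(5));
    }
};

int main() {
    FakeHalWrapper wrapper;
    wrapper.setParameter();
    return 0;
}
```
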
diff --git a/media/utils/include/mediautils/TimerThread.h b/media/utils/include/mediautils/TimerThread.h
index d5be177..d84d682 100644
--- a/media/utils/include/mediautils/TimerThread.h
+++ b/media/utils/include/mediautils/TimerThread.h
@@ -340,7 +340,7 @@
std::pair<std::shared_ptr<const Request>, TimerCallback>>
mSecondChanceRequests GUARDED_BY(mMutex);
- RequestQueue& mTimeoutQueue; // locked internally, added to when request times out.
+ RequestQueue& mTimeoutQueue GUARDED_BY(mMutex); // added to when request times out.
// Worker thread variables
bool mShouldExit GUARDED_BY(mMutex) = false;
diff --git a/services/audioflinger/Android.bp b/services/audioflinger/Android.bp
index 4b6d5f2..43ef311 100644
--- a/services/audioflinger/Android.bp
+++ b/services/audioflinger/Android.bp
@@ -24,7 +24,7 @@
defaults: [
"latest_android_media_audio_common_types_cpp_shared",
- "latest_android_hardware_audio_sounddose_ndk_shared",
+ "latest_android_hardware_audio_core_sounddose_ndk_shared",
],
srcs: [
@@ -111,7 +111,7 @@
export_shared_lib_headers: [
"libpermission",
- "android.hardware.audio.sounddose-V1-ndk",
+ "android.hardware.audio.core.sounddose-V1-ndk",
],
cflags: [
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index f8d7c70..7bb0fd3 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -2573,7 +2573,7 @@
ALOGE("loadHwModule() error %d loading module %s", rc, name);
return AUDIO_MODULE_HANDLE_NONE;
}
- if (!mMelReporter->activateHalSoundDoseComputation(name)) {
+ if (!mMelReporter->activateHalSoundDoseComputation(name, dev)) {
ALOGW("loadHwModule() sound dose reporting is not available");
}
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index 173f0c9..e8133d9 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -33,7 +33,6 @@
#include <sys/types.h>
#include <limits.h>
-#include <aidl/android/hardware/audio/sounddose/ISoundDoseFactory.h>
#include <android/media/BnAudioTrack.h>
#include <android/media/IAudioFlingerClient.h>
#include <android/media/IAudioTrackCallback.h>
@@ -140,6 +139,7 @@
class AudioFlinger : public AudioFlingerServerAdapter::Delegate
{
+ friend class sp<AudioFlinger>;
public:
static void instantiate() ANDROID_API;
diff --git a/services/audioflinger/MelReporter.cpp b/services/audioflinger/MelReporter.cpp
index 5ac7cde..5dbcb33 100644
--- a/services/audioflinger/MelReporter.cpp
+++ b/services/audioflinger/MelReporter.cpp
@@ -22,38 +22,37 @@
#include <android/media/ISoundDoseCallback.h>
#include <audio_utils/power.h>
-#include <android/binder_manager.h>
#include <utils/Log.h>
using aidl::android::hardware::audio::core::sounddose::ISoundDose;
-using aidl::android::hardware::audio::sounddose::ISoundDoseFactory;
namespace android {
-constexpr std::string_view kSoundDoseInterfaceModule = "/default";
-
-bool AudioFlinger::MelReporter::activateHalSoundDoseComputation(const std::string& module) {
+bool AudioFlinger::MelReporter::activateHalSoundDoseComputation(const std::string& module,
+ const sp<DeviceHalInterface>& device) {
if (mSoundDoseManager->forceUseFrameworkMel()) {
ALOGD("%s: Forcing use of internal MEL computation.", __func__);
activateInternalSoundDoseComputation();
return false;
}
- if (mSoundDoseFactory == nullptr) {
- ALOGW("%s: sound dose HAL reporting not available", __func__);
- activateInternalSoundDoseComputation();
- return false;
- }
-
- std::shared_ptr<ISoundDose> soundDoseInterface;
- auto result = mSoundDoseFactory->getSoundDose(module, &soundDoseInterface);
- if (!result.isOk()) {
- ALOGW("%s: HAL cannot provide sound dose interface for module %s",
+ ndk::SpAIBinder soundDoseBinder;
+ if (device->getSoundDoseInterface(module, &soundDoseBinder) != OK) {
+ ALOGW("%s: HAL cannot provide sound dose interface for module %s, use internal MEL",
__func__, module.c_str());
activateInternalSoundDoseComputation();
return false;
}
+ if (soundDoseBinder == nullptr) {
+ ALOGW("%s: HAL doesn't implement a sound dose interface for module %s, use internal MEL",
+ __func__, module.c_str());
+ activateInternalSoundDoseComputation();
+ return false;
+ }
+
+ std::shared_ptr<ISoundDose> soundDoseInterface = ISoundDose::fromBinder(soundDoseBinder);
+
if (!mSoundDoseManager->setHalSoundDoseInterface(soundDoseInterface)) {
ALOGW("%s: cannot activate HAL MEL reporting for module %s", __func__, module.c_str());
activateInternalSoundDoseComputation();
@@ -79,16 +78,6 @@
void AudioFlinger::MelReporter::onFirstRef() {
mAudioFlinger.mPatchCommandThread->addListener(this);
-
- std::string interface =
- std::string(ISoundDoseFactory::descriptor) + kSoundDoseInterfaceModule.data();
- AIBinder* binder = AServiceManager_checkService(interface.c_str());
- if (binder == nullptr) {
- ALOGW("%s service %s doesn't exist", __func__, interface.c_str());
- return;
- }
-
- mSoundDoseFactory = ISoundDoseFactory::fromBinder(ndk::SpAIBinder(binder));
}
bool AudioFlinger::MelReporter::shouldComputeMelForDeviceType(audio_devices_t device) {
@@ -111,14 +100,50 @@
}
}
+void AudioFlinger::MelReporter::updateMetadataForCsd(audio_io_handle_t streamHandle,
+ const std::vector<playback_track_metadata_v7_t>& metadataVec) {
+ std::lock_guard _laf(mAudioFlinger.mLock);
+ std::lock_guard _l(mLock);
+ auto activeMelPatchId = activePatchStreamHandle_l(streamHandle);
+ if (!activeMelPatchId) {
+ ALOGV("%s stream handle %d does not have an active patch", __func__, streamHandle);
+ return;
+ }
+
+ bool shouldActivateCsd = false;
+ for (const auto& metadata : metadataVec) {
+ if (metadata.base.usage == AUDIO_USAGE_GAME || metadata.base.usage == AUDIO_USAGE_MEDIA) {
+ shouldActivateCsd = true;
+ }
+ }
+
+ auto activeMelPatchIt = mActiveMelPatches.find(activeMelPatchId.value());
+ if (activeMelPatchIt != mActiveMelPatches.end()
+ && shouldActivateCsd != activeMelPatchIt->second.csdActive) {
+ if (activeMelPatchIt->second.csdActive) {
+ ALOGV("%s should not compute CSD for stream handle %d", __func__, streamHandle);
+ stopMelComputationForPatch_l(activeMelPatchIt->second);
+ } else {
+ ALOGV("%s should compute CSD for stream handle %d", __func__, streamHandle);
+ startMelComputationForActivePatch_l(activeMelPatchIt->second);
+ }
+ activeMelPatchIt->second.csdActive = shouldActivateCsd;
+ }
+}
+
void AudioFlinger::MelReporter::onCreateAudioPatch(audio_patch_handle_t handle,
const PatchPanel::Patch& patch) {
+ if (useHalSoundDoseInterface()) {
+ ALOGV("%s using HAL sound dose, ignore new patch", __func__);
+ return;
+ }
+
ALOGV("%s: handle %d mHalHandle %d device sink %08x",
__func__, handle, patch.mHalHandle,
patch.mAudioPatch.num_sinks > 0 ? patch.mAudioPatch.sinks[0].ext.device.type : 0);
if (patch.mAudioPatch.num_sources == 0
|| patch.mAudioPatch.sources[0].type != AUDIO_PORT_TYPE_MIX) {
- ALOGW("%s: patch does not contain any mix sources", __func__);
+ ALOGV("%s: patch does not contain any mix sources", __func__);
return;
}
@@ -133,49 +158,46 @@
AudioDeviceTypeAddr adt{patch.mAudioPatch.sinks[i].ext.device.type,
patch.mAudioPatch.sinks[i].ext.device.address};
mSoundDoseManager->mapAddressToDeviceId(adt, deviceId);
-
- bool useHalSoundDoseInterface = !mSoundDoseManager->forceUseFrameworkMel();
- {
- std::lock_guard _l(mLock);
- useHalSoundDoseInterface &= mUseHalSoundDoseInterface;
- }
- if (!useHalSoundDoseInterface) {
- startMelComputationForNewPatch(streamHandle, deviceId);
- }
}
}
+ std::lock_guard _afl(mAudioFlinger.mLock);
std::lock_guard _l(mLock);
- mActiveMelPatches[patch.mAudioPatch.id] = newPatch;
+ ALOGV("%s add patch handle %d to active devices", __func__, handle);
+ startMelComputationForActivePatch_l(newPatch);
+ newPatch.csdActive = true;
+ mActiveMelPatches[handle] = newPatch;
}
-void AudioFlinger::MelReporter::startMelComputationForNewPatch(
- audio_io_handle_t streamHandle, audio_port_handle_t deviceId) {
- // Start the MEL calculation in the PlaybackThread
- std::lock_guard _lAf(mAudioFlinger.mLock);
- auto thread = mAudioFlinger.checkPlaybackThread_l(streamHandle);
- if (thread != nullptr) {
+void AudioFlinger::MelReporter::startMelComputationForActivePatch_l(const ActiveMelPatch& patch) {
+ auto thread = mAudioFlinger.checkPlaybackThread_l(patch.streamHandle);
+ if (thread == nullptr) {
+ ALOGE("%s cannot find thread for stream handle %d", __func__, patch.streamHandle);
+ return;
+ }
+
+ for (const auto& deviceHandle : patch.deviceHandles) {
+ ++mActiveDevices[deviceHandle];
+ ALOGI("%s add stream %d that uses device %d for CSD, nr of streams: %d", __func__,
+ patch.streamHandle, deviceHandle, mActiveDevices[deviceHandle]);
thread->startMelComputation(mSoundDoseManager->getOrCreateProcessorForDevice(
- deviceId,
- streamHandle,
+ deviceHandle,
+ patch.streamHandle,
thread->mSampleRate,
thread->mChannelCount,
thread->mFormat));
- }
+ }
}
void AudioFlinger::MelReporter::onReleaseAudioPatch(audio_patch_handle_t handle) {
- ALOGV("%s", __func__);
-
ActiveMelPatch melPatch;
{
std::lock_guard _l(mLock);
auto patchIt = mActiveMelPatches.find(handle);
if (patchIt == mActiveMelPatches.end()) {
- ALOGW(
- "%s patch does not contain any mix sources with active MEL calculation",
- __func__);
+ ALOGV("%s patch handle %d does not contain any mix sources with active MEL calculation",
+ __func__, handle);
return;
}
@@ -183,10 +205,9 @@
mActiveMelPatches.erase(patchIt);
}
- for (const auto& deviceId : melPatch.deviceHandles) {
- mSoundDoseManager->clearMapDeviceIdEntries(deviceId);
- }
- stopInternalMelComputationForStream(melPatch.streamHandle);
+ std::lock_guard _afl(mAudioFlinger.mLock);
+ std::lock_guard _l(mLock);
+ stopMelComputationForPatch_l(melPatch);
}
sp<media::ISoundDose> AudioFlinger::MelReporter::getSoundDoseInterface(
@@ -202,17 +223,46 @@
mUseHalSoundDoseInterface = true;
}
-void AudioFlinger::MelReporter::stopInternalMelComputationForStream(audio_io_handle_t streamId) {
- ALOGV("%s: stop internal mel for stream id: %d", __func__, streamId);
+void AudioFlinger::MelReporter::stopMelComputationForPatch_l(const ActiveMelPatch& patch) {
+ auto thread = mAudioFlinger.checkPlaybackThread_l(patch.streamHandle);
+ ALOGV("%s: stop MEL for stream id: %d", __func__, patch.streamHandle);
+ for (const auto& deviceId : patch.deviceHandles) {
+ if (mActiveDevices[deviceId] > 0) {
+ --mActiveDevices[deviceId];
+ if (mActiveDevices[deviceId] == 0) {
+ // no stream is using deviceId anymore
+ ALOGI("%s removing device %d from active CSD devices", __func__, deviceId);
+ mSoundDoseManager->clearMapDeviceIdEntries(deviceId);
+ }
+ }
+ }
- std::lock_guard _lAf(mAudioFlinger.mLock);
- mSoundDoseManager->removeStreamProcessor(streamId);
- auto thread = mAudioFlinger.checkPlaybackThread_l(streamId);
+ mSoundDoseManager->removeStreamProcessor(patch.streamHandle);
if (thread != nullptr) {
thread->stopMelComputation();
}
}
+
+std::optional<audio_patch_handle_t> AudioFlinger::MelReporter::activePatchStreamHandle_l(
+ audio_io_handle_t streamHandle) {
+    for (const auto& patchIt : mActiveMelPatches) {
+ if (patchIt.second.streamHandle == streamHandle) {
+ return patchIt.first;
+ }
+ }
+ return std::nullopt;
+}
+
+bool AudioFlinger::MelReporter::useHalSoundDoseInterface() {
+ bool useHalSoundDoseInterface = !mSoundDoseManager->forceUseFrameworkMel();
+ {
+ std::lock_guard _l(mLock);
+ useHalSoundDoseInterface &= mUseHalSoundDoseInterface;
+ }
+ return useHalSoundDoseInterface;
+}
+
std::string AudioFlinger::MelReporter::dump() {
std::lock_guard _l(mLock);
std::string output("\nSound Dose:\n");
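The start/stop helpers above keep a per-device use count so that a device's address mapping is only cleared when the last stream using it stops. A condensed, standalone sketch of that bookkeeping (names and types are illustrative):

    #include <unordered_map>

    struct DeviceUseCount {
        std::unordered_map<int /*deviceId*/, int> active;

        void onStreamStarted(int deviceId) { ++active[deviceId]; }

        // Returns true when the last stream using deviceId stopped, i.e. when its
        // mapping (cf. clearMapDeviceIdEntries) can be removed.
        bool onStreamStopped(int deviceId) {
            auto it = active.find(deviceId);
            if (it == active.end() || it->second == 0) return false;
            return --it->second == 0;
        }
    };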
diff --git a/services/audioflinger/MelReporter.h b/services/audioflinger/MelReporter.h
index acbc8ed..c1b291f 100644
--- a/services/audioflinger/MelReporter.h
+++ b/services/audioflinger/MelReporter.h
@@ -37,21 +37,21 @@
void onFirstRef() override;
- /** Returns true if we should compute MEL for the given device. */
- bool shouldComputeMelForDeviceType(audio_devices_t device);
-
/**
* Activates the MEL reporting from the HAL sound dose interface. If the HAL
* does not support the sound dose interface for this module, the internal MEL
     * calculation will be used.
*
- * For now support internal MelReporting only if the sound dose standalone HAL
- * is not implemented
+     * <p>If the device is using the audio AIDL HAL, this method will try to get the sound
+     * dose interface from IModule#getSoundDose(). Otherwise, if the legacy audio HIDL HAL is
+     * used, this method will look for the standalone sound dose implementation. It falls back
+     * to the internal MEL computation if no valid sound dose interface can be retrieved.
*
- * @return true if the MEL reporting will be done from the sound dose HAL
- * interface
+ * @return true if the MEL reporting will be done from any sound dose HAL interface
+ * implementation, false otherwise.
*/
- bool activateHalSoundDoseComputation(const std::string& module);
+ bool activateHalSoundDoseComputation(const std::string& module,
+ const sp<DeviceHalInterface>& device);
/**
* Activates the MEL reporting from internal framework values. These are used
@@ -70,24 +70,39 @@
const PatchPanel::Patch& patch) override;
void onReleaseAudioPatch(audio_patch_handle_t handle) override;
+ /**
+     * The new metadata is used to decide whether we should compute MEL for the given thread.
+     * This is the case only if at least one track in the thread mix uses the MEDIA or GAME
+     * usage. Otherwise, this method will disable CSD.
+ **/
+ void updateMetadataForCsd(audio_io_handle_t streamHandle,
+ const std::vector<playback_track_metadata_v7_t>& metadataVec);
private:
- void stopInternalMelComputation();
- void stopInternalMelComputationForStream(audio_io_handle_t streamId);
-
- void startMelComputationForNewPatch(audio_io_handle_t streamHandle,
- audio_port_handle_t deviceId);
-
- AudioFlinger& mAudioFlinger; // does not own the object
- std::shared_ptr<::aidl::android::hardware::audio::sounddose::ISoundDoseFactory>
- mSoundDoseFactory;
-
- sp<SoundDoseManager> mSoundDoseManager;
-
struct ActiveMelPatch {
audio_io_handle_t streamHandle{AUDIO_IO_HANDLE_NONE};
std::vector<audio_port_handle_t> deviceHandles;
+ bool csdActive;
};
+ /** Returns true if we should compute MEL for the given device. */
+ bool shouldComputeMelForDeviceType(audio_devices_t device);
+
+ void stopInternalMelComputation();
+
+ /** Should be called with the following order of locks: mAudioFlinger.mLock -> mLock. */
+ void stopMelComputationForPatch_l(const ActiveMelPatch& patch);
+
+ /** Should be called with the following order of locks: mAudioFlinger.mLock -> mLock. */
+ void startMelComputationForActivePatch_l(const ActiveMelPatch& patch);
+
+ std::optional<audio_patch_handle_t> activePatchStreamHandle_l(audio_io_handle_t streamHandle);
+
+ bool useHalSoundDoseInterface();
+
+ AudioFlinger& mAudioFlinger; // does not own the object
+
+ sp<SoundDoseManager> mSoundDoseManager;
+
/**
* Lock for protecting the active mel patches. Do not mix with the AudioFlinger lock.
* Locking order AudioFlinger::mLock -> PatchCommandThread::mLock -> MelReporter::mLock.
@@ -95,5 +110,7 @@
std::mutex mLock;
std::unordered_map<audio_patch_handle_t, ActiveMelPatch>
mActiveMelPatches GUARDED_BY(AudioFlinger::MelReporter::mLock);
+ std::unordered_map<audio_port_handle_t, int>
+ mActiveDevices GUARDED_BY(AudioFlinger::MelReporter::mLock);
bool mUseHalSoundDoseInterface GUARDED_BY(AudioFlinger::MelReporter::mLock) = false;
};
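The "_l" helpers and the locking comments above rely on every caller taking the two locks in the same order. A minimal sketch of that convention (stand-in mutexes and function names, not the real members):

    #include <mutex>

    std::mutex gAudioFlingerLock;  // stand-in for AudioFlinger::mLock
    std::mutex gMelReporterLock;   // stand-in for MelReporter::mLock

    void stopMelForPatchLocked() { /* expects both locks to already be held */ }

    void caller() {
        std::lock_guard af(gAudioFlingerLock);   // outer lock first
        std::lock_guard mel(gMelReporterLock);   // inner lock second, never the reverse
        stopMelForPatchLocked();
    }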
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index a39e439..4ca8a8e 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -2970,7 +2970,14 @@
void AudioFlinger::PlaybackThread::onCodecFormatChanged(
const std::basic_string<uint8_t>& metadataBs)
{
- std::thread([this, metadataBs]() {
+ wp<AudioFlinger::PlaybackThread> weakPointerThis = this;
+ std::thread([this, metadataBs, weakPointerThis]() {
+ sp<AudioFlinger::PlaybackThread> playbackThread = weakPointerThis.promote();
+ if (playbackThread == nullptr) {
+ ALOGW("PlaybackThread was destroyed, skip codec format change event");
+ return;
+ }
+
audio_utils::metadata::Data metadata =
audio_utils::metadata::dataFromByteString(metadataBs);
if (metadata.empty()) {
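The hunk above guards the asynchronous worker thread against the owning PlaybackThread being destroyed first: the lambda captures a weak reference and promotes it before touching the object. A standalone sketch of the same pattern (Worker and doWork are illustrative names):

    #include <thread>
    #include <utils/RefBase.h>
    using android::sp;
    using android::wp;

    struct Worker : public android::RefBase {
        void startAsync() {
            wp<Worker> weakThis = this;            // capture weakly, not strongly
            std::thread([weakThis]() {
                sp<Worker> self = weakThis.promote();
                if (self == nullptr) return;       // already destroyed, skip the work
                self->doWork();
            }).detach();
        }
        void doWork() { /* ... */ }
    };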
@@ -3222,10 +3229,10 @@
item.record();
}
-void AudioFlinger::PlaybackThread::updateMetadata_l()
+AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::PlaybackThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
- return; // nothing to do
+ return {}; // nothing to do
}
StreamOutHalInterface::SourceMetadata metadata;
auto backInserter = std::back_inserter(metadata.tracks);
@@ -3234,6 +3241,9 @@
track->copyMetadataTo(backInserter);
}
sendMetadataToBackend_l(metadata);
+ MetadataUpdate change;
+ change.playbackMetadataUpdate = metadata.tracks;
+ return change;
}
void AudioFlinger::PlaybackThread::sendMetadataToBackend_l(
@@ -3471,8 +3481,10 @@
}
void AudioFlinger::PlaybackThread::stopMelComputation() {
- ALOGV("%s: stopping mel processor for thread %d", __func__, id());
- mMelProcessor = nullptr;
+ if (mMelProcessor.load() != nullptr) {
+ ALOGV("%s: stopping mel processor for thread %d", __func__, id());
+ mMelProcessor = nullptr;
+ }
}
void AudioFlinger::PlaybackThread::threadLoop_drain()
@@ -3637,9 +3649,9 @@
if (result != OK) return result;
#ifdef FLOAT_EFFECT_CHAIN
- buffer = halInBuffer->audioBuffer()->f32;
+ buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
#else
- buffer = halInBuffer->audioBuffer()->s16;
+ buffer = halInBuffer ? halInBuffer->audioBuffer()->s16 : buffer;
#endif
ALOGV("addEffectChain_l() creating new input buffer %p session %d",
buffer, session);
@@ -3668,7 +3680,8 @@
halOutBuffer = halInBuffer;
ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
if (!audio_is_global_session(session)) {
- buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
+ buffer = halInBuffer ? reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData())
+ : buffer;
// Only one effect chain can be present in direct output thread and it uses
// the sink buffer as input
if (mType != DIRECT) {
@@ -3680,9 +3693,9 @@
&halInBuffer);
if (result != OK) return result;
#ifdef FLOAT_EFFECT_CHAIN
- buffer = halInBuffer->audioBuffer()->f32;
+ buffer = halInBuffer ? halInBuffer->audioBuffer()->f32 : buffer;
#else
- buffer = halInBuffer->audioBuffer()->s16;
+ buffer = halInBuffer ? halInBuffer->audioBuffer()->s16 : buffer;
#endif
ALOGV("addEffectChain_l() creating new input buffer %p session %d",
buffer, session);
@@ -3925,6 +3938,7 @@
checkOutputStageEffects();
}
+ MetadataUpdate metadataUpdate;
{ // scope for mLock
Mutex::Autolock _l(mLock);
@@ -4026,7 +4040,7 @@
mActiveTracks.updatePowerState(this);
- updateMetadata_l();
+ metadataUpdate = updateMetadata_l();
// prevent any changes in effect chain list and in each effect chain
// during mixing and effect process as the audio buffers could be deleted
@@ -4256,6 +4270,11 @@
// enable changes in effect chain
unlockEffectChains(effectChains);
+ if (!metadataUpdate.playbackMetadataUpdate.empty()) {
+ mAudioFlinger->mMelReporter->updateMetadataForCsd(id(),
+ metadataUpdate.playbackMetadataUpdate);
+ }
+
if (!waitingAsyncCallback()) {
// mSleepTimeUs == 0 means we must write to audio hardware
if (mSleepTimeUs == 0) {
@@ -4775,7 +4794,7 @@
if (configChanged) {
sendIoConfigEvent_l(AUDIO_OUTPUT_CONFIG_CHANGED);
}
- // Force meteadata update after a route change
+ // Force metadata update after a route change
mActiveTracks.setHasChanged();
return status;
@@ -9015,10 +9034,10 @@
mSharedAudioPackageName = "";
}
-void AudioFlinger::RecordThread::updateMetadata_l()
+AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::RecordThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
- return; // nothing to do
+ return {}; // nothing to do
}
StreamInHalInterface::SinkMetadata metadata;
auto backInserter = std::back_inserter(metadata.tracks);
@@ -9026,6 +9045,9 @@
track->copyMetadataTo(backInserter);
}
mInput->stream->updateSinkMetadata(metadata);
+ MetadataUpdate change;
+ change.recordMetadataUpdate = metadata.tracks;
+ return change;
}
// destroyTrack_l() must be called with ThreadBase::mLock held
@@ -10725,10 +10747,10 @@
}
}
-void AudioFlinger::MmapPlaybackThread::updateMetadata_l()
+AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::MmapPlaybackThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
- return; // nothing to do
+ return {}; // nothing to do
}
StreamOutHalInterface::SourceMetadata metadata;
for (const sp<MmapTrack> &track : mActiveTracks) {
@@ -10744,7 +10766,11 @@
metadata.tracks.push_back(trackMetadata);
}
mOutput->stream->updateSourceMetadata(metadata);
-}
+
+ MetadataUpdate change;
+ change.playbackMetadataUpdate = metadata.tracks;
+ return change;
+};
void AudioFlinger::MmapPlaybackThread::checkSilentMode_l()
{
@@ -10852,10 +10878,10 @@
}
}
-void AudioFlinger::MmapCaptureThread::updateMetadata_l()
+AudioFlinger::ThreadBase::MetadataUpdate AudioFlinger::MmapCaptureThread::updateMetadata_l()
{
if (!isStreamInitialized() || !mActiveTracks.readAndClearHasChanged()) {
- return; // nothing to do
+ return {}; // nothing to do
}
StreamInHalInterface::SinkMetadata metadata;
for (const sp<MmapTrack> &track : mActiveTracks) {
@@ -10870,6 +10896,9 @@
metadata.tracks.push_back(trackMetadata);
}
mInput->stream->updateSinkMetadata(metadata);
+ MetadataUpdate change;
+ change.recordMetadataUpdate = metadata.tracks;
+ return change;
}
void AudioFlinger::MmapCaptureThread::setRecordSilenced(audio_port_handle_t portId, bool silenced)
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 4ab4557..f829efc 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -607,7 +607,11 @@
void checkSuspendOnAddEffectChain_l(const sp<EffectChain>& chain);
// sends the metadata of the active tracks to the HAL
- virtual void updateMetadata_l() = 0;
+ struct MetadataUpdate {
+ std::vector<playback_track_metadata_v7_t> playbackMetadataUpdate;
+ std::vector<record_track_metadata_v7_t> recordMetadataUpdate;
+ };
+ virtual MetadataUpdate updateMetadata_l() = 0;
String16 getWakeLockTag();
@@ -1277,7 +1281,7 @@
void removeTrack_l(const sp<Track>& track);
void readOutputParameters_l();
- void updateMetadata_l() final;
+ MetadataUpdate updateMetadata_l() final;
virtual void sendMetadataToBackend_l(const StreamOutHalInterface::SourceMetadata& metadata);
void collectTimestamps_l();
@@ -1983,7 +1987,7 @@
status_t setPreferredMicrophoneDirection(audio_microphone_direction_t direction);
status_t setPreferredMicrophoneFieldDimension(float zoom);
- void updateMetadata_l() override;
+ MetadataUpdate updateMetadata_l() override;
bool fastTrackAvailable() const { return mFastTrackAvail; }
@@ -2257,7 +2261,7 @@
virtual void checkSilentMode_l();
void processVolume_l() override;
- void updateMetadata_l() override;
+ MetadataUpdate updateMetadata_l() override;
virtual void toAudioPortConfig(struct audio_port_config *config);
@@ -2290,7 +2294,7 @@
status_t exitStandby_l() REQUIRES(mLock) override;
- void updateMetadata_l() override;
+ MetadataUpdate updateMetadata_l() override;
void processVolume_l() override;
void setRecordSilenced(audio_port_handle_t portId,
bool silenced) override;
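updateMetadata_l() now returns the collected metadata instead of acting on it directly, so the caller can notify the MEL reporter after the thread lock is released (see the threadLoop change above). A compact sketch of that collect-under-lock / notify-outside-lock shape, using illustrative types:

    #include <mutex>
    #include <vector>

    struct MetadataUpdate { std::vector<int> playbackMetadataUpdate; };

    class ThreadSketch {
      public:
        void loopOnce() {
            MetadataUpdate update;
            {
                std::lock_guard lock(mLock);
                update = updateMetadataLocked();     // gather while holding the lock
            }
            if (!update.playbackMetadataUpdate.empty()) {
                notifyMelReporter(update);           // forward with the lock released
            }
        }
      private:
        MetadataUpdate updateMetadataLocked() { return {mTrackUsages}; }
        void notifyMelReporter(const MetadataUpdate&) { /* e.g. updateMetadataForCsd(...) */ }
        std::mutex mLock;
        std::vector<int> mTrackUsages{1, 2};
    };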
diff --git a/services/audioflinger/sounddose/SoundDoseManager.cpp b/services/audioflinger/sounddose/SoundDoseManager.cpp
index df6eb5b..6f9e11f 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.cpp
+++ b/services/audioflinger/sounddose/SoundDoseManager.cpp
@@ -116,23 +116,26 @@
ALOGV("%s", __func__);
std::lock_guard _l(mLock);
- mRs2Value = rs2Value;
if (mHalSoundDose != nullptr) {
// using the HAL sound dose interface
- if (!mHalSoundDose->setOutputRs2(mRs2Value).isOk()) {
- ALOGE("%s: Cannot set RS2 value for momentary exposure %f", __func__, mRs2Value);
+ if (!mHalSoundDose->setOutputRs2(rs2Value).isOk()) {
+ ALOGE("%s: Cannot set RS2 value for momentary exposure %f", __func__, rs2Value);
+ return;
}
+ mRs2Value = rs2Value;
return;
}
for (auto& streamProcessor : mActiveProcessors) {
sp<audio_utils::MelProcessor> processor = streamProcessor.second.promote();
if (processor != nullptr) {
- status_t result = processor->setOutputRs2(mRs2Value);
+ status_t result = processor->setOutputRs2(rs2Value);
if (result != NO_ERROR) {
- ALOGW("%s: could not set RS2 value %f for stream %d", __func__, mRs2Value,
+ ALOGW("%s: could not set RS2 value %f for stream %d", __func__, rs2Value,
streamProcessor.first);
+ return;
}
+ mRs2Value = rs2Value;
}
}
}
@@ -160,7 +163,7 @@
auto adt = AudioDeviceTypeAddr(type, address);
auto deviceIt = mActiveDevices.find(adt);
if (deviceIt == mActiveDevices.end()) {
- ALOGE("%s: could not find port id for device %s", __func__, adt.toString().c_str());
+ ALOGI("%s: could not find port id for device %s", __func__, adt.toString().c_str());
return AUDIO_PORT_HANDLE_NONE;
}
return deviceIt->second;
@@ -184,7 +187,6 @@
}
++activeDevice;
}
- return;
}
ndk::ScopedAStatus SoundDoseManager::HalSoundDoseCallback::onMomentaryExposureWarning(
@@ -203,7 +205,7 @@
auto id = soundDoseManager->getIdForAudioDevice(in_audioDevice);
if (id == AUDIO_PORT_HANDLE_NONE) {
- ALOGW("%s: no mapped id for audio device with type %d and address %s",
+ ALOGI("%s: no mapped id for audio device with type %d and address %s",
__func__, in_audioDevice.type.type,
in_audioDevice.address.get<AudioDeviceAddress::id>().c_str());
return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
@@ -230,7 +232,7 @@
auto id = soundDoseManager->getIdForAudioDevice(in_audioDevice);
if (id == AUDIO_PORT_HANDLE_NONE) {
- ALOGW("%s: no mapped id for audio device with type %d and address %s",
+ ALOGI("%s: no mapped id for audio device with type %d and address %s",
__func__, in_audioDevice.type.type,
in_audioDevice.address.get<AudioDeviceAddress::id>().c_str());
return ndk::ScopedAStatus::fromExceptionCode(EX_ILLEGAL_ARGUMENT);
diff --git a/services/audioflinger/sounddose/SoundDoseManager.h b/services/audioflinger/sounddose/SoundDoseManager.h
index db87ad8..c12efc3 100644
--- a/services/audioflinger/sounddose/SoundDoseManager.h
+++ b/services/audioflinger/sounddose/SoundDoseManager.h
@@ -176,7 +176,9 @@
std::unordered_map<audio_io_handle_t, wp<audio_utils::MelProcessor>> mActiveProcessors
GUARDED_BY(mLock);
- // map active device address and type to device id
+    // map active device address and type to device id; also used for managing the pause/resume
+    // logic for deviceIds that should not report MEL values (e.g., those without an active MUSIC
+    // or GAME stream).
std::map<AudioDeviceTypeAddr, audio_port_handle_t> mActiveDevices GUARDED_BY(mLock);
float mRs2Value GUARDED_BY(mLock);
diff --git a/services/audiopolicy/TEST_MAPPING b/services/audiopolicy/TEST_MAPPING
index 4d43eb0..2612393 100644
--- a/services/audiopolicy/TEST_MAPPING
+++ b/services/audiopolicy/TEST_MAPPING
@@ -31,5 +31,10 @@
}
]
}
+ ],
+ "auto-presubmit": [
+ {
+ "name": "audiopolicy_tests"
+ }
]
}
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
index 54a143c..92292e1 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyMix.h
@@ -62,11 +62,24 @@
void closeOutput(sp<SwAudioOutputDescriptor> &desc);
/**
- * Try to find an output descriptor for the given attributes.
+ * Tries to find the best matching audio policy mix
*
- * @param[in] attributes to consider fowr the research of output descriptor.
- * @param[out] desc to return if an primary output could be found.
- * @param[out] secondaryDesc other desc that the audio should be routed to.
+     * @param[in] attributes to consider when searching for the audio policy mix.
+     * @param[in] config to consider when searching for the audio policy mix.
+     * @param[in] uid to consider when searching for the audio policy mix.
+     * @param[in] session to consider when searching for the audio policy mix.
+     * @param[in] flags to consider when searching for the audio policy mix.
+     * @param[in] availableOutputDevices output devices that can be used while searching for
+     *            the audio policy mix.
+     * @param[in] requestedDevice currently requested device, used to determine whether the
+     *            matching audio policy mix should be used instead of the currently set
+     *            preferred device.
+     * @param[out] primaryMix to return in case a matching audio policy mix could be found.
+     * @param[out] secondaryMixes that audio should be routed to in case matching
+     *             secondary mixes could be found.
+     * @param[out] usePrimaryOutputFromPolicyMixes to return in case the audio policy mix
+     *             should be used, including the case where the requested device is explicitly
+     *             disallowed by the audio policy.
+ *
* @return OK if the request is valid
* otherwise if the request is not supported
*/
@@ -75,8 +88,11 @@
uid_t uid,
audio_session_t session,
audio_output_flags_t flags,
+ const DeviceVector &availableOutputDevices,
+ const sp<DeviceDescriptor>& requestedDevice,
sp<AudioPolicyMix> &primaryMix,
- std::vector<sp<AudioPolicyMix>> *secondaryMixes);
+ std::vector<sp<AudioPolicyMix>> *secondaryMixes,
+ bool& usePrimaryOutputFromPolicyMixes);
sp<DeviceDescriptor> getDeviceAndMixForInputSource(const audio_attributes_t& attributes,
const DeviceVector &availableDeviceTypes,
@@ -132,6 +148,13 @@
const audio_config_base_t& config,
uid_t uid,
audio_session_t session);
+ bool mixDisallowsRequestedDevice(const AudioMix* mix,
+ const sp<DeviceDescriptor>& requestedDevice,
+ const sp<DeviceDescriptor>& mixDevice,
+ const uid_t uid);
+
+ sp<DeviceDescriptor> getOutputDeviceForMix(const AudioMix* mix,
+ const DeviceVector& availableOutputDevices);
};
std::optional<std::string> extractAddressFromAudioAttributes(const audio_attributes_t& attr);
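A condensed restatement of the usePrimaryOutputFromPolicyMixes rule documented above (editorial sketch with illustrative parameters, not the actual implementation): explicit routing normally wins over a dynamic-policy primary mix, unless some mix explicitly disallows the requested device.

    bool shouldUsePrimaryOutputFromPolicyMixes(bool mixesDisallowRequestedDevice,
                                               bool hasRequestedDevice,
                                               bool hasPrimaryMix) {
        return (mixesDisallowRequestedDevice || !hasRequestedDevice) && hasPrimaryMix;
    }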
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
index 003dcaf..ba5a6a7 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioPolicyMix.cpp
@@ -15,7 +15,7 @@
*/
#define LOG_TAG "APM_AudioPolicyMix"
-//#define LOG_NDEBUG 0
+// #define LOG_NDEBUG 0
#include <algorithm>
#include <iterator>
@@ -259,14 +259,25 @@
const audio_attributes_t& attributes, const audio_config_base_t& config, const uid_t uid,
const audio_session_t session,
audio_output_flags_t flags,
+ const DeviceVector &availableOutputDevices,
+ const sp<DeviceDescriptor>& requestedDevice,
sp<AudioPolicyMix> &primaryMix,
- std::vector<sp<AudioPolicyMix>> *secondaryMixes)
+ std::vector<sp<AudioPolicyMix>> *secondaryMixes,
+ bool& usePrimaryOutputFromPolicyMixes)
{
ALOGV("getOutputForAttr() querying %zu mixes:", size());
primaryMix.clear();
+ bool mixesDisallowsRequestedDevice = false;
for (size_t i = 0; i < size(); i++) {
sp<AudioPolicyMix> policyMix = itemAt(i);
const bool primaryOutputMix = !is_mix_loopback_render(policyMix->mRouteFlags);
+ sp<DeviceDescriptor> mixDevice = getOutputDeviceForMix(policyMix.get(),
+ availableOutputDevices);
+ if (mixDisallowsRequestedDevice(policyMix.get(), requestedDevice, mixDevice, uid)) {
+ ALOGV("%s: Mix %zu: does not allows device", __func__, i);
+ mixesDisallowsRequestedDevice = true;
+ }
+
if (!primaryOutputMix && (flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ)) {
// AAudio does not support MMAP_NO_IRQ loopback render, and there is no way with
// the current MmapStreamInterface::start to reject a specific client added to a shared
@@ -288,6 +299,11 @@
continue; // skip the mix
}
+ if (mixDevice != nullptr && mixDevice->equals(requestedDevice)) {
+ ALOGV("%s: Mix %zu: requested device mathches", __func__, i);
+ mixesDisallowsRequestedDevice = false;
+ }
+
if (primaryOutputMix) {
primaryMix = policyMix;
ALOGV("%s: Mix %zu: set primary desc", __func__, i);
@@ -298,9 +314,36 @@
}
}
}
+
+ // Explicit routing is higher priority than dynamic policy primary output, but policy may
+ // explicitly deny it
+ usePrimaryOutputFromPolicyMixes =
+ (mixesDisallowsRequestedDevice || requestedDevice == nullptr) && primaryMix != nullptr;
+
return NO_ERROR;
}
+sp<DeviceDescriptor> AudioPolicyMixCollection::getOutputDeviceForMix(const AudioMix* mix,
+ const DeviceVector& availableOutputDevices) {
+ ALOGV("%s: device (0x%x, addr=%s) forced by mix", __func__, mix->mDeviceType,
+ mix->mDeviceAddress.c_str());
+ return availableOutputDevices.getDevice(mix->mDeviceType, mix->mDeviceAddress,
+ AUDIO_FORMAT_DEFAULT);
+}
+
+bool AudioPolicyMixCollection::mixDisallowsRequestedDevice(const AudioMix* mix,
+ const sp<DeviceDescriptor>& requestedDevice,
+ const sp<DeviceDescriptor>& mixDevice,
+ const uid_t uid) {
+ if (requestedDevice == nullptr || mixDevice == nullptr) {
+ return false;
+ }
+
+ return is_mix_disallows_preferred_device(mix->mRouteFlags)
+ && requestedDevice->equals(mixDevice)
+ && mix->hasUserIdRule(false /* match */, multiuser_get_user_id(uid));
+}
+
bool AudioPolicyMixCollection::mixMatch(const AudioMix* mix, size_t mixIndex,
const audio_attributes_t& attributes, const audio_config_base_t& config,
uid_t uid, audio_session_t session) {
@@ -361,11 +404,7 @@
for (size_t i = 0; i < size(); i++) {
if (itemAt(i)->getOutput() == output) {
// This Desc is involved in a Mix, which has the highest prio
- audio_devices_t deviceType = itemAt(i)->mDeviceType;
- String8 address = itemAt(i)->mDeviceAddress;
- ALOGV("%s: device (0x%x, addr=%s) forced by mix",
- __FUNCTION__, deviceType, address.c_str());
- return availableOutputDevices.getDevice(deviceType, address, AUDIO_FORMAT_DEFAULT);
+ return getOutputDeviceForMix(itemAt(i).get(), availableOutputDevices);
}
}
return nullptr;
@@ -568,13 +607,14 @@
break;
}
}
- if (!deviceMatch && !mix->hasMatchUserIdRule()) {
+ if (!deviceMatch && !mix->hasUserIdRule(true /*match*/)) {
// this mix doesn't go to one of the listed devices for the given userId,
// and it's not already restricting the mix on a userId,
// modify its rules to exclude the userId
- if (!mix->hasUserIdRule(false /*match*/, userId)) {
+ if (!mix->hasUserIdRule(false /* match */, userId)) {
// no need to do it again if userId is already excluded
mix->setExcludeUserId(userId);
+ mix->mRouteFlags = mix->mRouteFlags | MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE;
}
}
}
@@ -595,6 +635,10 @@
EraseCriteriaIf(mix->mCriteria, [userId](const AudioMixMatchCriterion& c) {
return c.mRule == RULE_EXCLUDE_USERID && c.mValue.mUserId == userId;
});
+
+ if (!mix->hasUserIdRule(false /* match */)) {
+ mix->mRouteFlags = mix->mRouteFlags & ~MIX_ROUTE_FLAG_DISALLOWS_PREFERRED_DEVICE;
+ }
}
return NO_ERROR;
}
diff --git a/services/audiopolicy/engine/common/include/VolumeGroup.h b/services/audiopolicy/engine/common/include/VolumeGroup.h
index 5378f64..f40ab1c 100644
--- a/services/audiopolicy/engine/common/include/VolumeGroup.h
+++ b/services/audiopolicy/engine/common/include/VolumeGroup.h
@@ -39,7 +39,7 @@
VolumeCurves *getVolumeCurves() { return &mGroupVolumeCurves; }
void addSupportedAttributes(const audio_attributes_t &attr);
- AttributesVector getSupportedAttributes() const { return mGroupVolumeCurves.getAttributes(); }
+ AttributesVector getSupportedAttributes() const;
void addSupportedStream(audio_stream_type_t stream);
StreamTypeVector getStreamTypes() const { return mGroupVolumeCurves.getStreamTypes(); }
diff --git a/services/audiopolicy/engine/common/src/VolumeGroup.cpp b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
index e189807..f5ffbba 100644
--- a/services/audiopolicy/engine/common/src/VolumeGroup.cpp
+++ b/services/audiopolicy/engine/common/src/VolumeGroup.cpp
@@ -37,6 +37,17 @@
{
}
+// Used for introspection, e.g. JAVA
+AttributesVector VolumeGroup::getSupportedAttributes() const
+{
+ AttributesVector supportedAttributes = {};
+ for (auto &aa : mGroupVolumeCurves.getAttributes()) {
+ aa.source = AUDIO_SOURCE_INVALID;
+ supportedAttributes.push_back(aa);
+ }
+ return supportedAttributes;
+}
+
void VolumeGroup::dump(String8 *dst, int spaces) const
{
dst->appendFormat("\n%*s-%s (id: %d)\n", spaces, "", mName.c_str(), mId);
diff --git a/services/audiopolicy/enginedefault/src/Engine.cpp b/services/audiopolicy/enginedefault/src/Engine.cpp
index d96ae21..f2df7ac 100644
--- a/services/audiopolicy/enginedefault/src/Engine.cpp
+++ b/services/audiopolicy/enginedefault/src/Engine.cpp
@@ -299,7 +299,9 @@
if ((strategy == STRATEGY_SONIFICATION) ||
(getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_SYSTEM_ENFORCED)) {
- devices = availableOutputDevices.getDevicesFromType(AUDIO_DEVICE_OUT_SPEAKER);
+ // favor dock over speaker when available
+ devices = availableOutputDevices.getFirstDevicesFromTypes({
+ AUDIO_DEVICE_OUT_DGTL_DOCK_HEADSET, AUDIO_DEVICE_OUT_SPEAKER});
}
// if SCO headset is connected and we are told to use it, play ringtone over
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 0f2b34e..6d7ed75 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -603,7 +603,10 @@
audioDeviceSet = getAudioDeviceOutAllA2dpSet();
break;
case AUDIO_DEVICE_OUT_BLE_HEADSET:
- audioDeviceSet = getAudioDeviceOutAllBleSet();
+ audioDeviceSet = getAudioDeviceOutLeAudioUnicastSet();
+ break;
+ case AUDIO_DEVICE_OUT_BLE_BROADCAST:
+ audioDeviceSet = getAudioDeviceOutLeAudioBroadcastSet();
break;
default:
ALOGE("%s() device type 0x%08x not supported", __func__, device);
@@ -1043,7 +1046,7 @@
// when searching for direct outputs, if several profiles are compatible, give priority
// to one with offload capability
- if (profile != 0 &&
+ if (profile != 0 &&
((curProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) == 0)) {
continue;
}
@@ -1168,6 +1171,8 @@
ALOGV("%s() attributes=%s stream=%s session %d selectedDeviceId %d", __func__,
toString(*resultAttr).c_str(), toString(*stream).c_str(), session, requestedPortId);
+ bool usePrimaryOutputFromPolicyMixes = false;
+
// The primary output is the explicit routing (eg. setPreferredDevice) if specified,
// otherwise, fallback to the dynamic policies, if none match, query the engine.
// Secondary outputs are always found by dynamic policies as the engine do not support them
@@ -1177,14 +1182,12 @@
.format = config->format,
};
status = mPolicyMixes.getOutputForAttr(*resultAttr, clientConfig, uid, session, *flags,
- primaryMix, secondaryMixes);
+ mAvailableOutputDevices, requestedDevice, primaryMix,
+ secondaryMixes, usePrimaryOutputFromPolicyMixes);
if (status != OK) {
return status;
}
- // Explicit routing is higher priority then any dynamic policy primary output
- bool usePrimaryOutputFromPolicyMixes = requestedDevice == nullptr && primaryMix != nullptr;
-
// FIXME: in case of RENDER policy, the output capabilities should be checked
if ((secondaryMixes != nullptr && !secondaryMixes->empty())
&& !audio_is_linear_pcm(config->format)) {
@@ -2132,6 +2135,10 @@
}
}
+ if (client->hasPreferredDevice()) {
+        // playback activity with a preferred device that impacts routing occurred; inform upper layers
+ mpClientInterface->onRoutingUpdated();
+ }
if (delayMs != 0) {
usleep(delayMs * 1000);
}
@@ -2392,6 +2399,11 @@
}
sp<TrackClientDescriptor> client = outputDesc->getClient(portId);
+ if (client->hasPreferredDevice(true)) {
+        // playback activity with a preferred device that impacts routing occurred; inform upper layers
+ mpClientInterface->onRoutingUpdated();
+ }
+
ALOGV("stopOutput() output %d, stream %d, session %d",
outputDesc->mIoHandle, client->stream(), client->session());
@@ -3916,7 +3928,8 @@
status_t status = mEngine->clearDevicesRoleForStrategy(strategy, role);
if (status != NO_ERROR) {
- ALOGV("Engine could not remove device role for strategy %d status %d",
+ ALOGW_IF(status != NAME_NOT_FOUND,
+ "Engine could not remove device role for strategy %d status %d",
strategy, status);
return status;
}
@@ -3988,10 +4001,11 @@
status_t status = mEngine->removeDevicesRoleForCapturePreset(
audioSource, role, devices);
- ALOGW_IF(status != NO_ERROR,
+ ALOGW_IF(status != NO_ERROR && status != NAME_NOT_FOUND,
"Engine could not remove devices role (%d) for capture preset %d", role, audioSource);
-
- updateInputRouting();
+ if (status == NO_ERROR) {
+ updateInputRouting();
+ }
return status;
}
@@ -4000,10 +4014,11 @@
ALOGV("%s() audioSource=%d role=%d", __func__, audioSource, role);
status_t status = mEngine->clearDevicesRoleForCapturePreset(audioSource, role);
- ALOGW_IF(status != NO_ERROR,
+ ALOGW_IF(status != NO_ERROR && status != NAME_NOT_FOUND,
"Engine could not clear devices role (%d) for capture preset %d", role, audioSource);
-
- updateInputRouting();
+ if (status == NO_ERROR) {
+ updateInputRouting();
+ }
return status;
}
@@ -6001,6 +6016,17 @@
return status;
}
+    // If a microphone's address is empty, set it according to the device type
+ for (size_t i = 0; i < mInputDevicesAll.size(); i++) {
+ if (mInputDevicesAll[i]->address().empty()) {
+ if (mInputDevicesAll[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
+ mInputDevicesAll[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
+ } else if (mInputDevicesAll[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
+ mInputDevicesAll[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
+ }
+ }
+ }
+
mEngine->updateDeviceSelectionCache();
mCommunnicationStrategy = mEngine->getProductStrategyForAttributes(
mEngine->getAttributesForStreamType(AUDIO_STREAM_VOICE_CALL));
@@ -6015,17 +6041,6 @@
mDefaultOutputDevice->toString().c_str());
status = NO_INIT;
}
- // If microphones address is empty, set it according to device type
- for (size_t i = 0; i < mAvailableInputDevices.size(); i++) {
- if (mAvailableInputDevices[i]->address().empty()) {
- if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BUILTIN_MIC) {
- mAvailableInputDevices[i]->setAddress(AUDIO_BOTTOM_MICROPHONE_ADDRESS);
- } else if (mAvailableInputDevices[i]->type() == AUDIO_DEVICE_IN_BACK_MIC) {
- mAvailableInputDevices[i]->setAddress(AUDIO_BACK_MICROPHONE_ADDRESS);
- }
- }
- }
-
ALOGW_IF(mPrimaryOutput == nullptr, "The policy configuration does not declare a primary output");
// Silence ALOGV statements
@@ -6730,6 +6745,7 @@
SortedVector<audio_io_handle_t> dstOutputs = getOutputsForDevices(newDevices, mOutputs);
uint32_t maxLatency = 0;
+ bool unneededUsePrimaryOutputFromPolicyMixes = false;
std::vector<sp<SwAudioOutputDescriptor>> invalidatedOutputs;
// take into account dynamic audio policies related changes: if a client is now associated
// to a different policy mix than at creation time, invalidate corresponding stream
@@ -6744,7 +6760,9 @@
}
sp<AudioPolicyMix> primaryMix;
status_t status = mPolicyMixes.getOutputForAttr(client->attributes(), client->config(),
- client->uid(), client->session(), client->flags(), primaryMix, nullptr);
+ client->uid(), client->session(), client->flags(), mAvailableOutputDevices,
+ nullptr /* requestedDevice */, primaryMix, nullptr /* secondaryMixes */,
+ unneededUsePrimaryOutputFromPolicyMixes);
if (status != OK) {
continue;
}
@@ -6842,13 +6860,16 @@
void AudioPolicyManager::checkSecondaryOutputs() {
PortHandleVector clientsToInvalidate;
TrackSecondaryOutputsMap trackSecondaryOutputs;
+ bool unneededUsePrimaryOutputFromPolicyMixes = false;
for (size_t i = 0; i < mOutputs.size(); i++) {
const sp<SwAudioOutputDescriptor>& outputDescriptor = mOutputs[i];
for (const sp<TrackClientDescriptor>& client : outputDescriptor->getClientIterable()) {
sp<AudioPolicyMix> primaryMix;
std::vector<sp<AudioPolicyMix>> secondaryMixes;
status_t status = mPolicyMixes.getOutputForAttr(client->attributes(), client->config(),
- client->uid(), client->session(), client->flags(), primaryMix, &secondaryMixes);
+ client->uid(), client->session(), client->flags(), mAvailableOutputDevices,
+ nullptr /* requestedDevice */, primaryMix, &secondaryMixes,
+ unneededUsePrimaryOutputFromPolicyMixes);
std::vector<sp<SwAudioOutputDescriptor>> secondaryDescs;
for (auto &secondaryMix : secondaryMixes) {
sp<SwAudioOutputDescriptor> outputDesc = secondaryMix->getOutput();
@@ -8282,9 +8303,11 @@
// check dynamic policies but only for primary descriptors (secondary not used for audible
// audio routing, only used for duplication for playback capture)
sp<AudioPolicyMix> policyMix;
+ bool unneededUsePrimaryOutputFromPolicyMixes = false;
status_t status = mPolicyMixes.getOutputForAttr(attr, AUDIO_CONFIG_BASE_INITIALIZER,
- 0 /*uid unknown here*/, AUDIO_SESSION_NONE, AUDIO_OUTPUT_FLAG_NONE, policyMix,
- nullptr /* secondaryMixes */);
+ 0 /*uid unknown here*/, AUDIO_SESSION_NONE, AUDIO_OUTPUT_FLAG_NONE,
+ mAvailableOutputDevices, nullptr /* requestedDevice */, policyMix,
+ nullptr /* secondaryMixes */, unneededUsePrimaryOutputFromPolicyMixes);
if (status != OK) {
return status;
}
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 1774600..35411f9 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -1534,7 +1534,7 @@
numPortsReq = std::min(numPortsReq, num_ports);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(ports.get(), ports.get() + numPortsReq, std::back_inserter(*portsAidl),
- legacy2aidl_audio_port_v7_AudioPort)));
+ legacy2aidl_audio_port_v7_AudioPortFw)));
count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int32_t>(num_ports));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int32_t>(generation));
return Status::ok();
@@ -1549,7 +1549,7 @@
}
AutoCallerClear acc;
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(mAudioPolicyManager->getAudioPort(&port)));
- *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_port_v7_AudioPort(port));
+ *_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_port_v7_AudioPortFw(port));
return Status::ok();
}
@@ -1557,7 +1557,7 @@
int32_t handleAidl,
int32_t* _aidl_return) {
audio_patch patch = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioPatch_audio_patch(patchAidl));
+ aidl2legacy_AudioPatchFw_audio_patch(patchAidl));
audio_patch_handle_t handle = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_int32_t_audio_port_handle_t(handleAidl));
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(AudioValidator::validateAudioPatch(patch)));
@@ -1616,7 +1616,7 @@
numPatchesReq = std::min(numPatchesReq, num_patches);
RETURN_IF_BINDER_ERROR(binderStatusFromStatusT(
convertRange(patches.get(), patches.get() + numPatchesReq,
- std::back_inserter(*patchesAidl), legacy2aidl_audio_patch_AudioPatch)));
+ std::back_inserter(*patchesAidl), legacy2aidl_audio_patch_AudioPatchFw)));
count->value = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int32_t>(num_patches));
*_aidl_return = VALUE_OR_RETURN_BINDER_STATUS(convertIntegral<int32_t>(generation));
return Status::ok();
@@ -1625,7 +1625,7 @@
Status AudioPolicyService::setAudioPortConfig(const media::AudioPortConfigFw& configAidl)
{
audio_port_config config = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioPortConfig_audio_port_config(configAidl));
+ aidl2legacy_AudioPortConfigFw_audio_port_config(configAidl));
RETURN_IF_BINDER_ERROR(
binderStatusFromStatusT(AudioValidator::validateAudioPortConfig(config)));
@@ -1799,7 +1799,7 @@
const media::AudioAttributesInternal& attributesAidl,
int32_t* _aidl_return) {
audio_port_config source = VALUE_OR_RETURN_BINDER_STATUS(
- aidl2legacy_AudioPortConfig_audio_port_config(sourceAidl));
+ aidl2legacy_AudioPortConfigFw_audio_port_config(sourceAidl));
audio_attributes_t attributes = VALUE_OR_RETURN_BINDER_STATUS(
aidl2legacy_AudioAttributesInternal_audio_attributes_t(attributesAidl));
audio_port_handle_t portId;
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 8b23ede..31d5249 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -70,7 +70,7 @@
public IBinder::DeathRecipient,
public SpatializerPolicyCallback
{
- friend class BinderService<AudioPolicyService>;
+ friend class sp<AudioPolicyService>;
public:
// for BinderService
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
index 72dba3d..ab1b6f7 100644
--- a/services/audiopolicy/service/SpatializerPoseController.cpp
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -192,7 +192,7 @@
mHeadSensor = INVALID_SENSOR;
}
- mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */);
+ mProcessor->recenter(true /* recenterHead */, false /* recenterScreen */, __func__);
}
void SpatializerPoseController::setScreenSensor(int32_t sensor) {
@@ -229,7 +229,7 @@
mScreenSensor = INVALID_SENSOR;
}
- mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */);
+ mProcessor->recenter(false /* recenterHead */, true /* recenterScreen */, __func__);
}
void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
@@ -276,7 +276,7 @@
void SpatializerPoseController::recenter() {
std::lock_guard lock(mMutex);
- mProcessor->recenter();
+ mProcessor->recenter(true /* recenterHead */, true /* recenterScreen */, __func__);
}
void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
@@ -286,13 +286,13 @@
mProcessor->setWorldToHeadPose(timestamp, pose,
twist.value_or(Twist3f()) / kTicksPerSecond);
if (isNewReference) {
- mProcessor->recenter(true, false);
+ mProcessor->recenter(true, false, __func__);
}
}
if (sensor == mScreenSensor) {
mProcessor->setWorldToScreenPose(timestamp, pose);
if (isNewReference) {
- mProcessor->recenter(false, true);
+ mProcessor->recenter(false, true, __func__);
}
}
}
diff --git a/services/audiopolicy/tests/Android.bp b/services/audiopolicy/tests/Android.bp
index 6813587..bf82680 100644
--- a/services/audiopolicy/tests/Android.bp
+++ b/services/audiopolicy/tests/Android.bp
@@ -28,6 +28,7 @@
"liblog",
"libmedia_helper",
"libutils",
+ "libcutils",
"libxml2",
"framework-permission-aidl-cpp",
"libbinder",
@@ -54,7 +55,10 @@
"-Wall",
],
- test_suites: ["device-tests"],
+ test_suites: [
+ "device-tests",
+ "automotive-tests",
+ ],
}
diff --git a/services/audiopolicy/tests/audiopolicymanager_tests.cpp b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
index 3f38d01..ef829e1 100644
--- a/services/audiopolicy/tests/audiopolicymanager_tests.cpp
+++ b/services/audiopolicy/tests/audiopolicymanager_tests.cpp
@@ -32,6 +32,7 @@
#include <media/RecordingActivityTracker.h>
#include <utils/Log.h>
#include <utils/Vector.h>
+#include <cutils/multiuser.h>
#include "AudioPolicyInterface.h"
#include "AudioPolicyManagerTestClient.h"
@@ -51,6 +52,13 @@
return criterion;
}
+AudioMixMatchCriterion createUserIdCriterion(int userId, bool exclude = false) {
+ AudioMixMatchCriterion criterion;
+ criterion.mValue.mUserId = userId;
+ criterion.mRule = exclude ? RULE_EXCLUDE_USERID : RULE_MATCH_USERID;
+ return criterion;
+}
+
AudioMixMatchCriterion createUsageCriterion(audio_usage_t usage, bool exclude = false) {
AudioMixMatchCriterion criterion;
criterion.mValue.mUsage = usage;
@@ -2161,11 +2169,23 @@
std::string getConfigFile() override { return sCarConfig; }
static const std::string sCarConfig;
+ static const std::string sCarBusMediaOutput;
+ static const std::string sCarBusNavigationOutput;
+ static const std::string sCarRearZoneOneOutput;
+ static const std::string sCarRearZoneTwoOutput;
};
const std::string AudioPolicyManagerCarTest::sCarConfig =
AudioPolicyManagerCarTest::sExecutableDir + "test_car_ap_atmos_offload_configuration.xml";
+const std::string AudioPolicyManagerCarTest::sCarBusMediaOutput = "bus0_media_out";
+
+const std::string AudioPolicyManagerCarTest::sCarBusNavigationOutput = "bus1_navigation_out";
+
+const std::string AudioPolicyManagerCarTest::sCarRearZoneOneOutput = "bus100_audio_zone_1";
+
+const std::string AudioPolicyManagerCarTest::sCarRearZoneTwoOutput = "bus200_audio_zone_2";
+
TEST_F(AudioPolicyManagerCarTest, InitSuccess) {
// SetUp must finish with no assertions.
}
@@ -2177,9 +2197,8 @@
TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrAtmosOutputAfterRegisteringPolicyMix) {
status_t ret;
audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
- const std::string kTestBusMediaOutput = "bus0_media_out";
ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
- AUDIO_DEVICE_OUT_BUS, kTestBusMediaOutput, audioConfig);
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig);
ASSERT_EQ(NO_ERROR, ret);
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
@@ -2207,6 +2226,415 @@
ASSERT_EQ(k48000SamplingRate, outDesc->getSamplingRate());
}
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrAfterRegisteringPolicyMix) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ audio_port_v7 mediaDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusMediaOutput, &mediaDevicePort));
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute);
+
+ ASSERT_EQ(mediaDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrWithSelectedOutputAfterRegisteringPolicyMix) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute);
+
+ ASSERT_EQ(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrWithSelectedOutputAfterUserAffinities) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddrVector outputDevices = {mediaOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, outputDevices);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute);
+
+ ASSERT_NE(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrWithExcludeUserIdCriteria) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false),
+ createUserIdCriterion(/* userId */ 0, /* exclude */ true)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t navigationAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, navigationAttribute);
+
+ ASSERT_NE(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrWithSelectedOutputExcludeUserIdCriteria) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false),
+ createUserIdCriterion(0 /* userId */, /* exclude */ true)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute);
+
+ ASSERT_EQ(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest,
+ GetOutputForAttrWithMatchingMixAndSelectedOutputAfterUserAffinities) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddr navOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput);
+ const AudioDeviceTypeAddrVector outputDevices = {mediaOutputDevice, navOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, outputDevices);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute);
+
+ ASSERT_EQ(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest,
+ GetOutputForAttrWithNoMatchingMixAndSelectedOutputAfterUserAffinities) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddr navOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput);
+ const AudioDeviceTypeAddrVector outputDevices = {mediaOutputDevice, navOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, outputDevices);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t alarmAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, alarmAttribute);
+
+ ASSERT_EQ(navDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest,
+ GetOutputForAttrWithMatMixAfterUserAffinitiesForOneUser) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneOneOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneTwoOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddrVector primaryZoneDevices = {mediaOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, primaryZoneDevices);
+ audio_port_v7 primaryZoneDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusMediaOutput, &primaryZoneDevicePort));
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+ uid_t user11AppUid = multiuser_get_uid(/* user_id */ 11, /* app_id */ 12345);
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute,
+ AUDIO_SESSION_NONE, user11AppUid);
+
+ ASSERT_EQ(primaryZoneDevicePort.id, selectedDeviceId);
+}
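
A quick aside on the UID arithmetic in these multi-user tests, for readers without cutils/multiuser.h in front of them:

    // multiuser_get_uid(userId, appId) == userId * AID_USER_OFFSET + (appId % AID_USER_OFFSET),
    // with AID_USER_OFFSET == 100000, so user 11 / app 12345 maps to UID 1112345. The policy
    // manager recovers the user id from that UID when applying the affinities set above.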
+
+TEST_F(AudioPolicyManagerCarTest,
+ GetOutputForAttrWithMatMixAfterUserAffinitiesForTwoUsers) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneOneOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneTwoOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddrVector primaryZoneDevices = {mediaOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, primaryZoneDevices);
+ const AudioDeviceTypeAddr secondaryOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarRearZoneOneOutput);
+ const AudioDeviceTypeAddrVector secondaryZoneDevices = {secondaryOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 11, secondaryZoneDevices);
+ audio_port_v7 secondaryZoneDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarRearZoneOneOutput, &secondaryZoneDevicePort));
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+ uid_t user11AppUid = multiuser_get_uid(/* user_id */ 11, /* app_id */ 12345);
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute,
+ AUDIO_SESSION_NONE, user11AppUid);
+
+ ASSERT_EQ(secondaryZoneDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest,
+ GetOutputForAttrWithMatMixAfterUserAffinitiesForThreeUsers) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneOneOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarRearZoneTwoOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddrVector primaryZoneDevices = {mediaOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, primaryZoneDevices);
+ const AudioDeviceTypeAddr secondaryOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarRearZoneOneOutput);
+ const AudioDeviceTypeAddrVector secondaryZoneDevices = {secondaryOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 11, secondaryZoneDevices);
+ const AudioDeviceTypeAddr tertiaryOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarRearZoneTwoOutput);
+ const AudioDeviceTypeAddrVector tertiaryZoneDevices = {tertiaryOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 15, tertiaryZoneDevices);
+ audio_port_v7 tertiaryZoneDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarRearZoneTwoOutput, &tertiaryZoneDevicePort));
+ audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t mediaAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_MEDIA,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+ uid_t user15AppUid = multiuser_get_uid(/* user_id */ 15, /* app_id */ 12345);
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, mediaAttribute,
+ AUDIO_SESSION_NONE, user15AppUid);
+
+ ASSERT_EQ(tertiaryZoneDevicePort.id, selectedDeviceId);
+}
+
+TEST_F(AudioPolicyManagerCarTest, GetOutputForAttrWithNoMatchingMix) {
+ status_t ret;
+ audio_config_t audioConfig = AUDIO_CONFIG_INITIALIZER;
+ audioConfig.channel_mask = AUDIO_CHANNEL_OUT_STEREO;
+ audioConfig.format = AUDIO_FORMAT_PCM_16_BIT;
+ audioConfig.sample_rate = k48000SamplingRate;
+ std::vector<AudioMixMatchCriterion> mediaMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_MEDIA, /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput, audioConfig, mediaMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ std::vector<AudioMixMatchCriterion> navMatchCriteria = {
+ createUsageCriterion(AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE,
+ /*exclude=*/ false)};
+ ret = addPolicyMix(MIX_TYPE_PLAYERS, MIX_ROUTE_FLAG_RENDER,
+ AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput, audioConfig, navMatchCriteria);
+ ASSERT_EQ(NO_ERROR, ret);
+ const AudioDeviceTypeAddr mediaOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusMediaOutput);
+ const AudioDeviceTypeAddr navOutputDevice(AUDIO_DEVICE_OUT_BUS, sCarBusNavigationOutput);
+ const AudioDeviceTypeAddrVector outputDevices = {mediaOutputDevice, navOutputDevice};
+ mManager->setUserIdDeviceAffinities(/* userId */ 0, outputDevices);
+ audio_port_v7 navDevicePort;
+ ASSERT_TRUE(findDevicePort(AUDIO_PORT_ROLE_SINK, AUDIO_DEVICE_OUT_BUS,
+ sCarBusNavigationOutput, &navDevicePort));
+ audio_port_handle_t selectedDeviceId = navDevicePort.id;
+ audio_io_handle_t output;
+ audio_port_handle_t portId;
+ const audio_attributes_t alarmAttribute = {
+ AUDIO_CONTENT_TYPE_UNKNOWN, AUDIO_USAGE_ALARM,
+ AUDIO_SOURCE_DEFAULT, AUDIO_FLAG_NONE, ""};
+
+ getOutputForAttr(&selectedDeviceId, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ k48000SamplingRate, AUDIO_OUTPUT_FLAG_DIRECT, &output, &portId, alarmAttribute);
+
+ ASSERT_EQ(navDevicePort.id, selectedDeviceId);
+}
+
class AudioPolicyManagerTVTest : public AudioPolicyManagerTestWithConfigurationFile {
protected:
std::string getConfigFile() override { return sTvConfig; }
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index 7b1759e..1c922ce 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -143,6 +143,7 @@
"libhidlbase",
"libimage_io",
"libjpeg",
+ "libjpegrecoverymap",
"libmedia_codeclist",
"libmedia_omx",
"libmemunreachable",
@@ -182,9 +183,6 @@
"libbinderthreadstateutils",
"media_permission-aidl-cpp",
"libcameraservice_device_independent",
- "libjpegrecoverymap",
- "libjpegencoder",
- "libjpegdecoder",
],
export_shared_lib_headers: [
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index caa1750..c812cd7 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -37,6 +37,7 @@
#include <android-base/macros.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
+#include <android/permission/PermissionChecker.h>
#include <binder/ActivityManager.h>
#include <binder/AppOpsManager.h>
#include <binder/IPCThreadState.h>
@@ -81,6 +82,9 @@
namespace {
const char* kPermissionServiceName = "permission";
+ const char* kActivityServiceName = "activity";
+ const char* kSensorPrivacyServiceName = "sensor_privacy";
+ const char* kAppopsServiceName = "appops";
}; // namespace anonymous
namespace android {
@@ -167,6 +171,20 @@
return (CameraThreadState::getCallingUid() < AID_APP_START);
}
+// Enable processes with isolated AID to request the binder
+void CameraService::instantiate() {
+ CameraService::publish(true);
+}
+
+void CameraService::onServiceRegistration(const String16& name, const sp<IBinder>&) {
+ if (name != String16(kAppopsServiceName)) {
+ return;
+ }
+
+ ALOGV("appops service registered. setting camera audio restriction");
+ mAppOps.setCameraAudioRestriction(mAudioRestriction);
+}
+
void CameraService::onFirstRef()
{
@@ -191,10 +209,23 @@
mSensorPrivacyPolicy = new SensorPrivacyPolicy(this);
mSensorPrivacyPolicy->registerSelf();
mInjectionStatusListener = new InjectionStatusListener(this);
- mAppOps.setCameraAudioRestriction(mAudioRestriction);
+
+ // The appops function setCameraAudioRestriction uses getService, which
+ // blocks until the appops service is ready. To keep cameraservice
+ // available early in boot, use the non-blocking checkService instead and
+ // register for a notification if the service is not up yet.
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->checkService(String16(kAppopsServiceName));
+ if (!binder) {
+ sm->registerForNotifications(String16(kAppopsServiceName), this);
+ } else {
+ mAppOps.setCameraAudioRestriction(mAudioRestriction);
+ }
+
sp<HidlCameraService> hcs = HidlCameraService::getInstance(this);
if (hcs->registerAsService() != android::OK) {
- ALOGE("%s: Failed to register default android.frameworks.cameraservice.service@1.0",
+ // Deprecated, so it will fail to register on newer devices
+ ALOGW("%s: Did not register default android.frameworks.cameraservice.service@2.2",
__FUNCTION__);
}
@@ -666,11 +697,18 @@
broadcastTorchModeStatus(cameraId, newStatus, systemCameraKind);
}
-static bool hasPermissionsForSystemCamera(int callingPid, int callingUid,
- bool logPermissionFailure = false) {
- return checkPermission(sSystemCameraPermission, callingPid, callingUid,
- logPermissionFailure) &&
- checkPermission(sCameraPermission, callingPid, callingUid);
+static bool hasPermissionsForSystemCamera(int callingPid, int callingUid) {
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+ attributionSource.pid = callingPid;
+ attributionSource.uid = callingUid;
+ bool checkPermissionForSystemCamera = permissionChecker.checkPermissionForPreflight(
+ sSystemCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
+ bool checkPermissionForCamera = permissionChecker.checkPermissionForPreflight(
+ sCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
+ return checkPermissionForSystemCamera && checkPermissionForCamera;
}
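
The same PermissionChecker preflight pattern recurs several more times in this file. A hypothetical wrapper (shown here for illustration only, not added by the patch) captures the shape once: a permission is treated as usable unless preflight reports a hard denial, leaving soft denials to the later app-op checks.

    // Hypothetical helper, not part of the patch.
    static bool isPermissionUsable(const String16& permission, int pid, int uid) {
        permission::PermissionChecker permissionChecker;
        AttributionSourceState attributionSource{};
        attributionSource.pid = pid;
        attributionSource.uid = uid;
        return permissionChecker.checkPermissionForPreflight(
                permission, attributionSource, String16(), AppOpsManager::OP_NONE)
                != permission::PermissionChecker::PERMISSION_HARD_DENIED;
    }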
Status CameraService::getNumberOfCameras(int32_t type, int32_t* numCameras) {
@@ -749,8 +787,14 @@
const std::vector<std::string> *deviceIds = &mNormalDeviceIdsWithoutSystemCamera;
auto callingPid = CameraThreadState::getCallingPid();
auto callingUid = CameraThreadState::getCallingUid();
- if (checkPermission(sSystemCameraPermission, callingPid, callingUid,
- /*logPermissionFailure*/false) || getpid() == callingPid) {
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+ attributionSource.pid = callingPid;
+ attributionSource.uid = callingUid;
+ bool checkPermissionForSystemCamera = permissionChecker.checkPermissionForPreflight(
+ sSystemCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
+ if (checkPermissionForSystemCamera || getpid() == callingPid) {
deviceIds = &mNormalDeviceIds;
}
if (cameraIdInt < 0 || cameraIdInt >= static_cast<int>(deviceIds->size())) {
@@ -821,9 +865,16 @@
// If it's not calling from cameraserver, check the permission only if
// android.permission.CAMERA is required. If android.permission.SYSTEM_CAMERA was needed,
// it would've already been checked in shouldRejectSystemCameraConnection.
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+ attributionSource.pid = callingPid;
+ attributionSource.uid = callingUid;
+ bool checkPermissionForCamera = permissionChecker.checkPermissionForPreflight(
+ sCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
if ((callingPid != getpid()) &&
(deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
- !checkPermission(sCameraPermission, callingPid, callingUid)) {
+ !checkPermissionForCamera) {
res = cameraInfo->removePermissionEntries(
mCameraProviderManager->getProviderTagIdLocked(String8(cameraId).string()),
&tagsRemoved);
@@ -1257,6 +1308,9 @@
Status CameraService::validateClientPermissionsLocked(const String8& cameraId,
const String8& clientName8, int& clientUid, int& clientPid,
/*out*/int& originalClientPid) const {
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+
int callingPid = CameraThreadState::getCallingPid();
int callingUid = CameraThreadState::getCallingUid();
@@ -1303,9 +1357,14 @@
// If it's not calling from cameraserver, check the permission if the
// device isn't a system only camera (shouldRejectSystemCameraConnection already checks for
// android.permission.SYSTEM_CAMERA for system only camera devices).
+ attributionSource.pid = clientPid;
+ attributionSource.uid = clientUid;
+ attributionSource.packageName = clientName8;
+ bool checkPermissionForCamera = permissionChecker.checkPermissionForPreflight(
+ sCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
if (callingPid != getpid() &&
- (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) &&
- !checkPermission(sCameraPermission, clientPid, clientUid)) {
+ (deviceKind != SystemCameraKind::SYSTEM_ONLY_CAMERA) && !checkPermissionForCamera) {
ALOGE("Permission Denial: can't use the camera pid=%d, uid=%d", clientPid, clientUid);
return STATUS_ERROR_FMT(ERROR_PERMISSION_DENIED,
"Caller \"%s\" (PID %d, UID %d) cannot open camera \"%s\" without camera permission",
@@ -1694,7 +1753,7 @@
// characteristics) even if clients don't have android.permission.CAMERA. We do not want the
// same behavior for system camera devices.
if (!systemClient && systemCameraKind == SystemCameraKind::SYSTEM_ONLY_CAMERA &&
- !hasPermissionsForSystemCamera(cPid, cUid, /*logPermissionFailure*/true)) {
+ !hasPermissionsForSystemCamera(cPid, cUid)) {
ALOGW("Rejecting access to system only camera %s, inadequete permissions",
cameraId.c_str());
return true;
@@ -2073,6 +2132,7 @@
}
client->setImageDumpMask(mImageDumpMask);
+ client->setStreamUseCaseOverrides(mStreamUseCaseOverrides);
} // lock is destroyed, allow further connect calls
// Important: release the mutex here so the client can call back into the service from its
@@ -2647,7 +2707,14 @@
// Check for camera permissions
int callingPid = CameraThreadState::getCallingPid();
int callingUid = CameraThreadState::getCallingUid();
- if ((callingPid != getpid()) && !checkPermission(sCameraPermission, callingPid, callingUid)) {
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+ attributionSource.pid = callingPid;
+ attributionSource.uid = callingUid;
+ bool checkPermissionForCamera = permissionChecker.checkPermissionForPreflight(
+ sCameraPermission, attributionSource, String16(), AppOpsManager::OP_NONE)
+ != permission::PermissionChecker::PERMISSION_HARD_DENIED;
+ if ((callingPid != getpid()) && !checkPermissionForCamera) {
ALOGE("%s: pid %d doesn't have camera permissions", __FUNCTION__, callingPid);
return STATUS_ERROR(ERROR_PERMISSION_DENIED,
"android.permission.CAMERA needed to call"
@@ -2694,8 +2761,13 @@
auto clientUid = CameraThreadState::getCallingUid();
auto clientPid = CameraThreadState::getCallingPid();
- bool openCloseCallbackAllowed = checkPermission(sCameraOpenCloseListenerPermission,
- clientPid, clientUid, /*logPermissionFailure*/false);
+ permission::PermissionChecker permissionChecker;
+ AttributionSourceState attributionSource{};
+ attributionSource.uid = clientUid;
+ attributionSource.pid = clientPid;
+ bool openCloseCallbackAllowed = permissionChecker.checkPermissionForPreflight(
+ sCameraOpenCloseListenerPermission, attributionSource, String16(),
+ AppOpsManager::OP_NONE) != permission::PermissionChecker::PERMISSION_HARD_DENIED;
Mutex::Autolock lock(mServiceLock);
@@ -3531,6 +3603,11 @@
mClientPackageName);
bool isCameraPrivacyEnabled =
sCameraService->mSensorPrivacyPolicy->isCameraPrivacyEnabled();
+ // We don't want to return EACCES if camera privacy is enabled.
+ // We prefer to successfully open the camera and perform camera muting
+ // or blocking in connectHelper, as handleAppOpMode can be called before the
+ // connection has been fully established, and at that time the camera muting
+ // capabilities are unknown.
if (!isUidActive || !isCameraPrivacyEnabled) {
ALOGI("Camera %s: Access for \"%s\" has been restricted",
mCameraIdStr.string(), String8(mClientPackageName).string());
@@ -3785,7 +3862,7 @@
// UidPolicy
// ----------------------------------------------------------------------------
-void CameraService::UidPolicy::registerSelf() {
+void CameraService::UidPolicy::registerWithActivityManager() {
Mutex::Autolock _l(mUidLock);
if (mRegistered) return;
@@ -3802,6 +3879,27 @@
}
}
+void CameraService::UidPolicy::onServiceRegistration(const String16& name, const sp<IBinder>&) {
+ if (name != String16(kActivityServiceName)) {
+ return;
+ }
+
+ registerWithActivityManager();
+}
+
+void CameraService::UidPolicy::registerSelf() {
+ // Use checkService to see if the activity service is available.
+ // If it is not, register for notifications instead of blocking
+ // until the service is ready.
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->checkService(String16(kActivityServiceName));
+ if (!binder) {
+ sm->registerForNotifications(String16(kActivityServiceName), this);
+ } else {
+ registerWithActivityManager();
+ }
+}
+
void CameraService::UidPolicy::unregisterSelf() {
Mutex::Autolock _l(mUidLock);
@@ -4015,7 +4113,9 @@
// ----------------------------------------------------------------------------
// SensorPrivacyPolicy
// ----------------------------------------------------------------------------
-void CameraService::SensorPrivacyPolicy::registerSelf() {
+
+void CameraService::SensorPrivacyPolicy::registerWithSensorPrivacyManager()
+{
Mutex::Autolock _l(mSensorPrivacyLock);
if (mRegistered) {
return;
@@ -4030,6 +4130,27 @@
}
}
+void CameraService::SensorPrivacyPolicy::onServiceRegistration(const String16& name,
+ const sp<IBinder>&) {
+ if (name != String16(kSensorPrivacyServiceName)) {
+ return;
+ }
+
+ registerWithSensorPrivacyManager();
+}
+
+void CameraService::SensorPrivacyPolicy::registerSelf() {
+ // Use checkService to see if the sensor_privacy service is available.
+ // If it is not, register for a notification instead of blocking.
+ sp<IServiceManager> sm = defaultServiceManager();
+ sp<IBinder> binder = sm->checkService(String16(kSensorPrivacyServiceName));
+ if (!binder) {
+ sm->registerForNotifications(String16(kSensorPrivacyServiceName), this);
+ } else {
+ registerWithSensorPrivacyManager();
+ }
+}
+
void CameraService::SensorPrivacyPolicy::unregisterSelf() {
Mutex::Autolock _l(mSensorPrivacyLock);
mSpm.removeSensorPrivacyListener(this);
@@ -4039,6 +4160,10 @@
}
bool CameraService::SensorPrivacyPolicy::isSensorPrivacyEnabled() {
+ if (!mRegistered) {
+ registerWithSensorPrivacyManager();
+ }
+
Mutex::Autolock _l(mSensorPrivacyLock);
return mSensorPrivacyEnabled;
}
@@ -4451,6 +4576,13 @@
String8 activeClientString = mActiveClientManager.toString();
dprintf(fd, "Active Camera Clients:\n%s", activeClientString.string());
dprintf(fd, "Allowed user IDs: %s\n", toString(mAllowedUsers).string());
+ if (mStreamUseCaseOverrides.size() > 0) {
+ dprintf(fd, "Active stream use case overrides:");
+ for (int64_t useCaseOverride : mStreamUseCaseOverrides) {
+ dprintf(fd, " %" PRId64, useCaseOverride);
+ }
+ dprintf(fd, "\n");
+ }
dumpEventLog(fd);
@@ -4954,6 +5086,11 @@
return handleGetImageDumpMask(out);
} else if (args.size() >= 2 && args[0] == String16("set-camera-mute")) {
return handleSetCameraMute(args);
+ } else if (args.size() >= 2 && args[0] == String16("set-stream-use-case-override")) {
+ return handleSetStreamUseCaseOverrides(args);
+ } else if (args.size() >= 1 && args[0] == String16("clear-stream-use-case-override")) {
+ handleClearStreamUseCaseOverrides();
+ return OK;
} else if (args.size() >= 2 && args[0] == String16("watch")) {
return handleWatchCommand(args, in, out);
} else if (args.size() >= 2 && args[0] == String16("set-watchdog")) {
@@ -5160,6 +5297,43 @@
return OK;
}
+status_t CameraService::handleSetStreamUseCaseOverrides(const Vector<String16>& args) {
+ std::vector<int64_t> useCasesOverride;
+ for (size_t i = 1; i < args.size(); i++) {
+ int64_t useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ String8 arg8 = String8(args[i]);
+ if (arg8 == "DEFAULT") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT;
+ } else if (arg8 == "PREVIEW") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW;
+ } else if (arg8 == "STILL_CAPTURE") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_STILL_CAPTURE;
+ } else if (arg8 == "VIDEO_RECORD") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_RECORD;
+ } else if (arg8 == "PREVIEW_VIDEO_STILL") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_PREVIEW_VIDEO_STILL;
+ } else if (arg8 == "VIDEO_CALL") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_VIDEO_CALL;
+ } else if (arg8 == "CROPPED_RAW") {
+ useCase = ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_CROPPED_RAW;
+ } else {
+ ALOGE("%s: Invalid stream use case %s", __FUNCTION__, arg8.c_str());
+ return BAD_VALUE;
+ }
+ useCasesOverride.push_back(useCase);
+ }
+
+ Mutex::Autolock lock(mServiceLock);
+ mStreamUseCaseOverrides = std::move(useCasesOverride);
+
+ return OK;
+}
+
+void CameraService::handleClearStreamUseCaseOverrides() {
+ Mutex::Autolock lock(mServiceLock);
+ mStreamUseCaseOverrides.clear();
+}
+
status_t CameraService::handleWatchCommand(const Vector<String16>& args, int inFd, int outFd) {
if (args.size() >= 3 && args[1] == String16("start")) {
return startWatchingTags(args, outFd);
@@ -5517,6 +5691,15 @@
" Valid values 0=OFF, 1=ON for JPEG\n"
" get-image-dump-mask returns the current image-dump-mask value\n"
" set-camera-mute <0/1> enable or disable camera muting\n"
+ " set-stream-use-case-override <usecase1> <usecase2> ... override stream use cases\n"
+ " Use cases are applied in descending resolution order: usecase1 is assigned to\n"
+ " the largest resolution, usecase2 to the 2nd largest resolution, and so on.\n"
+ " If there are fewer use cases than streams, the last use case is assigned to\n"
+ " all remaining streams. For multiple streams with the same resolution, the\n"
+ " tie-breaker order is (JPEG, RAW, YUV, and PRIV).\n"
+ " Valid values are (case sensitive): DEFAULT, PREVIEW, STILL_CAPTURE, VIDEO_RECORD,\n"
+ " PREVIEW_VIDEO_STILL, VIDEO_CALL, CROPPED_RAW\n"
+ " clear-stream-use-case-override clear the stream use case override\n"
" watch <start|stop|dump|print|clear> manages tag monitoring in connected clients\n"
" help print this message\n");
}
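
For reference, these commands are reached through the camera service's shell interface (service name "media.camera", per getServiceName()); an invocation would look roughly like the following, with the exact device shell syntax left as an assumption:

    adb shell cmd media.camera set-stream-use-case-override STILL_CAPTURE PREVIEW
    adb shell cmd media.camera clear-stream-use-case-override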
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 4f92f15..59c5534 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -71,7 +71,8 @@
public BinderService<CameraService>,
public virtual ::android::hardware::BnCameraService,
public virtual IBinder::DeathRecipient,
- public virtual CameraProviderManager::StatusListener
+ public virtual CameraProviderManager::StatusListener,
+ public virtual IServiceManager::LocalRegistrationCallback
{
friend class BinderService<CameraService>;
friend class CameraOfflineSessionClient;
@@ -98,9 +99,15 @@
// Event log ID
static const int SN_EVENT_LOG_ID = 0x534e4554;
+ // Register camera service
+ static void instantiate();
+
// Implementation of BinderService<T>
static char const* getServiceName() { return "media.camera"; }
+ // Implementation of IServiceManager::LocalRegistrationCallback
+ virtual void onServiceRegistration(const String16& name, const sp<IBinder>& binder) override;
+
// Non-null arguments for cameraServiceProxyWrapper should be provided for
// testing purposes only.
CameraService(std::shared_ptr<CameraServiceProxyWrapper>
@@ -355,6 +362,13 @@
// Set Camera service watchdog
virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+ // Set stream use case overrides
+ virtual void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) = 0;
+
+ // Clear stream use case overrides
+ virtual void clearStreamUseCaseOverrides() = 0;
+
// The injection camera session to replace the internal camera
// session.
virtual status_t injectCamera(const String8& injectedCamId,
@@ -707,7 +721,10 @@
// Observer for UID lifecycle enforcing that UIDs in idle
// state cannot use the camera to protect user privacy.
- class UidPolicy : public BnUidObserver, public virtual IBinder::DeathRecipient {
+ class UidPolicy :
+ public BnUidObserver,
+ public virtual IBinder::DeathRecipient,
+ public virtual IServiceManager::LocalRegistrationCallback {
public:
explicit UidPolicy(sp<CameraService> service)
: mRegistered(false), mService(service) {}
@@ -732,12 +749,16 @@
void registerMonitorUid(uid_t uid);
void unregisterMonitorUid(uid_t uid);
+ // Implementation of IServiceManager::LocalRegistrationCallback
+ virtual void onServiceRegistration(const String16& name,
+ const sp<IBinder>& binder) override;
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
private:
bool isUidActiveLocked(uid_t uid, String16 callingPackage);
int32_t getProcStateLocked(uid_t uid);
void updateOverrideUid(uid_t uid, String16 callingPackage, bool active, bool insert);
+ void registerWithActivityManager();
struct MonitoredUid {
int32_t procState;
@@ -757,7 +778,8 @@
// If sensor privacy is enabled then all apps, including those that are active, should be
// prevented from accessing the camera.
class SensorPrivacyPolicy : public hardware::BnSensorPrivacyListener,
- public virtual IBinder::DeathRecipient {
+ public virtual IBinder::DeathRecipient,
+ public virtual IServiceManager::LocalRegistrationCallback {
public:
explicit SensorPrivacyPolicy(wp<CameraService> service)
: mService(service), mSensorPrivacyEnabled(false), mRegistered(false) {}
@@ -771,6 +793,9 @@
binder::Status onSensorPrivacyChanged(int toggleType, int sensor,
bool enabled);
+ // Implementation of IServiceManager::LocalRegistrationCallback
+ virtual void onServiceRegistration(const String16& name,
+ const sp<IBinder>& binder) override;
// IBinder::DeathRecipient implementation
virtual void binderDied(const wp<IBinder> &who);
@@ -782,6 +807,7 @@
bool mRegistered;
bool hasCameraPrivacyFeature();
+ void registerWithSensorPrivacyManager();
};
sp<UidPolicy> mUidPolicy;
@@ -1257,6 +1283,12 @@
// Set the camera mute state
status_t handleSetCameraMute(const Vector<String16>& args);
+ // Set the stream use case overrides
+ status_t handleSetStreamUseCaseOverrides(const Vector<String16>& args);
+
+ // Clear the stream use case overrides
+ void handleClearStreamUseCaseOverrides();
+
// Handle 'watch' command as passed through 'cmd'
status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
@@ -1355,6 +1387,9 @@
// Camera Service watchdog flag
bool mCameraServiceWatchdogEnabled = true;
+ // Current stream use case overrides
+ std::vector<int64_t> mStreamUseCaseOverrides;
+
/**
* A listener class that implements the IBinder::DeathRecipient interface
* for use to call back the error state injected by the external camera, and
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.cpp b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
index 74497d1..e80064a 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.cpp
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
@@ -17,6 +17,7 @@
#define LOG_TAG "CameraServiceWatchdog"
#include "CameraServiceWatchdog.h"
+#include "utils/CameraServiceProxyWrapper.h"
namespace android {
@@ -43,6 +44,8 @@
if (tidToCycleCounterMap[currentThreadId] >= mMaxCycles) {
ALOGW("CameraServiceWatchdog triggering abort for pid: %d tid: %d", getpid(),
currentThreadId);
+ mCameraServiceProxyWrapper->logClose(mCameraId, 0 /*latencyMs*/,
+ true /*deviceError*/);
// We use abort here so we can get a tombstone for better
// debugging.
abort();
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index e35d69e..6617873 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -32,10 +32,13 @@
#include <chrono>
#include <thread>
#include <time.h>
+#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Log.h>
#include <unordered_map>
+#include "utils/CameraServiceProxyWrapper.h"
+
// Used to wrap the call of interest in start and stop calls
#define WATCH(toMonitor) watchThread([&]() { return toMonitor;}, gettid())
#define WATCH_CUSTOM_TIMER(toMonitor, cycles, cycleLength) \
@@ -50,12 +53,18 @@
class CameraServiceWatchdog : public Thread {
public:
- explicit CameraServiceWatchdog() : mPause(true), mMaxCycles(kMaxCycles),
- mCycleLengthMs(kCycleLengthMs), mEnabled(true) {};
+ explicit CameraServiceWatchdog(const String8 &cameraId,
+ std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper) :
+ mCameraId(cameraId), mPause(true), mMaxCycles(kMaxCycles),
+ mCycleLengthMs(kCycleLengthMs), mEnabled(true),
+ mCameraServiceProxyWrapper(cameraServiceProxyWrapper) {};
- explicit CameraServiceWatchdog (size_t maxCycles, uint32_t cycleLengthMs, bool enabled) :
- mPause(true), mMaxCycles(maxCycles), mCycleLengthMs(cycleLengthMs), mEnabled(enabled)
- {};
+ explicit CameraServiceWatchdog (const String8 &cameraId, size_t maxCycles,
+ uint32_t cycleLengthMs, bool enabled,
+ std::shared_ptr<CameraServiceProxyWrapper> cameraServiceProxyWrapper) :
+ mCameraId(cameraId), mPause(true), mMaxCycles(maxCycles),
+ mCycleLengthMs(cycleLengthMs), mEnabled(enabled),
+ mCameraServiceProxyWrapper(cameraServiceProxyWrapper) {};
virtual ~CameraServiceWatchdog() {};
@@ -75,8 +84,8 @@
// Lock for mEnabled
mEnabledLock.lock();
- sp<CameraServiceWatchdog> tempWatchdog =
- new CameraServiceWatchdog(cycles, cycleLength, mEnabled);
+ sp<CameraServiceWatchdog> tempWatchdog = new CameraServiceWatchdog(
+ mCameraId, cycles, cycleLength, mEnabled, mCameraServiceProxyWrapper);
mEnabledLock.unlock();
status_t status = tempWatchdog->run("CameraServiceWatchdog");
@@ -134,11 +143,14 @@
Mutex mWatchdogLock; // Lock for condition variable
Mutex mEnabledLock; // Lock for enabled status
Condition mWatchdogCondition; // Condition variable for stop/start
+ String8 mCameraId; // Camera Id the watchdog belongs to
bool mPause; // True if tid map is empty
uint32_t mMaxCycles; // Max cycles
uint32_t mCycleLengthMs; // Length of time elapsed per cycle
bool mEnabled; // True if watchdog is enabled
+ std::shared_ptr<CameraServiceProxyWrapper> mCameraServiceProxyWrapper;
+
std::unordered_map<uint32_t, uint32_t> tidToCycleCounterMap; // Thread Id to cycle counter map
};
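
A hedged usage sketch of the WATCH macro defined above, as it would appear at a call site; the wrapped HAL call is illustrative, not taken from this patch:

    // The wrapped call runs under watchThread(); if it stays stuck for
    // mMaxCycles * mCycleLengthMs, the watchdog now logs a device-error close for
    // mCameraId through the proxy wrapper before aborting the process.
    status_t res = mCameraServiceWatchdog->WATCH(mInterface->flush());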
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index d447fe0..23a70db 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -501,12 +501,13 @@
ALOGV("Camera %d: Disconnecting device", mCameraId);
+ bool hasDeviceError = mDevice->hasDeviceError();
mDevice->disconnect();
CameraService::Client::disconnect();
int32_t closeLatencyMs = ns2ms(systemTime() - startTime);
- mCameraServiceProxyWrapper->logClose(mCameraIdStr, closeLatencyMs);
+ mCameraServiceProxyWrapper->logClose(mCameraIdStr, closeLatencyMs, hasDeviceError);
return res;
}
@@ -2380,6 +2381,15 @@
return mDevice->setCameraMute(enabled);
}
+void Camera2Client::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) {
+ mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void Camera2Client::clearStreamUseCaseOverrides() {
+ mDevice->clearStreamUseCaseOverrides();
+}
+
status_t Camera2Client::waitUntilCurrentRequestIdLocked() {
int32_t activeRequestId = mStreamingProcessor->getActiveRequestId();
if (activeRequestId != 0) {
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index c5324db..f035fea 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -93,6 +93,10 @@
virtual status_t setCameraServiceWatchdog(bool enabled);
+ virtual void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides);
+ virtual void clearStreamUseCaseOverrides();
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 281d971..34b3948 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1781,6 +1781,15 @@
return mDevice->setCameraMute(enabled);
}
+void CameraDeviceClient::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) {
+ mDevice->setStreamUseCaseOverrides(useCaseOverrides);
+}
+
+void CameraDeviceClient::clearStreamUseCaseOverrides() {
+ mDevice->clearStreamUseCaseOverrides();
+}
+
binder::Status CameraDeviceClient::switchToOffline(
const sp<hardware::camera2::ICameraDeviceCallbacks>& cameraCb,
const std::vector<int>& offlineOutputIds,
@@ -2091,10 +2100,11 @@
mCompositeStreamMap.clear();
}
+ bool hasDeviceError = mDevice->hasDeviceError();
Camera2ClientBase::detachDevice();
int32_t closeLatencyMs = ns2ms(systemTime() - startTime);
- mCameraServiceProxyWrapper->logClose(mCameraIdStr, closeLatencyMs);
+ mCameraServiceProxyWrapper->logClose(mCameraIdStr, closeLatencyMs, hasDeviceError);
}
/** Device-related methods */
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index 0c87872..36c627a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -214,6 +214,9 @@
virtual status_t setCameraServiceWatchdog(bool enabled);
+ virtual void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides);
+ virtual void clearStreamUseCaseOverrides() override;
+
/**
* Device listener interface
*/
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index ea90987..c260cfe 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -94,6 +94,13 @@
return INVALID_OPERATION;
}
+void CameraOfflineSessionClient::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& /*useCaseOverrides*/) {
+}
+
+void CameraOfflineSessionClient::clearStreamUseCaseOverrides() {
+}
+
status_t CameraOfflineSessionClient::dump(int fd, const Vector<String16>& args) {
return BasicClient::dump(fd, args);
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index 89b27f8..906d454 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -89,6 +89,11 @@
status_t setCameraServiceWatchdog(bool enabled) override;
+ void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) override;
+
+ void clearStreamUseCaseOverrides() override;
+
// permissions management
status_t startCameraOps() override;
status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index 8b8dbe8..e652546 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1615,7 +1615,7 @@
return OK;
}
-void HeicCompositeStream::initCopyRowFunction(int32_t width)
+void HeicCompositeStream::initCopyRowFunction([[maybe_unused]] int32_t width)
{
using namespace libyuv;
diff --git a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
index fb8979d..71f52db 100644
--- a/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/JpegRCompositeStream.cpp
@@ -26,7 +26,7 @@
#include "common/CameraProviderManager.h"
#include <gui/Surface.h>
-#include <jpegrecoverymap/recoverymap.h>
+#include <jpegrecoverymap/jpegr.h>
#include <utils/ExifUtils.h>
#include <utils/Log.h>
#include "utils/SessionConfigurationUtils.h"
@@ -285,22 +285,37 @@
}
size_t actualJpegRSize = 0;
- if (mSupportInternalJpeg) {
- recoverymap::jpegr_uncompressed_struct p010;
- recoverymap::jpegr_compressed_struct jpeg;
- recoverymap::jpegr_compressed_struct jpegR;
+ jpegrecoverymap::jpegr_uncompressed_struct p010;
+ jpegrecoverymap::jpegr_compressed_struct jpegR;
+ jpegrecoverymap::JpegR jpegREncoder;
- p010.height = inputFrame.p010Buffer.height;
- p010.width = inputFrame.p010Buffer.width;
- p010.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT2100;
- size_t yChannelSizeInByte = p010.width * p010.height * 2;
- size_t uvChannelSizeInByte = p010.width * p010.height;
- p010.data = new uint8_t[yChannelSizeInByte + uvChannelSizeInByte];
- std::unique_ptr<uint8_t[]> p010_data;
- p010_data.reset(reinterpret_cast<uint8_t*>(p010.data));
- memcpy((uint8_t*)p010.data, inputFrame.p010Buffer.data, yChannelSizeInByte);
- memcpy((uint8_t*)p010.data + yChannelSizeInByte, inputFrame.p010Buffer.dataCb,
- uvChannelSizeInByte);
+ p010.height = inputFrame.p010Buffer.height;
+ p010.width = inputFrame.p010Buffer.width;
+ p010.colorGamut = jpegrecoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT2100;
+ size_t yChannelSizeInByte = p010.width * p010.height * 2;
+ size_t uvChannelSizeInByte = p010.width * p010.height;
+ p010.data = new uint8_t[yChannelSizeInByte + uvChannelSizeInByte];
+ std::unique_ptr<uint8_t[]> p010_data;
+ p010_data.reset(reinterpret_cast<uint8_t*>(p010.data));
+ memcpy((uint8_t*)p010.data, inputFrame.p010Buffer.data, yChannelSizeInByte);
+ memcpy((uint8_t*)p010.data + yChannelSizeInByte, inputFrame.p010Buffer.dataCb,
+ uvChannelSizeInByte);
+
+ jpegR.data = dstBuffer;
+ jpegR.maxLength = maxJpegRBufferSize;
+
+ jpegrecoverymap::jpegr_transfer_function transferFunction;
+ switch (mP010DynamicRange) {
+ case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
+ case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
+ transferFunction = jpegrecoverymap::jpegr_transfer_function::JPEGR_TF_PQ;
+ break;
+ default:
+ transferFunction = jpegrecoverymap::jpegr_transfer_function::JPEGR_TF_HLG;
+ }
+
+ if (mSupportInternalJpeg) {
+ jpegrecoverymap::jpegr_compressed_struct jpeg;
jpeg.data = inputFrame.jpegBuffer.data;
jpeg.length = android::camera2::JpegProcessor::findJpegSize(inputFrame.jpegBuffer.data,
@@ -312,33 +327,12 @@
}
if (mOutputColorSpace == ANDROID_REQUEST_AVAILABLE_COLOR_SPACE_PROFILES_MAP_DISPLAY_P3) {
- jpeg.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_P3;
+ jpeg.colorGamut = jpegrecoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_P3;
} else {
- jpeg.colorGamut = recoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
+ jpeg.colorGamut = jpegrecoverymap::jpegr_color_gamut::JPEGR_COLORGAMUT_BT709;
}
- recoverymap::jpegr_transfer_function transferFunction;
- switch (mP010DynamicRange) {
- case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10:
- case ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HDR10_PLUS:
- transferFunction = recoverymap::jpegr_transfer_function::JPEGR_TF_PQ;
- break;
- default:
- transferFunction = recoverymap::jpegr_transfer_function::JPEGR_TF_HLG;
- }
-
- jpegR.data = dstBuffer;
- jpegR.maxLength = maxJpegRBufferSize;
-
- recoverymap::RecoveryMap recoveryMap;
- res = recoveryMap.encodeJPEGR(&p010, &jpeg, transferFunction, &jpegR);
- if (res != OK) {
- ALOGE("%s: Error trying to encode JPEG/R: %s (%d)", __FUNCTION__, strerror(-res), res);
- return res;
- }
-
- actualJpegRSize = jpegR.length;
- p010_data.release();
+ res = jpegREncoder.encodeJPEGR(&p010, &jpeg, transferFunction, &jpegR);
} else {
const uint8_t* exifBuffer = nullptr;
size_t exifBufferSize = 0;
@@ -352,8 +346,22 @@
} else {
ALOGE("%s: Unable to generate App1 buffer", __FUNCTION__);
}
+
+ jpegrecoverymap::jpegr_exif_struct exif;
+ exif.data = reinterpret_cast<void*>(const_cast<uint8_t*>(exifBuffer));
+ exif.length = exifBufferSize;
+
+ res = jpegREncoder.encodeJPEGR(&p010, transferFunction, &jpegR, jpegQuality, &exif);
}
+ if (res != OK) {
+ ALOGE("%s: Error trying to encode JPEG/R: %s (%d)", __FUNCTION__, strerror(-res), res);
+ return res;
+ }
+
+ actualJpegRSize = jpegR.length;
+ p010_data.release();
+
size_t finalJpegRSize = actualJpegRSize + sizeof(CameraBlob);
if (finalJpegRSize > maxJpegRBufferSize) {
ALOGE("%s: Final jpeg buffer not large enough for the jpeg blob header", __FUNCTION__);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 4555838..0a2819c 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -158,7 +158,8 @@
}
/** Start watchdog thread */
- mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog = new CameraServiceWatchdog(TClientBase::mCameraIdStr,
+ mCameraServiceProxyWrapper);
res = mCameraServiceWatchdog->run("Camera2ClientBaseWatchdog");
if (res != OK) {
ALOGE("%s: Unable to start camera service watchdog thread: %s (%d)",
@@ -359,6 +360,29 @@
}
template <typename TClientBase>
+void Camera2ClientBase<TClientBase>::notifyPhysicalCameraChange(const std::string &physicalId) {
+ // We're only interested in this notification if overrideToPortrait is turned on.
+ if (!TClientBase::mOverrideToPortrait) {
+ return;
+ }
+
+ String8 physicalId8(physicalId.c_str());
+ auto physicalCameraMetadata = mDevice->infoPhysical(physicalId8);
+ auto orientationEntry = physicalCameraMetadata.find(ANDROID_SENSOR_ORIENTATION);
+
+ if (orientationEntry.count == 1) {
+ int orientation = orientationEntry.data.i32[0];
+ int rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_NONE;
+
+ if (orientation == 0 || orientation == 180) {
+ rotateAndCropMode = ANDROID_SCALER_ROTATE_AND_CROP_90;
+ }
+
+ static_cast<TClientBase *>(this)->setRotateAndCropOverride(rotateAndCropMode);
+ }
+}
+
+template <typename TClientBase>
status_t Camera2ClientBase<TClientBase>::notifyActive(float maxPreviewFps) {
if (!mDeviceActive) {
status_t res = TClientBase::startCameraStreamingOps();
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 89347eb..5cf3033 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -77,6 +77,7 @@
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras& resultExtras);
+ virtual void notifyPhysicalCameraChange(const std::string &physicalId) override;
// Returns errors on app ops permission failures
virtual status_t notifyActive(float maxPreviewFps);
virtual void notifyIdle(int64_t /*requestCount*/, int64_t /*resultErrorCount*/,
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 6c30606..6f15653 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -475,11 +475,25 @@
virtual wp<camera3::StatusTracker> getStatusTracker() = 0;
/**
+ * Whether the device is in an error state
+ */
+ virtual bool hasDeviceError() = 0;
+
+ /**
* Set bitmask for image dump flag
*/
void setImageDumpMask(int mask) { mImageDumpMask = mask; }
/**
+ * Set stream use case overrides
+ */
+ void setStreamUseCaseOverrides(const std::vector<int64_t>& useCaseOverrides) {
+ mStreamUseCaseOverrides = useCaseOverrides;
+ }
+
+ void clearStreamUseCaseOverrides() { mStreamUseCaseOverrides.clear(); }
+
+ /**
* The injection camera session to replace the internal camera
* session.
*/
@@ -493,6 +507,7 @@
protected:
bool mImageDumpMask = 0;
+ std::vector<int64_t> mStreamUseCaseOverrides;
};
}; // namespace android
diff --git a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
index f39b92a..63abcf0 100644
--- a/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
+++ b/services/camera/libcameraservice/common/CameraOfflineSessionBase.h
@@ -40,6 +40,10 @@
// Required for API 1 and 2
virtual void notifyError(int32_t errorCode,
const CaptureResultExtras &resultExtras) = 0;
+
+ // Optional for API 1 and 2
+ virtual void notifyPhysicalCameraChange(const std::string &/*physicalId*/) {}
+
// May return an error since it checks appops
virtual status_t notifyActive(float maxPreviewFps) = 0;
virtual void notifyIdle(int64_t requestCount, int64_t resultError, bool deviceError,
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 5b8e3a1..3b40da9 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -1004,19 +1004,21 @@
auto availableDurations = ch.find(tag);
if (availableDurations.count > 0) {
// Duration entry contains 4 elements (format, width, height, duration)
- for (size_t i = 0; i < availableDurations.count; i += 4) {
- for (const auto& size : sizes) {
- int64_t width = std::get<0>(size);
- int64_t height = std::get<1>(size);
+ for (const auto& size : sizes) {
+ int64_t width = std::get<0>(size);
+ int64_t height = std::get<1>(size);
+ for (size_t i = 0; i < availableDurations.count; i += 4) {
if ((availableDurations.data.i64[i] == format) &&
(availableDurations.data.i64[i+1] == width) &&
(availableDurations.data.i64[i+2] == height)) {
durations->push_back(availableDurations.data.i64[i+3]);
+ break;
}
}
}
}
}
+
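
    // Editorial note, not part of the patch: iterating the requested sizes in the outer
    // loop (and breaking after the first matching duration entry) keeps the returned
    // durations aligned one-to-one with `sizes`, which the Jpeg/R tag derivation further
    // down now checks explicitly (supportedP010Sizes.size() must equal blobMinDurations.size()).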
void CameraProviderManager::ProviderInfo::DeviceInfo3::getSupportedDynamicDepthDurations(
const std::vector<int64_t>& depthDurations, const std::vector<int64_t>& blobDurations,
std::vector<int64_t> *dynamicDepthDurations /*out*/) {
@@ -1137,8 +1139,7 @@
return BAD_VALUE;
}
- std::vector<std::tuple<size_t, size_t>> supportedP010Sizes, supportedBlobSizes,
- supportedDynamicDepthSizes, internalDepthSizes;
+ std::vector<std::tuple<size_t, size_t>> supportedP010Sizes, supportedBlobSizes;
auto capabilities = c.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
if (capabilities.count == 0) {
ALOGE("%s: Supported camera capabilities is empty!", __FUNCTION__);
@@ -1153,14 +1154,6 @@
return OK;
}
- if (!isConcurrentDynamicRangeCaptureSupported(c,
- ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_HLG10,
- ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD)) {
- // Advertise Jpeg/R only in case 10 and 8-bit concurrent capture is supported.
- // This can be removed when 10-bit to 8-bit tonemapping is available.
- return OK;
- }
-
getSupportedSizes(c, scalerSizesTag,
static_cast<android_pixel_format_t>(HAL_PIXEL_FORMAT_BLOB), &supportedBlobSizes);
getSupportedSizes(c, scalerSizesTag,
@@ -1199,9 +1192,11 @@
getSupportedDurations(c, scalerStallDurationsTag, HAL_PIXEL_FORMAT_BLOB,
supportedP010Sizes, &blobStallDurations);
if (blobStallDurations.empty() || blobMinDurations.empty() ||
- (blobMinDurations.size() != blobStallDurations.size())) {
- ALOGE("%s: Unexpected number of available blob durations! %zu vs. %zu",
- __FUNCTION__, blobMinDurations.size(), blobStallDurations.size());
+ supportedP010Sizes.size() != blobMinDurations.size() ||
+ blobMinDurations.size() != blobStallDurations.size()) {
+ ALOGE("%s: Unexpected number of available blob durations! %zu vs. %zu with "
+ "supportedP010Sizes size: %zu", __FUNCTION__, blobMinDurations.size(),
+ blobStallDurations.size(), supportedP010Sizes.size());
return BAD_VALUE;
}
@@ -2254,7 +2249,7 @@
}
CameraMetadata info2;
res = device->getCameraCharacteristics(true /*overrideForPerfClass*/, &info2,
- /*overrideToPortrait*/true);
+ /*overrideToPortrait*/false);
if (res == INVALID_OPERATION) {
dprintf(fd, " API2 not directly supported\n");
} else if (res != OK) {
@@ -2605,8 +2600,8 @@
if (overrideToPortrait) {
const auto &lensFacingEntry = characteristics->find(ANDROID_LENS_FACING);
const auto &sensorOrientationEntry = characteristics->find(ANDROID_SENSOR_ORIENTATION);
+ uint8_t lensFacing = lensFacingEntry.data.u8[0];
if (lensFacingEntry.count > 0 && sensorOrientationEntry.count > 0) {
- uint8_t lensFacing = lensFacingEntry.data.u8[0];
int32_t sensorOrientation = sensorOrientationEntry.data.i32[0];
int32_t newSensorOrientation = sensorOrientation;
@@ -2627,6 +2622,8 @@
}
if (characteristics->exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+ ALOGV("%s: Erasing ANDROID_INFO_DEVICE_STATE_ORIENTATIONS for lens facing %d",
+ __FUNCTION__, lensFacing);
characteristics->erase(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
}
}
@@ -2662,8 +2659,8 @@
for (size_t i = 0; i < streamConfigs.count; i += 4) {
if ((streamConfigs.data.i32[i] == HAL_PIXEL_FORMAT_BLOB) && (streamConfigs.data.i32[i+3] ==
ANDROID_SCALER_AVAILABLE_STREAM_CONFIGURATIONS_OUTPUT)) {
- if (streamConfigs.data.i32[i+1] < thresholdW ||
- streamConfigs.data.i32[i+2] < thresholdH) {
+ if (streamConfigs.data.i32[i+1] * streamConfigs.data.i32[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount ++;
@@ -2683,8 +2680,8 @@
mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_MIN_FRAME_DURATIONS);
for (size_t i = 0; i < minDurations.count; i += 4) {
if (minDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
- if (minDurations.data.i64[i+1] < thresholdW ||
- minDurations.data.i64[i+2] < thresholdH) {
+ if ((int32_t)minDurations.data.i64[i+1] * (int32_t)minDurations.data.i64[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount++;
@@ -2704,8 +2701,8 @@
mCameraCharacteristics.find(ANDROID_SCALER_AVAILABLE_STALL_DURATIONS);
for (size_t i = 0; i < stallDurations.count; i += 4) {
if (stallDurations.data.i64[i] == HAL_PIXEL_FORMAT_BLOB) {
- if (stallDurations.data.i64[i+1] < thresholdW ||
- stallDurations.data.i64[i+2] < thresholdH) {
+ if ((int32_t)stallDurations.data.i64[i+1] * (int32_t)stallDurations.data.i64[i+2] <
+ thresholdW * thresholdH) {
continue;
} else {
largeJpegCount++;
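The three hunks above relax the large-JPEG check from a per-dimension comparison to an area comparison, so a stream counts as large once width*height reaches thresholdW*thresholdH even if one dimension alone falls short of its threshold. A sketch of the new predicate; it widens to int64_t before multiplying, which the in-tree 32-bit multiplication does not strictly need for realistic sensor sizes but costs nothing:

#include <cstdint>

// Relaxed large-JPEG predicate: compare total pixel counts rather than each
// dimension separately (illustrative only).
bool isLargeJpeg(int32_t width, int32_t height, int32_t thresholdW, int32_t thresholdH) {
    // Old check: width >= thresholdW && height >= thresholdH
    // New check: the stream's area meets the threshold area.
    return static_cast<int64_t>(width) * height >=
           static_cast<int64_t>(thresholdW) * thresholdH;
}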
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 28a150c..427d972 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -52,15 +52,16 @@
#include <android/hardware/camera/device/3.7/ICameraInjectionSession.h>
#include <android/hardware/camera2/ICameraDeviceUser.h>
-#include "utils/CameraTraces.h"
-#include "mediautils/SchedulingPolicyService.h"
-#include "device3/Camera3Device.h"
-#include "device3/Camera3OutputStream.h"
-#include "device3/Camera3InputStream.h"
-#include "device3/Camera3FakeStream.h"
-#include "device3/Camera3SharedOutputStream.h"
#include "CameraService.h"
+#include "aidl/AidlUtils.h"
+#include "device3/Camera3Device.h"
+#include "device3/Camera3FakeStream.h"
+#include "device3/Camera3InputStream.h"
+#include "device3/Camera3OutputStream.h"
+#include "device3/Camera3SharedOutputStream.h"
+#include "mediautils/SchedulingPolicyService.h"
#include "utils/CameraThreadState.h"
+#include "utils/CameraTraces.h"
#include "utils/SessionConfigurationUtils.h"
#include "utils/TraceHFR.h"
@@ -96,7 +97,8 @@
mLastTemplateId(-1),
mNeedFixupMonochromeTags(false),
mOverrideForPerfClass(overrideForPerfClass),
- mOverrideToPortrait(overrideToPortrait)
+ mOverrideToPortrait(overrideToPortrait),
+ mActivePhysicalId("")
{
ATRACE_CALL();
ALOGV("%s: Created device for camera %s", __FUNCTION__, mId.string());
@@ -229,7 +231,7 @@
mInjectionMethods = createCamera3DeviceInjectionMethods(this);
/** Start watchdog thread */
- mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog = new CameraServiceWatchdog(mId, mCameraServiceProxyWrapper);
res = mCameraServiceWatchdog->run("CameraServiceWatchdog");
if (res != OK) {
SET_ERR_L("Unable to start camera service watchdog thread: %s (%d)",
@@ -2364,6 +2366,9 @@
tryRemoveFakeStreamLocked();
}
+ // Override stream use case based on "adb shell command"
+ overrideStreamUseCaseLocked();
+
// Start configuring the streams
ALOGV("%s: Camera %s: Starting stream configuration", __FUNCTION__, mId.string());
@@ -2942,6 +2947,7 @@
mSupportCameraMute(supportCameraMute),
mOverrideToPortrait(overrideToPortrait) {
mStatusId = statusTracker->addComponent("RequestThread");
+ mVndkVersion = property_get_int32("ro.vndk.version", __ANDROID_API_FUTURE__);
}
Camera3Device::RequestThread::~RequestThread() {}
@@ -3736,6 +3742,17 @@
}
captureRequest->mRotationAndCropUpdated = true;
}
+
+ for (it = captureRequest->mSettingsList.begin();
+ it != captureRequest->mSettingsList.end(); it++) {
+ res = hardware::cameraservice::utils::conversion::aidl::filterVndkKeys(
+ mVndkVersion, it->metadata, false /*isStatic*/);
+ if (res != OK) {
+ SET_ERR("RequestThread: Failed during VNDK filter of capture requests "
+ "%d: %s (%d)", halRequest->frame_number, strerror(-res), res);
+ return INVALID_OPERATION;
+ }
+ }
}
}
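The RequestThread now caches ro.vndk.version once at construction and runs every per-request settings blob through filterVndkKeys, so vendor code built against an older VNDK never receives keys it cannot parse. A hedged sketch of such a version gate; the tag table below is purely hypothetical, and the real filtering lives in hardware::cameraservice::utils::conversion::aidl::filterVndkKeys:

#include <cutils/properties.h>
#include <cstdint>
#include <map>
#include <vector>

// Hypothetical table: metadata tag -> first VNDK version that understands it.
static const std::map<uint32_t, int32_t> kTagMinVndkVersion = {
    {0x80010000u, 33},  // example tag, assumed introduced in VNDK 33
    {0x80020000u, 34},  // example tag, assumed introduced in VNDK 34
};

std::vector<uint32_t> filterTagsForVendor(const std::vector<uint32_t>& tags) {
    // Assume the newest version when the property is unset, as the hunk above
    // does with __ANDROID_API_FUTURE__.
    const int32_t vndkVersion = property_get_int32("ro.vndk.version", INT32_MAX);
    std::vector<uint32_t> kept;
    for (uint32_t tag : tags) {
        auto it = kTagMinVndkVersion.find(tag);
        if (it == kTagMinVndkVersion.end() || it->second <= vndkVersion) {
            kept.push_back(tag);  // vendor is new enough to understand this tag
        }
    }
    return kept;
}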
@@ -4161,6 +4178,25 @@
return OK;
}
+void Camera3Device::setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ mStreamUseCaseOverrides = useCaseOverrides;
+}
+
+void Camera3Device::clearStreamUseCaseOverrides() {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ mStreamUseCaseOverrides.clear();
+}
+
+bool Camera3Device::hasDeviceError() {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+ return mStatus == STATUS_ERROR;
+}
+
void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
if (mNextRequests.empty()) {
return;
@@ -5315,4 +5351,56 @@
return mInjectionMethods->stopInjection();
}
+void Camera3Device::overrideStreamUseCaseLocked() {
+ if (mStreamUseCaseOverrides.size() == 0) {
+ return;
+ }
+
+ // Start from an array of indexes in mStreamUseCaseOverrides, and sort them
+    // based first on size (descending), and second on format priority [JPEG, RAW, YUV, PRIV].
+ // Refer to CameraService::printHelp for details.
+ std::vector<int> outputStreamsIndices(mOutputStreams.size());
+ for (size_t i = 0; i < outputStreamsIndices.size(); i++) {
+ outputStreamsIndices[i] = i;
+ }
+
+ std::sort(outputStreamsIndices.begin(), outputStreamsIndices.end(),
+ [&](int a, int b) -> bool {
+
+ auto formatScore = [](int format) {
+ switch (format) {
+ case HAL_PIXEL_FORMAT_BLOB:
+ return 4;
+ case HAL_PIXEL_FORMAT_RAW16:
+ case HAL_PIXEL_FORMAT_RAW10:
+ case HAL_PIXEL_FORMAT_RAW12:
+ return 3;
+ case HAL_PIXEL_FORMAT_YCBCR_420_888:
+ return 2;
+ case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
+ return 1;
+ default:
+ return 0;
+ }
+ };
+
+                int sizeA = mOutputStreams[a]->getWidth() * mOutputStreams[a]->getHeight();
+                int sizeB = mOutputStreams[b]->getWidth() * mOutputStreams[b]->getHeight();
+ int formatAScore = formatScore(mOutputStreams[a]->getFormat());
+ int formatBScore = formatScore(mOutputStreams[b]->getFormat());
+                if (sizeA > sizeB ||
+                        (sizeA == sizeB && formatAScore > formatBScore)) {
+ return true;
+ } else {
+ return false;
+ }
+ });
+
+ size_t overlapSize = std::min(mStreamUseCaseOverrides.size(), mOutputStreams.size());
+ for (size_t i = 0; i < mOutputStreams.size(); i++) {
+ mOutputStreams[outputStreamsIndices[i]]->setStreamUseCase(
+ mStreamUseCaseOverrides[std::min(i, overlapSize-1)]);
+ }
+}
+
}; // namespace android
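overrideStreamUseCaseLocked sorts the output-stream indices largest-first, breaking ties by format priority (JPEG, then RAW, then YUV, then PRIV), and hands out the adb-supplied overrides in that order, reusing the last override once the list runs out. A standalone sketch of that distribution with simplified types; note the comparator uses a strict '>' on the format score, since std::sort requires a strict weak ordering (a comparator that returns true for equivalent elements is undefined behavior):

#include <algorithm>
#include <cstdint>
#include <vector>

enum class Format { BLOB, RAW, YUV, PRIV, OTHER };  // stand-ins for HAL pixel formats

struct StreamDesc { int32_t width; int32_t height; Format format; };

static int formatScore(Format f) {
    switch (f) {
        case Format::BLOB: return 4;   // JPEG
        case Format::RAW:  return 3;
        case Format::YUV:  return 2;
        case Format::PRIV: return 1;
        default:           return 0;
    }
}

// Returns the use case assigned to each stream, largest/highest-priority streams first.
std::vector<int64_t> distributeOverrides(const std::vector<StreamDesc>& streams,
                                         const std::vector<int64_t>& overrides) {
    std::vector<int64_t> useCases(streams.size(), 0);
    if (overrides.empty() || streams.empty()) return useCases;

    std::vector<size_t> idx(streams.size());
    for (size_t i = 0; i < idx.size(); i++) idx[i] = i;
    std::sort(idx.begin(), idx.end(), [&](size_t a, size_t b) {
        const int64_t sizeA = int64_t{streams[a].width} * streams[a].height;
        const int64_t sizeB = int64_t{streams[b].width} * streams[b].height;
        if (sizeA != sizeB) return sizeA > sizeB;                 // bigger streams first
        return formatScore(streams[a].format) > formatScore(streams[b].format);
    });

    const size_t overlap = std::min(overrides.size(), streams.size());
    for (size_t i = 0; i < streams.size(); i++) {
        useCases[idx[i]] = overrides[std::min(i, overlap - 1)];   // reuse the last override
    }
    return useCases;
}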
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index 990f556..3a46ee6 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -303,9 +303,19 @@
*/
status_t setCameraServiceWatchdog(bool enabled);
+ // Set stream use case overrides
+ void setStreamUseCaseOverrides(
+ const std::vector<int64_t>& useCaseOverrides);
+
+ // Clear stream use case overrides
+ void clearStreamUseCaseOverrides();
+
    // Get the status tracker for the camera device
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
+ // Whether the device is in error state
+ bool hasDeviceError();
+
/**
* The injection camera session to replace the internal camera
* session.
@@ -1050,7 +1060,7 @@
wp<NotificationListener> mListener;
- const String8& mId; // The camera ID
+ const String8 mId; // The camera ID
int mStatusId; // The RequestThread's component ID for
// status tracking
@@ -1124,6 +1134,7 @@
const bool mUseHalBufManager;
const bool mSupportCameraMute;
const bool mOverrideToPortrait;
+ int32_t mVndkVersion = -1;
};
virtual sp<RequestThread> createNewRequestThread(wp<Camera3Device> /*parent*/,
@@ -1406,6 +1417,9 @@
// app compatibility reasons.
bool mOverrideToPortrait;
+ // Current active physical id of the logical multi-camera, if any
+ std::string mActivePhysicalId;
+
// The current minimum expected frame duration based on AE_TARGET_FPS_RANGE
nsecs_t mMinExpectedDuration = 0;
// Whether the camera device runs at fixed frame rate based on AE_MODE and
@@ -1494,6 +1508,8 @@
sp<Camera3DeviceInjectionMethods> mInjectionMethods;
+ void overrideStreamUseCaseLocked();
+
}; // class Camera3Device
}; // namespace android
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index a93d1da..1e9f478 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -101,6 +101,8 @@
virtual status_t setBatchSize(size_t batchSize) override;
virtual void onMinDurationChanged(nsecs_t /*duration*/, bool /*fixedFps*/) {}
+
+ virtual void setStreamUseCase(int64_t /*streamUseCase*/) {}
protected:
/**
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 4d6ab3d..2227232 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -1386,6 +1386,11 @@
mFixedFps = fixedFps;
}
+void Camera3OutputStream::setStreamUseCase(int64_t streamUseCase) {
+ Mutex::Autolock l(mLock);
+ camera_stream::use_case = streamUseCase;
+}
+
void Camera3OutputStream::returnPrefetchedBuffersLocked() {
std::vector<Surface::BatchBuffer> batchedBuffers;
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index a2f16d4..9a08485 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -259,6 +259,11 @@
virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) override;
/**
+ * Modify stream use case
+ */
+ virtual void setStreamUseCase(int64_t streamUseCase) override;
+
+ /**
* Apply ZSL related consumer usage quirk.
*/
static void applyZSLUsageQuirk(int format, uint64_t *consumerUsage /*inout*/);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
index dbc6fe1..4baa7e8 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStreamInterface.h
@@ -117,6 +117,11 @@
* AE_TARGET_FPS_RANGE in the capture request.
*/
virtual void onMinDurationChanged(nsecs_t duration, bool fixedFps) = 0;
+
+ /**
+ * Modify the stream use case for this output.
+ */
+ virtual void setStreamUseCase(int64_t streamUseCase) = 0;
};
// Helper class to organize a synchronized mapping of stream IDs to stream instances
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index a441638..738c314 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -623,27 +623,35 @@
if (result->partial_result != 0)
request.resultExtras.partialResultCount = result->partial_result;
- if ((result->result != nullptr) && !states.legacyClient && !states.overrideToPortrait) {
+ if (result->result != nullptr) {
camera_metadata_ro_entry entry;
auto ret = find_camera_metadata_ro_entry(result->result,
ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
if ((ret == OK) && (entry.count > 0)) {
std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
- auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
- if (deviceInfo != states.physicalDeviceInfoMap.end()) {
- auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
- if (orientation.count > 0) {
- ret = CameraUtils::getRotationTransform(deviceInfo->second,
- OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
- if (ret != OK) {
- ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
- __FUNCTION__, strerror(-ret), ret);
+ if (!states.activePhysicalId.empty() && physicalId != states.activePhysicalId) {
+ states.listener->notifyPhysicalCameraChange(physicalId);
+ }
+ states.activePhysicalId = physicalId;
+
+ if (!states.legacyClient && !states.overrideToPortrait) {
+ auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+ if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+ auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+ if (orientation.count > 0) {
+ ret = CameraUtils::getRotationTransform(deviceInfo->second,
+ OutputConfiguration::MIRROR_MODE_AUTO, &request.transform);
+ if (ret != OK) {
+ ALOGE("%s: Failed to calculate current stream transformation: %s "
+ "(%d)", __FUNCTION__, strerror(-ret), ret);
+ }
+ } else {
+ ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
}
} else {
- ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+                    ALOGE("%s: Physical device not found in device info map!",
+                            __FUNCTION__);
}
- } else {
- ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__);
}
}
}
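processCaptureResult now reads ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from every result, compares it with the id cached in CaptureOutputStates::activePhysicalId, and calls notifyPhysicalCameraChange only when the id actually switches. A minimal change-detection sketch with stand-in types (the callback below is not the service listener API):

#include <functional>
#include <string>

struct PhysicalIdTracker {
    std::string active;                                // last reported physical id
    std::function<void(const std::string&)> onChange;  // e.g. a notifyPhysicalCameraChange hook

    void onResult(const std::string& idInResult) {
        if (idInResult.empty()) return;                // result carried no id
        if (!active.empty() && idInResult != active && onChange) {
            onChange(idInResult);                      // fire once per switch
        }
        active = idInResult;
    }
};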
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 019c8a8..d5328c5 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -108,6 +108,7 @@
nsecs_t& minFrameDuration;
bool& isFixedFps;
bool overrideToPortrait;
+ std::string &activePhysicalId;
};
void processCaptureResult(CaptureOutputStates& states, const camera_capture_result *result);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
index 7eba57f..30f6d18 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3Device.cpp
@@ -379,7 +379,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
- mOverrideToPortrait}, mResultMetadataQueue
+ mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& result : results) {
@@ -421,7 +421,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, *(mInterface), mLegacyClient, mMinExpectedDuration, mIsFixedFps,
- mOverrideToPortrait}, mResultMetadataQueue
+ mOverrideToPortrait, mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
index 9ce0622..4b1fb1d 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OfflineSession.cpp
@@ -111,6 +111,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId(""); // Unused
AidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -125,7 +126,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -157,6 +158,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId(""); // Unused
AidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -171,7 +173,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this,
*this, mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 44c60cf..382b287 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -365,8 +365,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
//HidlCaptureOutputStates hidlStates {
@@ -428,8 +428,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& result : results) {
@@ -476,8 +476,8 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait},
- mResultMetadataQueue
+ *mInterface, mLegacyClient, mMinExpectedDuration, mIsFixedFps, mOverrideToPortrait,
+ mActivePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
index c7f8fa1..0a6a6f7 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OfflineSession.cpp
@@ -92,6 +92,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -106,7 +107,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -133,6 +134,7 @@
hardware::hidl_vec<hardware::camera::device::V3_4::PhysicalCameraMetadata> noPhysMetadata;
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -147,7 +149,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -169,6 +171,7 @@
listener = mListener.promote();
}
+ std::string activePhysicalId("");
HidlCaptureOutputStates states {
{mId,
mOfflineReqsLock, mLastCompletedRegularFrameNumber,
@@ -183,7 +186,7 @@
mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
mBufferRecords, /*legacyClient*/ false, mMinExpectedDuration, mIsFixedFps,
- /*overrideToPortrait*/false}, mResultMetadataQueue
+ /*overrideToPortrait*/false, activePhysicalId}, mResultMetadataQueue
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index bed576f..7aaf6b2 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -46,11 +46,13 @@
}
void CameraServiceProxyWrapper::CameraSessionStatsWrapper::onClose(
- sp<hardware::ICameraServiceProxy>& proxyBinder, int32_t latencyMs) {
+ sp<hardware::ICameraServiceProxy>& proxyBinder, int32_t latencyMs,
+ bool deviceError) {
Mutex::Autolock l(mLock);
mSessionStats.mNewCameraState = CameraSessionStats::CAMERA_STATE_CLOSED;
mSessionStats.mLatencyMs = latencyMs;
+ mSessionStats.mDeviceError = deviceError;
updateProxyDeviceState(proxyBinder);
}
@@ -259,7 +261,7 @@
sessionStats->onOpen(proxyBinder);
}
-void CameraServiceProxyWrapper::logClose(const String8& id, int32_t latencyMs) {
+void CameraServiceProxyWrapper::logClose(const String8& id, int32_t latencyMs, bool deviceError) {
std::shared_ptr<CameraSessionStatsWrapper> sessionStats;
{
Mutex::Autolock l(mLock);
@@ -275,13 +277,15 @@
__FUNCTION__, id.c_str());
return;
}
+
mSessionStatsMap.erase(id);
- ALOGV("%s: Erasing id %s", __FUNCTION__, id.c_str());
+ ALOGV("%s: Erasing id %s, deviceError %d", __FUNCTION__, id.c_str(), deviceError);
}
- ALOGV("%s: id %s, latencyMs %d", __FUNCTION__, id.c_str(), latencyMs);
+ ALOGV("%s: id %s, latencyMs %d, deviceError %d", __FUNCTION__,
+ id.c_str(), latencyMs, deviceError);
sp<hardware::ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
- sessionStats->onClose(proxyBinder, latencyMs);
+ sessionStats->onClose(proxyBinder, latencyMs, deviceError);
}
bool CameraServiceProxyWrapper::isCameraDisabled(int userId) {
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index 0f77fc9..f90a841 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -54,7 +54,8 @@
{ }
void onOpen(sp<hardware::ICameraServiceProxy>& proxyBinder);
- void onClose(sp<hardware::ICameraServiceProxy>& proxyBinder, int32_t latencyMs);
+ void onClose(sp<hardware::ICameraServiceProxy>& proxyBinder, int32_t latencyMs,
+ bool deviceError);
void onStreamConfigured(int operatingMode, bool internalReconfig, int32_t latencyMs);
void onActive(sp<hardware::ICameraServiceProxy>& proxyBinder, float maxPreviewFps);
void onIdle(sp<hardware::ICameraServiceProxy>& proxyBinder,
@@ -83,7 +84,7 @@
int32_t latencyMs);
// Close
- void logClose(const String8& id, int32_t latencyMs);
+ void logClose(const String8& id, int32_t latencyMs, bool deviceError);
// Stream configuration
void logStreamConfigured(const String8& id, int operatingMode, bool internalReconfig,
diff --git a/services/mediacodec/Android.bp b/services/mediacodec/Android.bp
index 3222950..a2f17c2 100644
--- a/services/mediacodec/Android.bp
+++ b/services/mediacodec/Android.bp
@@ -147,6 +147,9 @@
arm64: {
src: "seccomp_policy/mediacodec-arm64.policy",
},
+ riscv64: {
+ enabled: false,
+ },
x86: {
src: "seccomp_policy/mediacodec-x86.policy",
},
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index e8d3f6e..c90488f 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -138,6 +138,7 @@
"AudioTypes.cpp",
"cleaner.cpp",
"iface_statsd.cpp",
+ "MediaDrmStatsdHelper.cpp",
"MediaMetricsService.cpp",
"statsd_audiopolicy.cpp",
"statsd_audiorecord.cpp",
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 7af6c41..948cee1 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -238,6 +238,9 @@
"sample_rate",
"content_type",
"sharing_requested",
+ "format_hardware",
+ "channel_count_hardware",
+ "sample_rate_hardware",
};
static constexpr const char * HeadTrackerDeviceEnabledFields[] {
@@ -1360,6 +1363,19 @@
const auto sharingModeRequested =
types::lookup<types::AAUDIO_SHARING_MODE, int32_t>(sharingModeRequestedStr);
+ std::string formatHardwareStr;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_ENCODINGHARDWARE, &formatHardwareStr);
+ const auto formatHardware = types::lookup<types::ENCODING, int32_t>(formatHardwareStr);
+
+ int32_t channelCountHardware = -1;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_CHANNELCOUNTHARDWARE, &channelCountHardware);
+
+ int32_t sampleRateHardware = 0;
+ mAudioAnalytics.mAnalyticsState->timeMachine().get(
+ key, AMEDIAMETRICS_PROP_SAMPLERATEHARDWARE, &sampleRateHardware);
+
LOG(LOG_LEVEL) << "key:" << key
<< " path:" << path
<< " direction:" << direction << "(" << directionStr << ")"
@@ -1379,7 +1395,10 @@
<< " sample_rate: " << sampleRate
<< " content_type: " << contentType << "(" << contentTypeStr << ")"
<< " sharing_requested:" << sharingModeRequested
- << "(" << sharingModeRequestedStr << ")";
+ << "(" << sharingModeRequestedStr << ")"
+ << " format_hardware:" << formatHardware << "(" << formatHardwareStr << ")"
+ << " channel_count_hardware:" << channelCountHardware
+ << " sample_rate_hardware: " << sampleRateHardware;
if (mAudioAnalytics.mDeliverStatistics) {
const stats::media_metrics::BytesField bf_serialized(
@@ -1404,6 +1423,9 @@
, sampleRate
, contentType
, sharingModeRequested
+ , formatHardware
+ , channelCountHardware
+ , sampleRateHardware
);
std::stringstream ss;
ss << "result:" << result;
@@ -1427,6 +1449,9 @@
, sampleRate
, contentType
, sharingModeRequested
+ , formatHardware
+ , channelCountHardware
+ , sampleRateHardware
);
ss << " " << fieldsStr;
std::string str = ss.str();
diff --git a/services/mediametrics/MediaDrmStatsdHelper.cpp b/services/mediametrics/MediaDrmStatsdHelper.cpp
new file mode 100644
index 0000000..d762672
--- /dev/null
+++ b/services/mediametrics/MediaDrmStatsdHelper.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "MediaDrmStatsdHelper.h"
+#include <tuple>
+#include <map>
+#include <unordered_map>
+
+namespace {
+
+struct UUID {
+ uint64_t msb, lsb; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool operator < (const UUID& that) const {
+ return std::tie(msb, lsb) < std::tie(that.msb, that.lsb);
+ }
+};
+
+// KEEP IN SYNC WITH frameworks/proto_logging/stats/enums/media/drm/enums.proto
+std::map<UUID, int32_t> const kUuidSchemeEnumMap {
+ {{.msb = 0x6DD8B3C345F44A68, .lsb = 0xBF3A64168D01A4A6}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__ABV_MODRM }, // ABV_MODRM
+ {{.msb = 0xF239E769EFA34850, .lsb = 0x9C16A903C6932EFB}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__ADOBE_PRIMETIME }, // ADOBE_PRIMETIME
+ {{.msb = 0x616C746963617374, .lsb = 0x2D50726F74656374}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__ALTICAST }, // ALTICAST
+ {{.msb = 0x94CE86FB07FF4F43, .lsb = 0xADB893D2FA968CA2}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__APPLE_FAIRPLAY }, // APPLE_FAIRPLAY
+ {{.msb = 0x279FE473512C48FE, .lsb = 0xADE8D176FEE6B40F}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__ARRIS_TITANIUM }, // ARRIS_TITANIUM
+ {{.msb = 0x3D5E6D359B9A41E8, .lsb = 0xB843DD3C6E72C42C}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CHINADRM }, // CHINADRM
+ {{.msb = 0x3EA8778F77424BF9, .lsb = 0xB18BE834B2ACBD47}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CLEAR_KEY_AES_128 }, // CLEAR_KEY_AES_128
+ {{.msb = 0xBE58615B19C44684, .lsb = 0x88B3C8C57E99E957}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CLEAR_KEY_SAMPLE_AES }, // CLEAR_KEY_SAMPLE_AES
+ {{.msb = 0xE2719D58A985B3C9, .lsb = 0x781AB030AF78D30E}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CLEAR_KEY_DASH_IF }, // CLEAR_KEY_DASH_IF
+ {{.msb = 0x644FE7B5260F4FAD, .lsb = 0x949A0762FFB054B4}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CMLA_OMA }, // CMLA_OMA
+ {{.msb = 0x37C332587B994C7E, .lsb = 0xB15D19AF74482154}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__COMMSCOPE_TITANIUM }, // COMMSCOPE_TITANIUM
+ {{.msb = 0x45D481CB8FE049C0, .lsb = 0xADA9AB2D2455B2F2}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__CORECRYPT }, // CORECRYPT
+ {{.msb = 0xDCF4E3E362F15818, .lsb = 0x7BA60A6FE33FF3DD}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__DIGICAP_SMARTXESS }, // DIGICAP_SMARTXESS
+ {{.msb = 0x35BF197B530E42D7, .lsb = 0x8B651B4BF415070F}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__DIVX }, // DIVX
+ {{.msb = 0x80A6BE7E14484C37, .lsb = 0x9E70D5AEBE04C8D2}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__IRDETO }, // IRDETO
+ {{.msb = 0x5E629AF538DA4063, .lsb = 0x897797FFBD9902D4}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__MARLIN }, // MARLIN
+ {{.msb = 0x9A04F07998404286, .lsb = 0xAB92E65BE0885F95}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__MICROSOFT_PLAYREADY }, // MICROSOFT_PLAYREADY
+ {{.msb = 0x6A99532D869F5922, .lsb = 0x9A91113AB7B1E2F3}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__MOBITV }, // MOBITV
+ {{.msb = 0xADB41C242DBF4A6D, .lsb = 0x958B4457C0D27B95}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__NAGRA_MEDIAACCESS }, // NAGRA_MEDIAACCESS
+ {{.msb = 0x1F83E1E86EE94F0D, .lsb = 0xBA2F5EC4E3ED1A66}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__SECUREMEDIA }, // SECUREMEDIA
+ {{.msb = 0x992C46E6C4374899, .lsb = 0xB6A050FA91AD0E39}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__SECUREMEDIA_STEELKNOT }, // SECUREMEDIA_STEELKNOT
+ {{.msb = 0xA68129D3575B4F1A, .lsb = 0x9CBA3223846CF7C3}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__SYNAMEDIA_VIDEOGUARD }, // SYNAMEDIA_VIDEOGUARD
+ {{.msb = 0xAA11967FCC014A4A, .lsb = 0x8E99C5D3DDDFEA2D}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__UNITEND_UDRM }, // UNITEND_UDRM
+ {{.msb = 0x9A27DD82FDE24725, .lsb = 0x8CBC4234AA06EC09}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__VERIMATRIX_VCAS }, // VERIMATRIX_VCAS
+ {{.msb = 0xB4413586C58CFFB0, .lsb = 0x94A5D4896C1AF6C3}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__VIACCESS_ORCA }, // VIACCESS_ORCA
+ {{.msb = 0x793B79569F944946, .lsb = 0xA94223E7EF7E44B4}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__VISIONCRYPT }, // VISIONCRYPT
+ {{.msb = 0x1077EFECC0B24D02, .lsb = 0xACE33C1E52E2FB4B}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__W3C_COMMON }, // W3C_COMMON
+ {{.msb = 0xEDEF8BA979D64ACE, .lsb = 0xA3C827DCD51D21ED}, android::stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__WIDEVINE }, // WIDEVINE
+};
+
+// KEEP IN SYNC WITH frameworks/av/drm/libmediadrm/include/mediadrm/IDrm.h
+// KEEP IN SYNC WITH frameworks/proto_logging/stats/enums/media/drm/enums.proto
+std::unordered_map<std::string, int32_t> const kDrmApiEnumMap {
+ {"initCheck" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_INIT_CHECK },
+ {"isCryptoSchemeSupported" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_IS_CRYPTO_SCHEME_SUPPORTED },
+ {"createPlugin" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_CREATE_PLUGIN },
+ {"destroyPlugin" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_DESTROY_PLUGIN },
+ {"openSession" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_OPEN_SESSION },
+ {"closeSession" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_CLOSE_SESSION },
+ {"getKeyRequest" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_KEY_REQUEST },
+ {"provideKeyResponse" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_PROVIDE_KEY_RESPONSE },
+ {"removeKeys" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REMOVE_KEYS },
+ {"restoreKeys" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_RESTORE_KEYS },
+ {"queryKeyStatus" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_QUERY_KEY_STATUS },
+ {"getProvisionRequest" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_PROVISION_REQUEST },
+ {"provideProvisionResponse" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_PROVIDE_PROVISION_RESPONSE },
+ {"getSecureStops" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_SECURE_STOPS },
+ {"getSecureStopIds" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_SECURE_STOP_IDS },
+ {"getSecureStop" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_SECURE_STOP },
+ {"releaseSecureStops" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_RELEASE_SECURE_STOPS },
+ {"removeSecureStop" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REMOVE_SECURE_STOP },
+ {"removeAllSecureStops" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REMOVE_ALL_SECURE_STOPS },
+ {"getHdcpLevels" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_HDCP_LEVELS },
+ {"getNumberOfSessions" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_NUMBER_OF_SESSIONS },
+ {"getSecurityLevel" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_SECURITY_LEVEL },
+ {"getOfflineLicenseKeySetIds" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_OFFLINE_LICENSE_KEY_SET_IDS },
+ {"removeOfflineLicense" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REMOVE_OFFLINE_LICENSE },
+ {"getOfflineLicenseState" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_OFFLINE_LICENSE_STATE },
+ {"getPropertyString" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_PROPERTY_STRING },
+ {"getPropertyByteArray" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_PROPERTY_BYTE_ARRAY },
+ {"setPropertyString" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_PROPERTY_STRING },
+ {"setPropertyByteArray" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_PROPERTY_BYTE_ARRAY },
+ {"getMetrics" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_METRICS },
+ {"setCipherAlgorithm" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_CIPHER_ALGORITHM },
+ {"setMacAlgorithm" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_MAC_ALGORITHM },
+ {"encrypt" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GENERIC_ENCRYPT },
+ {"decrypt" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GENERIC_DECRYPT },
+ {"sign" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GENERIC_SIGN },
+ {"verify" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GENERIC_VERIFY },
+ {"signRSA" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SIGN_RSA },
+ {"setListener" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_LISTENER },
+ {"requiresSecureDecoder" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REQUIRES_SECURE_DECODER },
+ {"requiresSecureDecoderLevel" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_REQUIRES_SECURE_DECODER_LEVEL },
+ {"setPlaybackId" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_SET_PLAYBACK_ID },
+ {"getLogMessages" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_LOG_MESSAGES },
+ {"getSupportedSchemes" , android::stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_GET_SUPPORTED_SCHEMES },
+};
+
+} // anonymous namespace
+
+namespace android {
+
+int32_t MediaDrmStatsdHelper::findDrmScheme(const int64_t msb, const int64_t lsb) {
+ auto it = kUuidSchemeEnumMap.find({.msb = static_cast<uint64_t>(msb), .lsb = static_cast<uint64_t>(lsb)});
+ if (it == kUuidSchemeEnumMap.end()) return stats::media_metrics::MEDIA_DRM_SESSION_OPENED__SCHEME__DRM_SCHEME_OTHER;
+ return it->second;
+}
+
+int32_t MediaDrmStatsdHelper::findDrmApi(const std::string& api) {
+ auto it = kDrmApiEnumMap.find(api);
+ if (it == kDrmApiEnumMap.end()) return stats::media_metrics::MEDIA_DRM_ERRORED__API__DRM_API_UNKNOWN;
+ return it->second;
+}
+
+} // namespace android
\ No newline at end of file
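findDrmScheme splits the 128-bit DRM scheme UUID into msb/lsb halves and relies on the std::tie-based operator< so a plain std::map::find resolves the scheme, falling back to the OTHER value for unknown UUIDs. A self-contained sketch of the same lookup; the enum values are placeholders, and only the Widevine and PlayReady UUIDs are taken from the table above:

#include <cstdint>
#include <map>
#include <tuple>

struct Uuid {
    uint64_t msb, lsb;
    bool operator<(const Uuid& o) const { return std::tie(msb, lsb) < std::tie(o.msb, o.lsb); }
};

enum : int32_t { SCHEME_OTHER = 0, SCHEME_WIDEVINE = 1, SCHEME_PLAYREADY = 2 };  // placeholders

static const std::map<Uuid, int32_t> kSchemes = {
    {{0xEDEF8BA979D64ACEull, 0xA3C827DCD51D21EDull}, SCHEME_WIDEVINE},
    {{0x9A04F07998404286ull, 0xAB92E65BE0885F95ull}, SCHEME_PLAYREADY},
};

int32_t findScheme(int64_t msb, int64_t lsb) {
    const auto it = kSchemes.find({static_cast<uint64_t>(msb), static_cast<uint64_t>(lsb)});
    return it == kSchemes.end() ? SCHEME_OTHER : it->second;
}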
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index b4de4f4..adb2217 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -511,6 +511,8 @@
const std::string &key = item->getKey();
if (startsWith(key, "audio.")) return true;
if (startsWith(key, "drm.vendor.")) return true;
+ if (startsWith(key, "mediadrm.")) return true;
+
// the list of allowedKey uses statsd_handlers
// in iface_statsd.cpp as reference
// drmmanager is from a trusted uid, therefore not needed here
diff --git a/services/mediametrics/iface_statsd.cpp b/services/mediametrics/iface_statsd.cpp
index 8a48ce5..7f4e6e8 100644
--- a/services/mediametrics/iface_statsd.cpp
+++ b/services/mediametrics/iface_statsd.cpp
@@ -83,13 +83,13 @@
{ "drmmanager", statsd_drmmanager },
{ "extractor", statsd_extractor },
{ "mediadrm", statsd_mediadrm },
+ { "mediadrm.created", statsd_mediadrm_created },
+ { "mediadrm.errored", statsd_mediadrm_errored },
+ { "mediadrm.session_opened", statsd_mediadrm_session_opened },
{ "mediaparser", statsd_mediaparser },
{ "nuplayer", statsd_nuplayer },
{ "nuplayer2", statsd_nuplayer },
{ "recorder", statsd_recorder },
- { "media_drm_created", statsd_media_drm_created },
- { "media_drm_session_opened", statsd_media_drm_session_opened },
- { "media_drm_errored", statsd_media_drm_errored },
};
return dump2StatsdInternal(statsd_pushers, item, statsdLog);
}
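The table above maps a metrics-item key such as "mediadrm.created" to its pusher function, and dump2StatsdInternal dispatches on that key; the rename from the old "media_drm_*" entries keeps the table in step with the keys the DRM framework now emits. A minimal sketch of key-based dispatch with simplified signatures:

#include <functional>
#include <map>
#include <string>

using Pusher = std::function<bool(const std::string& /*serialized item*/)>;

// Look the key up and run its pusher; unknown keys are simply not pushed.
bool dispatch(const std::map<std::string, Pusher>& pushers,
              const std::string& key, const std::string& item) {
    const auto it = pushers.find(key);
    return it != pushers.end() && it->second(item);
}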
diff --git a/services/mediametrics/include/mediametricsservice/MediaDrmStatsdHelper.h b/services/mediametrics/include/mediametricsservice/MediaDrmStatsdHelper.h
new file mode 100644
index 0000000..2e5e7ff
--- /dev/null
+++ b/services/mediametrics/include/mediametricsservice/MediaDrmStatsdHelper.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2023 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef MEDIA_DRM_STATSD_HELPER_H
+#define MEDIA_DRM_STATSD_HELPER_H
+
+#include <cstdint>
+#include <stats_media_metrics.h>
+#include <string>
+namespace android {
+
+class MediaDrmStatsdHelper {
+public:
+ static int32_t findDrmScheme(const int64_t msb, const int64_t lsb);
+ static int32_t findDrmApi(const std::string& api);
+};
+
+} // namespace android
+#endif // MEDIA_DRM_STATSD_HELPER_H
\ No newline at end of file
diff --git a/services/mediametrics/include/mediametricsservice/iface_statsd.h b/services/mediametrics/include/mediametricsservice/iface_statsd.h
index a97a386..5bc293b 100644
--- a/services/mediametrics/include/mediametricsservice/iface_statsd.h
+++ b/services/mediametrics/include/mediametricsservice/iface_statsd.h
@@ -30,16 +30,15 @@
extern statsd_pusher statsd_audiothread;
extern statsd_pusher statsd_audiotrack;
extern statsd_pusher statsd_codec;
+extern statsd_pusher statsd_drmmanager;
extern statsd_pusher statsd_extractor;
+extern statsd_pusher statsd_mediadrm;
+extern statsd_pusher statsd_mediadrm_created;
+extern statsd_pusher statsd_mediadrm_errored;
+extern statsd_pusher statsd_mediadrm_session_opened;
extern statsd_pusher statsd_mediaparser;
-extern statsd_pusher statsd_media_drm_created;
-extern statsd_pusher statsd_media_drm_session_opened;
-extern statsd_pusher statsd_media_drm_errored;
-
extern statsd_pusher statsd_nuplayer;
extern statsd_pusher statsd_recorder;
-extern statsd_pusher statsd_mediadrm;
-extern statsd_pusher statsd_drmmanager;
using statsd_puller = bool (const std::shared_ptr<const mediametrics::Item>& item,
AStatsEventList *, const std::shared_ptr<mediametrics::StatsdLog>& statsdLog);
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 1008531..863fdbe 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -18,8 +18,9 @@
#define LOG_TAG "statsd_drm"
#include <utils/Log.h>
#include <media/stagefright/foundation/base64.h>
+#include <binder/IPCThreadState.h>
-#include <stdint.h>
+#include <cstdint>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
@@ -32,6 +33,7 @@
#include <pwd.h>
#include "MediaMetricsService.h"
+#include "MediaDrmStatsdHelper.h"
#include "StringUtils.h"
#include "iface_statsd.h"
@@ -233,66 +235,76 @@
return true;
}
-bool statsd_media_drm_created(const std::shared_ptr<const mediametrics::Item>& item,
+bool statsd_mediadrm_created(const std::shared_ptr<const mediametrics::Item>& item,
const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
int64_t uuid_lsb = -1;
if (!item->getInt64("uuid_lsb", &uuid_lsb)) return false;
int64_t uuid_msb = -1;
if (!item->getInt64("uuid_msb", &uuid_msb)) return false;
- int64_t object_nonce_lsb = -1;
- if (!item->getInt64("object_nonce_lsb", &object_nonce_lsb)) return false;
- int64_t object_nonce_msb = -1;
- if (!item->getInt64("object_nonce_msb", &object_nonce_msb)) return false;
+ const int32_t scheme = MediaDrmStatsdHelper::findDrmScheme(uuid_msb, uuid_lsb);
+ const int32_t uid = IPCThreadState::self()->getCallingUid();
+ int32_t frontend = 0;
+ if (!item->getInt32("frontend", &frontend)) return false;
+
+ // Optional to be included
int64_t apex_version = -1;
item->getInt64("apex_version", &apex_version);
- const int result = stats_write(
- stats::media_metrics::MEDIA_DRM_CREATED,
- uuid_lsb, uuid_msb, object_nonce_lsb,
- object_nonce_msb, apex_version);
+ const int result = stats_write(stats::media_metrics::MEDIA_DRM_CREATED,
+ scheme, uuid_lsb, uuid_msb, uid, frontend, apex_version);
std::stringstream log;
log << "result:" << result << " {"
<< " media_drm_created:"
<< stats::media_metrics::MEDIA_DRM_CREATED
+ << " scheme:" << scheme
<< " uuid_lsb:" << uuid_lsb
<< " uuid_msb:" << uuid_msb
- << " object_nonce_lsb:" << object_nonce_lsb
- << " object_nonce_msb:" << object_nonce_msb
+ << " uid:" << uid
+ << " frontend:" << frontend
<< " apex_version:" << apex_version
<< " }";
statsdLog->log(stats::media_metrics::MEDIA_DRM_CREATED, log.str());
return true;
}
-bool statsd_media_drm_session_opened(const std::shared_ptr<const mediametrics::Item>& item,
+bool statsd_mediadrm_session_opened(const std::shared_ptr<const mediametrics::Item>& item,
const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
- int64_t object_nonce_lsb = -1;
- if (!item->getInt64("object_nonce_lsb", &object_nonce_lsb)) return false;
- int64_t object_nonce_msb = -1;
- if (!item->getInt64("object_nonce_msb", &object_nonce_msb)) return false;
- int64_t session_nonce_lsb = -1;
- if (!item->getInt64("session_nonce_lsb", &session_nonce_lsb)) return false;
- int64_t session_nonce_msb = -1;
- if (!item->getInt64("session_nonce_msb", &session_nonce_msb)) return false;
+ int64_t uuid_lsb = -1;
+ if (!item->getInt64("uuid_lsb", &uuid_lsb)) return false;
+ int64_t uuid_msb = -1;
+ if (!item->getInt64("uuid_msb", &uuid_msb)) return false;
+ const int32_t scheme = MediaDrmStatsdHelper::findDrmScheme(uuid_msb, uuid_lsb);
+ std::string object_nonce = "";
+ if (!item->getString("object_nonce", &object_nonce)) return false;
+ const int32_t uid = IPCThreadState::self()->getCallingUid();
+ int32_t frontend = 0;
+ if (!item->getInt32("frontend", &frontend)) return false;
int32_t requested_security_level = -1;
if (!item->getInt32("requested_security_level", &requested_security_level)) return false;
int32_t opened_security_level = -1;
if (!item->getInt32("opened_security_level", &opened_security_level)) return false;
- const int result = stats_write(
- stats::media_metrics::MEDIA_DRM_SESSION_OPENED, object_nonce_lsb,
- object_nonce_msb, session_nonce_lsb, session_nonce_msb,
- requested_security_level, opened_security_level);
+
+ // Optional to be included
+ int64_t apex_version = -1;
+ item->getInt64("apex_version", &apex_version);
+ const int result = stats_write(stats::media_metrics::MEDIA_DRM_SESSION_OPENED,
+ scheme, uuid_lsb, uuid_msb, uid, frontend, apex_version,
+ object_nonce.c_str(), requested_security_level,
+ opened_security_level);
std::stringstream log;
log << "result:" << result << " {"
<< " media_drm_session_opened:"
<< stats::media_metrics::MEDIA_DRM_SESSION_OPENED
- << " object_nonce_lsb:" << object_nonce_lsb
- << " object_nonce_msb:" << object_nonce_msb
- << " session_nonce_lsb:" << session_nonce_lsb
- << " session_nonce_msb:" << session_nonce_msb
+ << " scheme:" << scheme
+ << " uuid_lsb:" << uuid_lsb
+ << " uuid_msb:" << uuid_msb
+ << " uid:" << uid
+ << " frontend:" << frontend
+ << " apex_version:" << apex_version
+ << " object_nonce:" << object_nonce
<< " requested_security_level:" << requested_security_level
<< " opened_security_level:" << opened_security_level
<< " }";
@@ -300,42 +312,63 @@
return true;
}
-bool statsd_media_drm_errored(const std::shared_ptr<const mediametrics::Item>& item,
+bool statsd_mediadrm_errored(const std::shared_ptr<const mediametrics::Item>& item,
const std::shared_ptr<mediametrics::StatsdLog>& statsdLog)
{
- int64_t object_nonce_lsb = -1;
- if (!item->getInt64("object_nonce_lsb", &object_nonce_lsb)) return false;
- int64_t object_nonce_msb = -1;
- if (!item->getInt64("object_nonce_msb", &object_nonce_msb)) return false;
- int64_t session_nonce_lsb = 0;
- item->getInt64("session_nonce_lsb", &session_nonce_lsb);
- int64_t session_nonce_msb = 0;
- item->getInt64("session_nonce_msb", &session_nonce_msb);
- int32_t api = -1;
- if (!item->getInt32("api", &api)) return false;
+ int64_t uuid_lsb = -1;
+ if (!item->getInt64("uuid_lsb", &uuid_lsb)) return false;
+ int64_t uuid_msb = -1;
+ if (!item->getInt64("uuid_msb", &uuid_msb)) return false;
+ const int32_t scheme = MediaDrmStatsdHelper::findDrmScheme(uuid_msb, uuid_lsb);
+ const int32_t uid = IPCThreadState::self()->getCallingUid();
+ int32_t frontend = 0;
+ if (!item->getInt32("frontend", &frontend)) return false;
+ std::string object_nonce = "";
+ if (!item->getString("object_nonce", &object_nonce)) return false;
+ int32_t security_level = -1;
+ if (!item->getInt32("security_level", &security_level)) return false;
+ std::string api_str = "";
+ if (!item->getString("api", &api_str)) return false;
+ const int32_t api = MediaDrmStatsdHelper::findDrmApi(api_str);
int32_t error_code = -1;
if (!item->getInt32("error_code", &error_code)) return false;
+
+ // Optional to be included
+ int64_t apex_version = -1;
+ item->getInt64("apex_version", &apex_version);
+ std::string session_nonce = "";
+ item->getString("session_nonce", &session_nonce);
+
int32_t cdm_err = 0;
item->getInt32("cdm_err", &cdm_err);
int32_t oem_err = 0;
item->getInt32("oem_err", &oem_err);
- const int result = stats_write(
- stats::media_metrics::MEDIA_DRM_ERRORED, object_nonce_lsb,
- object_nonce_msb, session_nonce_lsb, session_nonce_msb,
- api, error_code, cdm_err, oem_err);
+ int32_t error_context = -1;
+ item->getInt32("error_context", &error_context);
+
+ const int result = stats_write(stats::media_metrics::MEDIA_DRM_ERRORED, scheme, uuid_lsb,
+ uuid_msb, uid, frontend, apex_version, object_nonce.c_str(),
+ session_nonce.c_str(), security_level, api, error_code, cdm_err,
+ oem_err, error_context);
std::stringstream log;
log << "result:" << result << " {"
<< " media_drm_errored:"
<< stats::media_metrics::MEDIA_DRM_ERRORED
- << " object_nonce_lsb:" << object_nonce_lsb
- << " object_nonce_msb:" << object_nonce_msb
- << " session_nonce_lsb:" << session_nonce_lsb
- << " session_nonce_msb:" << session_nonce_msb
+ << " scheme:" << scheme
+ << " uuid_lsb:" << uuid_lsb
+ << " uuid_msb:" << uuid_msb
+ << " uid:" << uid
+ << " frontend:" << frontend
+ << " apex_version:" << apex_version
+ << " object_nonce:" << object_nonce
+ << " session_nonce:" << session_nonce
+ << " security_level:" << security_level
<< " api:" << api
<< " error_code:" << error_code
<< " cdm_err:" << cdm_err
<< " oem_err:" << oem_err
+ << " error_context:" << error_context
<< " }";
statsdLog->log(stats::media_metrics::MEDIA_DRM_ERRORED, log.str());
return true;
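The rewritten pushers split fields into required ones (uuid, frontend, object_nonce, api, error_code), whose absence aborts the push by returning false, and optional ones (apex_version, session_nonce, cdm_err, oem_err, error_context), which keep their defaults when missing. A sketch of that pattern, with a simple map standing in for mediametrics::Item:

#include <cstdint>
#include <map>
#include <string>

using Item = std::map<std::string, int64_t>;

bool getRequired(const Item& item, const std::string& key, int64_t* out) {
    const auto it = item.find(key);
    if (it == item.end()) return false;   // missing required field aborts the push
    *out = it->second;
    return true;
}

void getOptional(const Item& item, const std::string& key, int64_t* out /*keeps default*/) {
    const auto it = item.find(key);
    if (it != item.end()) *out = it->second;
}

bool pushErrored(const Item& item) {
    int64_t uuidLsb = -1, uuidMsb = -1, errorCode = -1;
    if (!getRequired(item, "uuid_lsb", &uuidLsb)) return false;
    if (!getRequired(item, "uuid_msb", &uuidMsb)) return false;
    if (!getRequired(item, "error_code", &errorCode)) return false;
    int64_t apexVersion = -1, errorContext = -1;      // optional, defaults preserved
    getOptional(item, "apex_version", &apexVersion);
    getOptional(item, "error_context", &errorContext);
    // ... the real pusher would call stats_write(...) here
    return true;
}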
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index adf0a5e..5697acd 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -344,7 +344,8 @@
std::shared_ptr<ResourceManagerService> service =
::ndk::SharedRefBase::make<ResourceManagerService>();
binder_status_t status =
- AServiceManager_addService(service->asBinder().get(), getServiceName());
+ AServiceManager_addServiceWithAllowIsolated(
+ service->asBinder().get(), getServiceName(), /*allowIsolated=*/ true);
if (status != STATUS_OK) {
return;
}
diff --git a/services/mediaresourcemanager/ResourceObserverService.cpp b/services/mediaresourcemanager/ResourceObserverService.cpp
index 4e97406..b64afdc 100644
--- a/services/mediaresourcemanager/ResourceObserverService.cpp
+++ b/services/mediaresourcemanager/ResourceObserverService.cpp
@@ -100,8 +100,10 @@
std::shared_ptr<ResourceObserverService> ResourceObserverService::instantiate() {
std::shared_ptr<ResourceObserverService> observerService =
::ndk::SharedRefBase::make<ResourceObserverService>();
- binder_status_t status = AServiceManager_addService(observerService->asBinder().get(),
- ResourceObserverService::getServiceName());
+ binder_status_t status = AServiceManager_addServiceWithAllowIsolated(
+            observerService->asBinder().get(), ResourceObserverService::getServiceName(),
+ /*allowIsolated=*/ true);
+
if (status != STATUS_OK) {
return nullptr;
}
diff --git a/services/oboeservice/AAudioClientTracker.cpp b/services/oboeservice/AAudioClientTracker.cpp
index 054a896..c0dac11 100644
--- a/services/oboeservice/AAudioClientTracker.cpp
+++ b/services/oboeservice/AAudioClientTracker.cpp
@@ -73,13 +73,13 @@
return AAUDIO_ERROR_NULL;
}
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
if (mNotificationClients.count(pid) == 0) {
- sp<IBinder> binder = IInterface::asBinder(client);
- sp<NotificationClient> notificationClient = new NotificationClient(pid, binder);
+ const sp<IBinder> binder = IInterface::asBinder(client);
+ const sp<NotificationClient> notificationClient = new NotificationClient(pid, binder);
mNotificationClients[pid] = notificationClient;
- status_t status = binder->linkToDeath(notificationClient);
+ const status_t status = binder->linkToDeath(notificationClient);
ALOGW_IF(status != NO_ERROR, "registerClient() linkToDeath = %d\n", status);
return AAudioConvert_androidToAAudioResult(status);
} else {
@@ -90,12 +90,12 @@
void AAudioClientTracker::unregisterClient(pid_t pid) {
ALOGV("unregisterClient(), calling pid = %d, getpid() = %d\n", pid, getpid());
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
mNotificationClients.erase(pid);
}
int32_t AAudioClientTracker::getStreamCount(pid_t pid) {
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
auto it = mNotificationClients.find(pid);
if (it != mNotificationClients.end()) {
return it->second->getStreamCount();
@@ -105,18 +105,19 @@
}
aaudio_result_t
-AAudioClientTracker::registerClientStream(pid_t pid, sp<AAudioServiceStreamBase> serviceStream) {
+AAudioClientTracker::registerClientStream(
+ pid_t pid, const sp<AAudioServiceStreamBase>& serviceStream) {
ALOGV("registerClientStream(%d,)\n", pid);
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
return getNotificationClient_l(pid)->registerClientStream(serviceStream);
}
// Find the tracker for this process and remove it.
aaudio_result_t
AAudioClientTracker::unregisterClientStream(pid_t pid,
- sp<AAudioServiceStreamBase> serviceStream) {
+ const sp<AAudioServiceStreamBase>& serviceStream) {
ALOGV("unregisterClientStream(%d,)\n", pid);
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
auto it = mNotificationClients.find(pid);
if (it != mNotificationClients.end()) {
ALOGV("unregisterClientStream(%d,) found NotificationClient\n", pid);
@@ -129,12 +130,12 @@
void AAudioClientTracker::setExclusiveEnabled(pid_t pid, bool enabled) {
ALOGD("%s(%d, %d)\n", __func__, pid, enabled);
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
getNotificationClient_l(pid)->setExclusiveEnabled(enabled);
}
bool AAudioClientTracker::isExclusiveEnabled(pid_t pid) {
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
return getNotificationClient_l(pid)->isExclusiveEnabled();
}
@@ -158,24 +159,21 @@
: mProcessId(pid), mBinder(binder) {
}
-AAudioClientTracker::NotificationClient::~NotificationClient() {
-}
-
int32_t AAudioClientTracker::NotificationClient::getStreamCount() {
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
return mStreams.size();
}
aaudio_result_t AAudioClientTracker::NotificationClient::registerClientStream(
- sp<AAudioServiceStreamBase> serviceStream) {
- std::lock_guard<std::mutex> lock(mLock);
+ const sp<AAudioServiceStreamBase>& serviceStream) {
+ const std::lock_guard<std::mutex> lock(mLock);
mStreams.insert(serviceStream);
return AAUDIO_OK;
}
aaudio_result_t AAudioClientTracker::NotificationClient::unregisterClientStream(
- sp<AAudioServiceStreamBase> serviceStream) {
- std::lock_guard<std::mutex> lock(mLock);
+ const sp<AAudioServiceStreamBase>& serviceStream) {
+ const std::lock_guard<std::mutex> lock(mLock);
mStreams.erase(serviceStream);
return AAUDIO_OK;
}
@@ -189,20 +187,20 @@
std::set<sp<AAudioServiceStreamBase>> streamsToClose;
{
- std::lock_guard<std::mutex> lock(mLock);
+ const std::lock_guard<std::mutex> lock(mLock);
for (const auto& serviceStream : mStreams) {
streamsToClose.insert(serviceStream);
}
}
for (const auto& serviceStream : streamsToClose) {
- aaudio_handle_t handle = serviceStream->getHandle();
+ const aaudio_handle_t handle = serviceStream->getHandle();
ALOGW("binderDied() close abandoned stream 0x%08X\n", handle);
aaudioService->asAAudioServiceInterface().closeStream(handle);
}
// mStreams should be empty now
}
- sp<NotificationClient> keep(this);
+ const sp<NotificationClient> keep(this);
AAudioClientTracker::getInstance().unregisterClient(mProcessId);
}
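The AAudioClientTracker changes pass sp<AAudioServiceStreamBase> by const reference rather than by value, avoiding one atomic ref-count increment/decrement per call; a strong reference is still taken where the pointer is actually stored. An analogy with std::shared_ptr (android::sp behaves the same way for this purpose):

#include <memory>
#include <set>

struct Stream {};

class Tracker {
public:
    void registerStream(const std::shared_ptr<Stream>& stream) {  // no copy on the call itself
        mStreams.insert(stream);                                   // the copy happens only here
    }
private:
    std::set<std::shared_ptr<Stream>> mStreams;
};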
diff --git a/services/oboeservice/AAudioClientTracker.h b/services/oboeservice/AAudioClientTracker.h
index 2b38621..cd3b75a 100644
--- a/services/oboeservice/AAudioClientTracker.h
+++ b/services/oboeservice/AAudioClientTracker.h
@@ -54,10 +54,10 @@
int32_t getStreamCount(pid_t pid);
aaudio_result_t registerClientStream(pid_t pid,
- android::sp<AAudioServiceStreamBase> serviceStream);
+ const android::sp<AAudioServiceStreamBase>& serviceStream);
- aaudio_result_t unregisterClientStream(pid_t pid,
- android::sp<AAudioServiceStreamBase> serviceStream);
+ aaudio_result_t unregisterClientStream(
+ pid_t pid, const android::sp<AAudioServiceStreamBase>& serviceStream);
/**
* Specify whether a process is allowed to create an EXCLUSIVE MMAP stream.
@@ -84,15 +84,17 @@
class NotificationClient : public IBinder::DeathRecipient {
public:
NotificationClient(pid_t pid, const android::sp<IBinder>& binder);
- virtual ~NotificationClient();
+ ~NotificationClient() override = default;
int32_t getStreamCount();
std::string dump() const;
- aaudio_result_t registerClientStream(android::sp<AAudioServiceStreamBase> serviceStream);
+ aaudio_result_t registerClientStream(
+ const android::sp<AAudioServiceStreamBase>& serviceStream);
- aaudio_result_t unregisterClientStream(android::sp<AAudioServiceStreamBase> serviceStream);
+ aaudio_result_t unregisterClientStream(
+ const android::sp<AAudioServiceStreamBase>& serviceStream);
void setExclusiveEnabled(bool enabled) {
mExclusiveEnabled = enabled;
@@ -103,7 +105,7 @@
}
// IBinder::DeathRecipient
- virtual void binderDied(const android::wp<IBinder>& who);
+ void binderDied(const android::wp<IBinder>& who) override;
private:
mutable std::mutex mLock;
diff --git a/services/oboeservice/AAudioCommandQueue.cpp b/services/oboeservice/AAudioCommandQueue.cpp
index 9bd18b3..be80b12 100644
--- a/services/oboeservice/AAudioCommandQueue.cpp
+++ b/services/oboeservice/AAudioCommandQueue.cpp
@@ -25,7 +25,7 @@
namespace aaudio {
-aaudio_result_t AAudioCommandQueue::sendCommand(std::shared_ptr<AAudioCommand> command) {
+aaudio_result_t AAudioCommandQueue::sendCommand(const std::shared_ptr<AAudioCommand>& command) {
{
std::scoped_lock<std::mutex> _l(mLock);
if (!mRunning) {
diff --git a/services/oboeservice/AAudioCommandQueue.h b/services/oboeservice/AAudioCommandQueue.h
index 64442a3..ad62d8a 100644
--- a/services/oboeservice/AAudioCommandQueue.h
+++ b/services/oboeservice/AAudioCommandQueue.h
@@ -26,7 +26,7 @@
namespace aaudio {
-typedef int32_t aaudio_command_opcode;
+using aaudio_command_opcode = int32_t;
class AAudioCommandParam {
public:
@@ -39,7 +39,7 @@
explicit AAudioCommand(
aaudio_command_opcode opCode, std::shared_ptr<AAudioCommandParam> param = nullptr,
bool waitForReply = false, int64_t timeoutNanos = 0)
- : operationCode(opCode), parameter(param), isWaitingForReply(waitForReply),
+ : operationCode(opCode), parameter(std::move(param)), isWaitingForReply(waitForReply),
timeoutNanoseconds(timeoutNanos) { }
virtual ~AAudioCommand() = default;
@@ -66,7 +66,7 @@
 * @return the result of sending the command, or the result of executing the command if the
 * command needs to wait for a reply. If the timeout expires, AAUDIO_ERROR_TIMEOUT is returned.
*/
- aaudio_result_t sendCommand(std::shared_ptr<AAudioCommand> command);
+ aaudio_result_t sendCommand(const std::shared_ptr<AAudioCommand>& command);
/**
* Wait for next available command OR until the timeout is expired.
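
For context, the changes in this header pass the shared_ptr to sendCommand() by const reference and have the AAudioCommand constructor take its param by value and std::move() it into the member. A self-contained sketch (placeholder Param/Command names) of why each choice avoids extra refcount traffic:

    #include <memory>
    #include <utility>

    struct Param {};

    struct Command {
        // Sink parameter: taken by value and moved into the member, so a caller
        // passing an rvalue pays no extra refcount increment inside the constructor.
        explicit Command(std::shared_ptr<Param> param) : parameter(std::move(param)) {}
        std::shared_ptr<Param> parameter;
    };

    // Non-sink parameter: a const reference avoids copying the shared_ptr (and its
    // atomic refcount bump) when the callee only reads through it.
    int sendCommand(const std::shared_ptr<Command>& command) {
        return command ? 0 : -1;
    }

    int main() {
        auto command = std::make_shared<Command>(std::make_shared<Param>());
        return sendCommand(command);
    }
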
diff --git a/services/oboeservice/AAudioEndpointManager.cpp b/services/oboeservice/AAudioEndpointManager.cpp
index 20e4cc5..b5ee2f2 100644
--- a/services/oboeservice/AAudioEndpointManager.cpp
+++ b/services/oboeservice/AAudioEndpointManager.cpp
@@ -162,7 +162,7 @@
const aaudio::AAudioStreamRequest &request,
sp<AAudioServiceEndpoint> &endpointToSteal) {
- std::lock_guard<std::mutex> lock(mExclusiveLock);
+ const std::lock_guard<std::mutex> lock(mExclusiveLock);
const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
@@ -183,19 +183,20 @@
// and START calls. This will help preserve app compatibility.
// An app can avoid having this happen by closing their streams when
// the app is paused.
- pid_t pid = VALUE_OR_FATAL(
+ const pid_t pid = VALUE_OR_FATAL(
aidl2legacy_int32_t_pid_t(request.getAttributionSource().pid));
AAudioClientTracker::getInstance().setExclusiveEnabled(pid, false);
endpointToSteal = endpoint; // return it to caller
}
return nullptr;
} else {
- sp<AAudioServiceEndpointMMAP> endpointMMap = new AAudioServiceEndpointMMAP(aaudioService);
+ const sp<AAudioServiceEndpointMMAP> endpointMMap =
+ new AAudioServiceEndpointMMAP(aaudioService);
ALOGV("%s(), no match so try to open MMAP %p for dev %d",
__func__, endpointMMap.get(), configuration.getDeviceId());
endpoint = endpointMMap;
- aaudio_result_t result = endpoint->open(request);
+ const aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
endpoint.clear();
} else {
@@ -217,10 +218,10 @@
AAudioService &aaudioService,
const aaudio::AAudioStreamRequest &request) {
- std::lock_guard<std::mutex> lock(mSharedLock);
+ const std::lock_guard<std::mutex> lock(mSharedLock);
const AAudioStreamConfiguration &configuration = request.getConstantConfiguration();
- aaudio_direction_t direction = configuration.getDirection();
+ const aaudio_direction_t direction = configuration.getDirection();
// Try to find an existing endpoint.
sp<AAudioServiceEndpointShared> endpoint = findSharedEndpoint_l(configuration);
@@ -228,7 +229,7 @@
// If we can't find an existing one then open a new one.
if (endpoint.get() == nullptr) {
// we must call openStream with audioserver identity
- int64_t token = IPCThreadState::self()->clearCallingIdentity();
+ const int64_t token = IPCThreadState::self()->clearCallingIdentity();
switch (direction) {
case AAUDIO_DIRECTION_INPUT:
endpoint = new AAudioServiceEndpointCapture(aaudioService);
@@ -241,7 +242,7 @@
}
if (endpoint.get() != nullptr) {
- aaudio_result_t result = endpoint->open(request);
+ const aaudio_result_t result = endpoint->open(request);
if (result != AAUDIO_OK) {
endpoint.clear();
} else {
@@ -261,7 +262,7 @@
return endpoint;
}
-void AAudioEndpointManager::closeEndpoint(sp<AAudioServiceEndpoint>serviceEndpoint) {
+void AAudioEndpointManager::closeEndpoint(const sp<AAudioServiceEndpoint>& serviceEndpoint) {
if (serviceEndpoint->getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
return closeExclusiveEndpoint(serviceEndpoint);
} else {
@@ -269,14 +270,15 @@
}
}
-void AAudioEndpointManager::closeExclusiveEndpoint(sp<AAudioServiceEndpoint> serviceEndpoint) {
+void AAudioEndpointManager::closeExclusiveEndpoint(
+ const sp<AAudioServiceEndpoint>& serviceEndpoint) {
if (serviceEndpoint.get() == nullptr) {
return;
}
// Decrement the reference count under this lock.
- std::lock_guard<std::mutex> lock(mExclusiveLock);
- int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
+ const std::lock_guard<std::mutex> lock(mExclusiveLock);
+ const int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
serviceEndpoint->setOpenCount(newRefCount);
// If no longer in use then actually close it.
@@ -292,14 +294,14 @@
}
}
-void AAudioEndpointManager::closeSharedEndpoint(sp<AAudioServiceEndpoint> serviceEndpoint) {
+void AAudioEndpointManager::closeSharedEndpoint(const sp<AAudioServiceEndpoint>& serviceEndpoint) {
if (serviceEndpoint.get() == nullptr) {
return;
}
// Decrement the reference count under this lock.
- std::lock_guard<std::mutex> lock(mSharedLock);
- int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
+ const std::lock_guard<std::mutex> lock(mSharedLock);
+ const int32_t newRefCount = serviceEndpoint->getOpenCount() - 1;
serviceEndpoint->setOpenCount(newRefCount);
// If no longer in use then actually close it.
diff --git a/services/oboeservice/AAudioEndpointManager.h b/services/oboeservice/AAudioEndpointManager.h
index b07bcef..1d38d26 100644
--- a/services/oboeservice/AAudioEndpointManager.h
+++ b/services/oboeservice/AAudioEndpointManager.h
@@ -61,7 +61,7 @@
android::sp<AAudioServiceEndpoint> openEndpoint(android::AAudioService &audioService,
const aaudio::AAudioStreamRequest &request);
- void closeEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
+ void closeEndpoint(const android::sp<AAudioServiceEndpoint>& serviceEndpoint);
private:
android::sp<AAudioServiceEndpoint> openExclusiveEndpoint(android::AAudioService &aaudioService,
@@ -79,8 +79,8 @@
const AAudioStreamConfiguration& configuration)
REQUIRES(mSharedLock);
- void closeExclusiveEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
- void closeSharedEndpoint(android::sp<AAudioServiceEndpoint> serviceEndpoint);
+ void closeExclusiveEndpoint(const android::sp<AAudioServiceEndpoint>& serviceEndpoint);
+ void closeSharedEndpoint(const android::sp<AAudioServiceEndpoint>& serviceEndpoint);
// Use separate locks because opening a Shared endpoint requires opening an Exclusive one.
// That could cause a recursive lock.
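
Aside (illustrative only): closeExclusiveEndpoint() and closeSharedEndpoint() above decrement an open count under the matching lock and tear the endpoint down only when the count reaches zero. A minimal sketch of that reference-counted close, with a placeholder Endpoint type:

    #include <memory>
    #include <mutex>

    struct Endpoint {
        int openCount = 0;
        void close() {}
    };

    class EndpointManager {
    public:
        void closeEndpoint(const std::shared_ptr<Endpoint>& endpoint) {
            if (endpoint == nullptr) return;
            const std::lock_guard<std::mutex> lock(mLock);  // count only changes under the lock
            const int newCount = --endpoint->openCount;
            if (newCount <= 0) {
                endpoint->close();  // last user actually closes the underlying resource
            }
        }
    private:
        std::mutex mLock;
    };
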
diff --git a/services/oboeservice/AAudioMixer.cpp b/services/oboeservice/AAudioMixer.cpp
index ad4b830..6890985 100644
--- a/services/oboeservice/AAudioMixer.cpp
+++ b/services/oboeservice/AAudioMixer.cpp
@@ -45,7 +45,8 @@
memset(mOutputBuffer.get(), 0, mBufferSizeInBytes);
}
-int32_t AAudioMixer::mix(int streamIndex, std::shared_ptr<FifoBuffer> fifo, bool allowUnderflow) {
+int32_t AAudioMixer::mix(
+ int streamIndex, const std::shared_ptr<FifoBuffer>& fifo, bool allowUnderflow) {
WrappingBuffer wrappingBuffer;
float *destination = mOutputBuffer.get();
diff --git a/services/oboeservice/AAudioMixer.h b/services/oboeservice/AAudioMixer.h
index 1a120f2..d773178 100644
--- a/services/oboeservice/AAudioMixer.h
+++ b/services/oboeservice/AAudioMixer.h
@@ -24,7 +24,7 @@
class AAudioMixer {
public:
- AAudioMixer() {}
+ AAudioMixer() = default;
void allocate(int32_t samplesPerFrame, int32_t framesPerBurst);
@@ -37,7 +37,9 @@
* @param allowUnderflow if true then allow mixer to advance read index past the write index
* @return frames read from this stream
*/
- int32_t mix(int streamIndex, std::shared_ptr<android::FifoBuffer> fifo, bool allowUnderflow);
+ int32_t mix(int streamIndex,
+ const std::shared_ptr<android::FifoBuffer>& fifo,
+ bool allowUnderflow);
float *getOutputBuffer();
diff --git a/services/oboeservice/AAudioService.cpp b/services/oboeservice/AAudioService.cpp
index 2679b2e..e9c0884 100644
--- a/services/oboeservice/AAudioService.cpp
+++ b/services/oboeservice/AAudioService.cpp
@@ -62,7 +62,7 @@
AAudioClientTracker::getInstance().setAAudioService(this);
}
-status_t AAudioService::dump(int fd, const Vector<String16>& args) {
+status_t AAudioService::dump(int fd, const Vector<String16>& /*args*/) {
std::string result;
if (!dumpAllowed()) {
@@ -83,7 +83,7 @@
}
Status AAudioService::registerClient(const sp<IAAudioClient> &client) {
- pid_t pid = IPCThreadState::self()->getCallingPid();
+ const pid_t pid = IPCThreadState::self()->getCallingPid();
AAudioClientTracker::getInstance().registerClient(pid, client);
return Status::ok();
}
@@ -106,24 +106,24 @@
// 4) Thread A can then get the lock and also open a shared stream.
    // Without the lock, Thread A might sneak in and reallocate an exclusive stream
// before B can open the shared stream.
- std::unique_lock<std::recursive_mutex> lock(mOpenLock);
+ const std::unique_lock<std::recursive_mutex> lock(mOpenLock);
aaudio_result_t result = AAUDIO_OK;
sp<AAudioServiceStreamBase> serviceStream;
const AAudioStreamConfiguration &configurationInput = request.getConstantConfiguration();
- bool sharingModeMatchRequired = request.isSharingModeMatchRequired();
- aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
+ const bool sharingModeMatchRequired = request.isSharingModeMatchRequired();
+ const aaudio_sharing_mode_t sharingMode = configurationInput.getSharingMode();
// Enforce limit on client processes.
AttributionSourceState attributionSource = request.getAttributionSource();
- pid_t pid = IPCThreadState::self()->getCallingPid();
+ const pid_t pid = IPCThreadState::self()->getCallingPid();
attributionSource.pid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
legacy2aidl_pid_t_int32_t(pid));
attributionSource.uid = VALUE_OR_RETURN_ILLEGAL_ARG_STATUS(
legacy2aidl_uid_t_int32_t(IPCThreadState::self()->getCallingUid()));
attributionSource.token = sp<BBinder>::make();
if (attributionSource.pid != mAudioClient.attributionSource.pid) {
- int32_t count = AAudioClientTracker::getInstance().getStreamCount(pid);
+ const int32_t count = AAudioClientTracker::getInstance().getStreamCount(pid);
if (count >= MAX_STREAMS_PER_PROCESS) {
ALOGE("openStream(): exceeded max streams per process %d >= %d",
count, MAX_STREAMS_PER_PROCESS);
@@ -168,7 +168,7 @@
serviceStream.clear();
AIDL_RETURN(result);
} else {
- aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
+ const aaudio_handle_t handle = mStreamTracker.addStreamForHandle(serviceStream.get());
serviceStream->setHandle(handle);
AAudioClientTracker::getInstance().registerClientStream(pid, serviceStream);
paramsOut.copyFrom(*serviceStream);
@@ -185,7 +185,7 @@
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
// Check permission and ownership first.
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("closeStream(0x%0x), illegal stream handle", streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -197,13 +197,13 @@
int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
}
AudioEndpointParcelable endpointParcelable;
- aaudio_result_t result = serviceStream->getDescription(endpointParcelable);
+ const aaudio_result_t result = serviceStream->getDescription(endpointParcelable);
if (result == AAUDIO_OK) {
*endpoint = std::move(endpointParcelable).parcelable();
}
@@ -213,7 +213,7 @@
Status AAudioService::startStream(int32_t streamHandle, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -224,7 +224,7 @@
Status AAudioService::pauseStream(int32_t streamHandle, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -235,7 +235,7 @@
Status AAudioService::stopStream(int32_t streamHandle, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -246,7 +246,7 @@
Status AAudioService::flushStream(int32_t streamHandle, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -254,16 +254,16 @@
AIDL_RETURN(serviceStream->flush());
}
-Status AAudioService::registerAudioThread(int32_t streamHandle, int32_t clientThreadId, int64_t periodNanoseconds,
- int32_t *_aidl_return) {
+Status AAudioService::registerAudioThread(int32_t streamHandle, int32_t clientThreadId,
+ int64_t /*periodNanoseconds*/, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
}
- int32_t priority = isCallerInService()
+ const int32_t priority = isCallerInService()
? kRealTimeAudioPriorityService : kRealTimeAudioPriorityClient;
AIDL_RETURN(serviceStream->registerAudioThread(clientThreadId, priority));
}
@@ -272,7 +272,7 @@
int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
@@ -283,13 +283,13 @@
Status AAudioService::exitStandby(int32_t streamHandle, Endpoint* endpoint, int32_t *_aidl_return) {
static_assert(std::is_same_v<aaudio_result_t, std::decay_t<typeof(*_aidl_return)>>);
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGE("getStreamDescription(), illegal stream handle = 0x%0x", streamHandle);
AIDL_RETURN(AAUDIO_ERROR_INVALID_HANDLE);
}
AudioEndpointParcelable endpointParcelable;
- aaudio_result_t result = serviceStream->exitStandby(&endpointParcelable);
+ const aaudio_result_t result = serviceStream->exitStandby(&endpointParcelable);
if (result == AAUDIO_OK) {
*endpoint = std::move(endpointParcelable).parcelable();
}
@@ -297,16 +297,18 @@
}
bool AAudioService::isCallerInService() {
- pid_t clientPid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mAudioClient.attributionSource.pid));
- uid_t clientUid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
+ const pid_t clientPid = VALUE_OR_FATAL(
+ aidl2legacy_int32_t_pid_t(mAudioClient.attributionSource.pid));
+ const uid_t clientUid = VALUE_OR_FATAL(
+ aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
return clientPid == IPCThreadState::self()->getCallingPid() &&
clientUid == IPCThreadState::self()->getCallingUid();
}
-aaudio_result_t AAudioService::closeStream(sp<AAudioServiceStreamBase> serviceStream) {
+aaudio_result_t AAudioService::closeStream(const sp<AAudioServiceStreamBase>& serviceStream) {
// This is protected by a lock in AAudioClientTracker.
// It is safe to unregister the same stream twice.
- pid_t pid = serviceStream->getOwnerProcessId();
+ const pid_t pid = serviceStream->getOwnerProcessId();
AAudioClientTracker::getInstance().unregisterClientStream(pid, serviceStream);
// This is protected by a lock in mStreamTracker.
// It is safe to remove the same stream twice.
@@ -325,10 +327,10 @@
const uid_t ownerUserId = serviceStream->getOwnerUserId();
const uid_t clientUid = VALUE_OR_FATAL(
aidl2legacy_int32_t_uid_t(mAudioClient.attributionSource.uid));
- bool callerOwnsIt = callingUserId == ownerUserId;
- bool serverCalling = callingUserId == clientUid;
- bool serverOwnsIt = ownerUserId == clientUid;
- bool allowed = callerOwnsIt || serverCalling || serverOwnsIt;
+ const bool callerOwnsIt = callingUserId == ownerUserId;
+ const bool serverCalling = callingUserId == clientUid;
+ const bool serverOwnsIt = ownerUserId == clientUid;
+ const bool allowed = callerOwnsIt || serverCalling || serverOwnsIt;
if (!allowed) {
ALOGE("AAudioService: calling uid %d cannot access stream 0x%08X owned by %d",
callingUserId, streamHandle, ownerUserId);
@@ -342,7 +344,7 @@
const android::AudioClient& client,
const audio_attributes_t *attr,
audio_port_handle_t *clientHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -352,7 +354,7 @@
aaudio_result_t AAudioService::stopClient(aaudio_handle_t streamHandle,
audio_port_handle_t portHandle) {
- sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
+ const sp<AAudioServiceStreamBase> serviceStream = convertHandleToServiceStream(streamHandle);
if (serviceStream.get() == nullptr) {
ALOGW("%s(), invalid streamHandle = 0x%0x", __func__, streamHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
@@ -364,14 +366,14 @@
// So we do not have to check permissions.
aaudio_result_t AAudioService::disconnectStreamByPortHandle(audio_port_handle_t portHandle) {
ALOGD("%s(%d) called", __func__, portHandle);
- sp<AAudioServiceStreamBase> serviceStream =
+ const sp<AAudioServiceStreamBase> serviceStream =
mStreamTracker.findStreamByPortHandle(portHandle);
if (serviceStream.get() == nullptr) {
ALOGE("%s(), could not find stream with portHandle = %d", __func__, portHandle);
return AAUDIO_ERROR_INVALID_HANDLE;
}
// This is protected by a lock and will just return if already stopped.
- aaudio_result_t result = serviceStream->stop();
+ const aaudio_result_t result = serviceStream->stop();
serviceStream->disconnect();
return result;
}
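
For readability, the access rule enforced above (a stream may be touched by its owner, by the audio server acting as the caller, or when the server itself owns the stream) can be written as a standalone predicate. A minimal sketch with plain integer uids; uid_t is narrowed to uint32_t only to keep the example self-contained:

    #include <cstdint>

    // Mirrors the callerOwnsIt || serverCalling || serverOwnsIt check above.
    bool mayAccessStream(uint32_t callingUid, uint32_t ownerUid, uint32_t serviceUid) {
        const bool callerOwnsIt = callingUid == ownerUid;    // app touching its own stream
        const bool serverCalling = callingUid == serviceUid; // audioserver is the caller
        const bool serverOwnsIt = ownerUid == serviceUid;    // stream created by the service
        return callerOwnsIt || serverCalling || serverOwnsIt;
    }
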
diff --git a/services/oboeservice/AAudioService.h b/services/oboeservice/AAudioService.h
index 0a111fb..df66f1b 100644
--- a/services/oboeservice/AAudioService.h
+++ b/services/oboeservice/AAudioService.h
@@ -45,7 +45,7 @@
public:
AAudioService();
- virtual ~AAudioService() = default;
+ ~AAudioService() override = default;
aaudio::AAudioServiceInterface& asAAudioServiceInterface() {
return mAdapter;
@@ -53,7 +53,7 @@
static const char* getServiceName() { return AAUDIO_SERVICE_NAME; }
- virtual status_t dump(int fd, const Vector<String16>& args) override;
+ status_t dump(int fd, const Vector<String16>& args) override;
binder::Status registerClient(const ::android::sp<::aaudio::IAAudioClient>& client) override;
@@ -103,7 +103,7 @@
* This is only called from within the Service.
* It bypasses the permission checks in closeStream(handle).
*/
- aaudio_result_t closeStream(sp<aaudio::AAudioServiceStreamBase> serviceStream);
+ aaudio_result_t closeStream(const sp<aaudio::AAudioServiceStreamBase>& serviceStream);
private:
class Adapter : public aaudio::AAudioBinderAdapter {
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index fd546f6..2b94dbf 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -88,7 +88,7 @@
// @return true if stream found
bool AAudioServiceEndpoint::isStreamRegistered(audio_port_handle_t portHandle) {
- std::lock_guard<std::mutex> lock(mLockStreams);
+ const std::lock_guard<std::mutex> lock(mLockStreams);
for (const auto& stream : mRegisteredStreams) {
if (stream->getPortHandle() == portHandle) {
return true;
@@ -101,7 +101,7 @@
AAudioServiceEndpoint::disconnectRegisteredStreams() {
std::vector<android::sp<AAudioServiceStreamBase>> streamsDisconnected;
{
- std::lock_guard<std::mutex> lock(mLockStreams);
+ const std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.swap(streamsDisconnected);
}
mConnected.store(false);
@@ -122,7 +122,7 @@
void AAudioServiceEndpoint::releaseRegisteredStreams() {
// List of streams to be closed after we disconnect everything.
- std::vector<android::sp<AAudioServiceStreamBase>> streamsToClose
+ const std::vector<android::sp<AAudioServiceStreamBase>> streamsToClose
= disconnectRegisteredStreams();
// Close outside the lock to avoid recursive locks.
@@ -133,14 +133,14 @@
}
}
-aaudio_result_t AAudioServiceEndpoint::registerStream(sp<AAudioServiceStreamBase>stream) {
- std::lock_guard<std::mutex> lock(mLockStreams);
+aaudio_result_t AAudioServiceEndpoint::registerStream(const sp<AAudioServiceStreamBase>& stream) {
+ const std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.push_back(stream);
return AAUDIO_OK;
}
-aaudio_result_t AAudioServiceEndpoint::unregisterStream(sp<AAudioServiceStreamBase>stream) {
- std::lock_guard<std::mutex> lock(mLockStreams);
+aaudio_result_t AAudioServiceEndpoint::unregisterStream(const sp<AAudioServiceStreamBase>& stream) {
+ const std::lock_guard<std::mutex> lock(mLockStreams);
mRegisteredStreams.erase(std::remove(
mRegisteredStreams.begin(), mRegisteredStreams.end(), stream),
mRegisteredStreams.end());
diff --git a/services/oboeservice/AAudioServiceEndpoint.h b/services/oboeservice/AAudioServiceEndpoint.h
index 92004c5..dff571b 100644
--- a/services/oboeservice/AAudioServiceEndpoint.h
+++ b/services/oboeservice/AAudioServiceEndpoint.h
@@ -43,7 +43,7 @@
, public AAudioStreamParameters {
public:
- virtual ~AAudioServiceEndpoint();
+ ~AAudioServiceEndpoint() override;
virtual std::string dump() const;
@@ -55,9 +55,9 @@
*/
virtual void close() = 0;
- aaudio_result_t registerStream(android::sp<AAudioServiceStreamBase> stream);
+ aaudio_result_t registerStream(const android::sp<AAudioServiceStreamBase>& stream);
- aaudio_result_t unregisterStream(android::sp<AAudioServiceStreamBase> stream);
+ aaudio_result_t unregisterStream(const android::sp<AAudioServiceStreamBase>& stream);
virtual aaudio_result_t startStream(android::sp<AAudioServiceStreamBase> stream,
audio_port_handle_t *clientHandle) = 0;
@@ -65,14 +65,14 @@
virtual aaudio_result_t stopStream(android::sp<AAudioServiceStreamBase> stream,
audio_port_handle_t clientHandle) = 0;
- virtual aaudio_result_t startClient(const android::AudioClient& client,
- const audio_attributes_t *attr,
- audio_port_handle_t *clientHandle) {
+ virtual aaudio_result_t startClient(const android::AudioClient& /*client*/,
+ const audio_attributes_t* /*attr*/,
+ audio_port_handle_t* /*clientHandle*/) {
ALOGD("AAudioServiceEndpoint::startClient(...) AAUDIO_ERROR_UNAVAILABLE");
return AAUDIO_ERROR_UNAVAILABLE;
}
- virtual aaudio_result_t stopClient(audio_port_handle_t clientHandle) {
+ virtual aaudio_result_t stopClient(audio_port_handle_t /*clientHandle*/) {
ALOGD("AAudioServiceEndpoint::stopClient(...) AAUDIO_ERROR_UNAVAILABLE");
return AAUDIO_ERROR_UNAVAILABLE;
}
@@ -82,7 +82,7 @@
return AAUDIO_ERROR_UNAVAILABLE;
}
- virtual aaudio_result_t exitStandby(AudioEndpointParcelable* parcelable) {
+ virtual aaudio_result_t exitStandby(AudioEndpointParcelable* /*parcelable*/) {
ALOGD("AAudioServiceEndpoint::exitStandby() AAUDIO_ERROR_UNAVAILABLE");
return AAUDIO_ERROR_UNAVAILABLE;
}
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.cpp b/services/oboeservice/AAudioServiceEndpointCapture.cpp
index 95bd4bb..ba8070b 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.cpp
+++ b/services/oboeservice/AAudioServiceEndpointCapture.cpp
@@ -90,5 +90,5 @@
}
ALOGD("callbackLoop() exiting");
- return NULL; // TODO review
+ return nullptr; // TODO review
}
diff --git a/services/oboeservice/AAudioServiceEndpointCapture.h b/services/oboeservice/AAudioServiceEndpointCapture.h
index 2ca43cf..b77100d 100644
--- a/services/oboeservice/AAudioServiceEndpointCapture.h
+++ b/services/oboeservice/AAudioServiceEndpointCapture.h
@@ -30,7 +30,7 @@
class AAudioServiceEndpointCapture : public AAudioServiceEndpointShared {
public:
explicit AAudioServiceEndpointCapture(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpointCapture() = default;
+ ~AAudioServiceEndpointCapture() override = default;
aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 3d237b3..d4cdb0b 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -37,7 +37,7 @@
#include "AAudioServiceEndpointPlay.h"
#include "AAudioServiceEndpointMMAP.h"
-#define AAUDIO_BUFFER_CAPACITY_MIN 4 * 512
+#define AAUDIO_BUFFER_CAPACITY_MIN (4 * 512)
#define AAUDIO_SAMPLE_RATE_DEFAULT 48000
// This is an estimate of the time difference between the HW and the MMAP time.
@@ -85,7 +85,7 @@
return it != NEXT_FORMAT_TO_TRY.end() ? it->second : AUDIO_FORMAT_DEFAULT;
}
-}
+} // namespace
aaudio_result_t AAudioServiceEndpointMMAP::open(const aaudio::AAudioStreamRequest &request) {
aaudio_result_t result = AAUDIO_OK;
@@ -111,7 +111,7 @@
audio_format_t nextFormatToTry = AUDIO_FORMAT_DEFAULT;
result = openWithFormat(audioFormat, &nextFormatToTry);
- if (result == AAUDIO_OK || result != AAUDIO_ERROR_UNAVAILABLE) {
+ if (result != AAUDIO_ERROR_UNAVAILABLE) {
        // Return if the open succeeded or failed with an error other than
        // AAUDIO_ERROR_UNAVAILABLE.
ALOGI("Opened format=%#x with result=%d", audioFormat, result);
@@ -165,12 +165,12 @@
return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
}
- MmapStreamInterface::stream_direction_t streamDirection =
+ const MmapStreamInterface::stream_direction_t streamDirection =
(direction == AAUDIO_DIRECTION_OUTPUT)
? MmapStreamInterface::DIRECTION_OUTPUT
: MmapStreamInterface::DIRECTION_INPUT;
- aaudio_session_id_t requestedSessionId = getSessionId();
+ const aaudio_session_id_t requestedSessionId = getSessionId();
audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
// Open HAL stream. Set mMmapStream
@@ -178,15 +178,15 @@
"sample_rate=%u, channel_mask=%#x, device=%d",
__func__, config.format, config.sample_rate,
config.channel_mask, deviceId);
- status_t status = MmapStreamInterface::openMmapStream(streamDirection,
- &attributes,
- &config,
- mMmapClient,
- &deviceId,
- &sessionId,
- this, // callback
- mMmapStream,
- &mPortHandle);
+ const status_t status = MmapStreamInterface::openMmapStream(streamDirection,
+ &attributes,
+ &config,
+ mMmapClient,
+ &deviceId,
+ &sessionId,
+ this, // callback
+ mMmapStream,
+ &mPortHandle);
ALOGD("%s() mMapClient.attributionSource = %s => portHandle = %d\n",
__func__, mMmapClient.attributionSource.toString().c_str(), mPortHandle);
if (status != OK) {
@@ -209,7 +209,7 @@
ALOGW("%s() - openMmapStream() failed to set sessionId", __func__);
}
- aaudio_session_id_t actualSessionId =
+ const aaudio_session_id_t actualSessionId =
(requestedSessionId == AAUDIO_SESSION_ID_NONE)
? AAUDIO_SESSION_ID_NONE
: (aaudio_session_id_t) sessionId;
@@ -278,7 +278,7 @@
if (stream != nullptr) {
attr = getAudioAttributesFrom(stream.get());
}
- aaudio_result_t result = startClient(
+ const aaudio_result_t result = startClient(
mMmapClient, stream == nullptr ? nullptr : &attr, &tempHandle);
// When AudioFlinger is passed a valid port handle then it should not change it.
LOG_ALWAYS_FATAL_IF(tempHandle != mPortHandle,
@@ -288,8 +288,8 @@
return result;
}
-aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> stream,
- audio_port_handle_t clientHandle __unused) {
+aaudio_result_t AAudioServiceEndpointMMAP::stopStream(sp<AAudioServiceStreamBase> /*stream*/,
+ audio_port_handle_t /*clientHandle*/) {
mFramesTransferred.reset32();
// Round 64-bit counter up to a multiple of the buffer capacity.
@@ -306,23 +306,21 @@
aaudio_result_t AAudioServiceEndpointMMAP::startClient(const android::AudioClient& client,
const audio_attributes_t *attr,
audio_port_handle_t *clientHandle) {
- if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
- status_t status = mMmapStream->start(client, attr, clientHandle);
- return AAudioConvert_androidToAAudioResult(status);
+ return mMmapStream == nullptr
+ ? AAUDIO_ERROR_NULL
+ : AAudioConvert_androidToAAudioResult(mMmapStream->start(client, attr, clientHandle));
}
aaudio_result_t AAudioServiceEndpointMMAP::stopClient(audio_port_handle_t clientHandle) {
- if (mMmapStream == nullptr) return AAUDIO_ERROR_NULL;
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
- return result;
+ return mMmapStream == nullptr
+ ? AAUDIO_ERROR_NULL
+ : AAudioConvert_androidToAAudioResult(mMmapStream->stop(clientHandle));
}
aaudio_result_t AAudioServiceEndpointMMAP::standby() {
- if (mMmapStream == nullptr) {
- return AAUDIO_ERROR_NULL;
- }
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(mMmapStream->standby());
- return result;
+ return mMmapStream == nullptr
+ ? AAUDIO_ERROR_NULL
+ : AAudioConvert_androidToAAudioResult(mMmapStream->standby());
}
aaudio_result_t AAudioServiceEndpointMMAP::exitStandby(AudioEndpointParcelable* parcelable) {
@@ -330,11 +328,12 @@
return AAUDIO_ERROR_NULL;
}
mAudioDataFileDescriptor.reset();
- aaudio_result_t result = createMmapBuffer(&mAudioDataFileDescriptor);
+ const aaudio_result_t result = createMmapBuffer(&mAudioDataFileDescriptor);
if (result == AAUDIO_OK) {
- int32_t bytesPerFrame = calculateBytesPerFrame();
- int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
- int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+ const int32_t bytesPerFrame = calculateBytesPerFrame();
+ const int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
+ const int fdIndex = parcelable->addFileDescriptor(
+ mAudioDataFileDescriptor, capacityInBytes);
parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
@@ -350,10 +349,10 @@
if (mMmapStream == nullptr) {
return AAUDIO_ERROR_NULL;
}
- status_t status = mMmapStream->getMmapPosition(&position);
+ const status_t status = mMmapStream->getMmapPosition(&position);
ALOGV("%s() status= %d, pos = %d, nanos = %lld\n",
__func__, status, position.position_frames, (long long) position.time_nanoseconds);
- aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
+ const aaudio_result_t result = AAudioConvert_androidToAAudioResult(status);
if (result == AAUDIO_ERROR_UNAVAILABLE) {
ALOGW("%s(): getMmapPosition() has no position data available", __func__);
} else if (result != AAUDIO_OK) {
@@ -367,8 +366,8 @@
return result;
}
-aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t *positionFrames,
- int64_t *timeNanos) {
+aaudio_result_t AAudioServiceEndpointMMAP::getTimestamp(int64_t* /*positionFrames*/,
+ int64_t* /*timeNanos*/) {
return 0; // TODO
}
@@ -381,7 +380,7 @@
} else {
// Must be a SHARED stream?
ALOGD("%s(%d) disconnect a specific stream", __func__, portHandle);
- aaudio_result_t result = mAAudioService.disconnectStreamByPortHandle(portHandle);
+ const aaudio_result_t result = mAAudioService.disconnectStreamByPortHandle(portHandle);
ALOGD("%s(%d) disconnectStreamByPortHandle returned %d", __func__, portHandle, result);
}
};
@@ -389,7 +388,7 @@
// This is called by AudioFlinger when it wants to destroy a stream.
void AAudioServiceEndpointMMAP::onTearDown(audio_port_handle_t portHandle) {
ALOGD("%s(portHandle = %d) called", __func__, portHandle);
- android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ const android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
std::thread asyncTask([holdEndpoint, portHandle]() {
holdEndpoint->handleTearDownAsync(portHandle);
});
@@ -398,18 +397,18 @@
void AAudioServiceEndpointMMAP::onVolumeChanged(float volume) {
ALOGD("%s() volume = %f", __func__, volume);
- std::lock_guard<std::mutex> lock(mLockStreams);
+ const std::lock_guard<std::mutex> lock(mLockStreams);
for(const auto& stream : mRegisteredStreams) {
stream->onVolumeChanged(volume);
}
};
void AAudioServiceEndpointMMAP::onRoutingChanged(audio_port_handle_t portHandle) {
- const int32_t deviceId = static_cast<int32_t>(portHandle);
+ const auto deviceId = static_cast<int32_t>(portHandle);
ALOGD("%s() called with dev %d, old = %d", __func__, deviceId, getDeviceId());
if (getDeviceId() != deviceId) {
if (getDeviceId() != AUDIO_PORT_HANDLE_NONE) {
- android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
+ const android::sp<AAudioServiceEndpointMMAP> holdEndpoint(this);
std::thread asyncTask([holdEndpoint, deviceId]() {
ALOGD("onRoutingChanged() asyncTask launched");
holdEndpoint->disconnectRegisteredStreams();
@@ -429,9 +428,9 @@
AudioEndpointParcelable* parcelable)
{
// Gather information on the data queue based on HAL info.
- int32_t bytesPerFrame = calculateBytesPerFrame();
- int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
- int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
+ const int32_t bytesPerFrame = calculateBytesPerFrame();
+ const int32_t capacityInBytes = getBufferCapacity() * bytesPerFrame;
+ const int fdIndex = parcelable->addFileDescriptor(mAudioDataFileDescriptor, capacityInBytes);
parcelable->mDownDataQueueParcelable.setupMemory(fdIndex, 0, capacityInBytes);
parcelable->mDownDataQueueParcelable.setBytesPerFrame(bytesPerFrame);
parcelable->mDownDataQueueParcelable.setFramesPerBurst(mFramesPerBurst);
@@ -447,7 +446,7 @@
}
uint64_t tempPositionFrames;
int64_t tempTimeNanos;
- status_t status = mMmapStream->getExternalPosition(&tempPositionFrames, &tempTimeNanos);
+ const status_t status = mMmapStream->getExternalPosition(&tempPositionFrames, &tempTimeNanos);
if (status != OK) {
        // getExternalPosition reported an error. The HAL may not support the API. Cache the result
// so that the call will not go to the HAL next time.
@@ -527,8 +526,8 @@
if (minSizeFrames <= 0) { // zero will get rejected
minSizeFrames = AAUDIO_BUFFER_CAPACITY_MIN;
}
- status_t status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
- bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
+ const status_t status = mMmapStream->createMmapBuffer(minSizeFrames, &mMmapBufferinfo);
+ const bool isBufferShareable = mMmapBufferinfo.flags & AUDIO_MMAP_APPLICATION_SHAREABLE;
if (status != OK) {
ALOGE("%s() - createMmapBuffer() failed with status %d %s",
__func__, status, strerror(-status));
@@ -545,7 +544,7 @@
setBufferCapacity(mMmapBufferinfo.buffer_size_frames);
if (!isBufferShareable) {
// Exclusive mode can only be used by the service because the FD cannot be shared.
- int32_t audioServiceUid =
+ const int32_t audioServiceUid =
VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
if ((mMmapClient.attributionSource.uid != audioServiceUid) &&
getSharingMode() == AAUDIO_SHARING_MODE_EXCLUSIVE) {
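
The startClient()/stopClient()/standby() rewrites above collapse the "null-check, call, convert status" sequence into a single conditional expression. A self-contained sketch of that shape, with placeholder Stream, status, and result types:

    #include <memory>

    using status_t = int;
    using result_t = int;
    constexpr result_t ERROR_NULL = -1;

    struct Stream { status_t standby() { return 0; } };

    result_t convert(status_t status) { return status == 0 ? 0 : -2; }

    // Equivalent to: if (!stream) return ERROR_NULL; return convert(stream->standby());
    result_t standby(const std::shared_ptr<Stream>& stream) {
        return stream == nullptr
                ? ERROR_NULL
                : convert(stream->standby());
    }
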
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 73e0f61..4f77393 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -44,7 +44,7 @@
public:
explicit AAudioServiceEndpointMMAP(android::AAudioService &audioService);
- virtual ~AAudioServiceEndpointMMAP() = default;
+ ~AAudioServiceEndpointMMAP() override = default;
std::string dump() const override;
diff --git a/services/oboeservice/AAudioServiceEndpointPlay.cpp b/services/oboeservice/AAudioServiceEndpointPlay.cpp
index 2a5939f..637405d 100644
--- a/services/oboeservice/AAudioServiceEndpointPlay.cpp
+++ b/services/oboeservice/AAudioServiceEndpointPlay.cpp
@@ -158,5 +158,5 @@
ALOGD("%s() exiting, enabled = %d, state = %d, result = %d <<<<<<<<<<<<< MIXER",
__func__, mCallbackEnabled.load(), getStreamInternal()->getState(), result);
- return NULL; // TODO review
+ return nullptr; // TODO review
}
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 02202d8..1dd0c3a 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -103,8 +103,7 @@
// Prevent the stream from being deleted while being used.
// This is just for extra safety. It is probably not needed because
// this callback should be joined before the stream is closed.
- AAudioServiceEndpointShared *endpointPtr =
- static_cast<AAudioServiceEndpointShared *>(arg);
+ auto endpointPtr = static_cast<AAudioServiceEndpointShared *>(arg);
android::sp<AAudioServiceEndpointShared> endpoint(endpointPtr);
// Balance the incStrong() in startSharingThread_l().
endpoint->decStrong(nullptr);
@@ -140,7 +139,7 @@
aaudio_result_t aaudio::AAudioServiceEndpointShared::stopSharingThread() {
mCallbackEnabled.store(false);
- return getStreamInternal()->joinThread(NULL);
+ return getStreamInternal()->joinThread(nullptr);
}
aaudio_result_t AAudioServiceEndpointShared::startStream(
@@ -180,8 +179,8 @@
return result;
}
-aaudio_result_t AAudioServiceEndpointShared::stopStream(sp<AAudioServiceStreamBase> sharedStream,
- audio_port_handle_t clientHandle) {
+aaudio_result_t AAudioServiceEndpointShared::stopStream(
+ sp<AAudioServiceStreamBase> /*sharedStream*/, audio_port_handle_t clientHandle) {
// Ignore result.
(void) getStreamInternal()->stopClient(clientHandle);
diff --git a/services/oboeservice/AAudioServiceEndpointShared.h b/services/oboeservice/AAudioServiceEndpointShared.h
index 3e760c4..0efb227 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.h
+++ b/services/oboeservice/AAudioServiceEndpointShared.h
@@ -39,7 +39,7 @@
public:
explicit AAudioServiceEndpointShared(AudioStreamInternal *streamInternal);
- virtual ~AAudioServiceEndpointShared() = default;
+ ~AAudioServiceEndpointShared() override = default;
std::string dump() const override;
@@ -79,6 +79,6 @@
std::atomic<int> mRunningStreamCount{0};
};
-}
+} // namespace aaudio
#endif //AAUDIO_SERVICE_ENDPOINT_SHARED_H
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 35d712c..8e1e497 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -83,9 +83,8 @@
}
std::string AAudioServiceStreamBase::dumpHeader() {
- return std::string(
- " T Handle UId Port Run State Format Burst Chan Mask Capacity"
- " HwFormat HwChan HwRate");
+ return {" T Handle UId Port Run State Format Burst Chan Mask Capacity"
+ " HwFormat HwChan HwRate"};
}
std::string AAudioServiceStreamBase::dump() const {
@@ -477,8 +476,7 @@
disconnect_l();
break;
case REGISTER_AUDIO_THREAD: {
- RegisterAudioThreadParam *param =
- (RegisterAudioThreadParam *) command->parameter.get();
+ auto param = (RegisterAudioThreadParam *) command->parameter.get();
command->result =
param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
: registerAudioThread_l(param->mOwnerPid,
@@ -487,21 +485,20 @@
}
break;
case UNREGISTER_AUDIO_THREAD: {
- UnregisterAudioThreadParam *param =
- (UnregisterAudioThreadParam *) command->parameter.get();
+ auto param = (UnregisterAudioThreadParam *) command->parameter.get();
command->result =
param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
: unregisterAudioThread_l(param->mClientThreadId);
}
break;
case GET_DESCRIPTION: {
- GetDescriptionParam *param = (GetDescriptionParam *) command->parameter.get();
+ auto param = (GetDescriptionParam *) command->parameter.get();
command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
: getDescription_l(param->mParcelable);
}
break;
case EXIT_STANDBY: {
- ExitStandbyParam *param = (ExitStandbyParam *) command->parameter.get();
+ auto param = (ExitStandbyParam *) command->parameter.get();
command->result = param == nullptr ? AAUDIO_ERROR_ILLEGAL_ARGUMENT
: exitStandby_l(param->mParcelable);
standbyTime = AudioClock::getNanoseconds() + IDLE_TIMEOUT_NANOS;
diff --git a/services/oboeservice/AAudioServiceStreamBase.h b/services/oboeservice/AAudioServiceStreamBase.h
index b5f8b90..0f51503 100644
--- a/services/oboeservice/AAudioServiceStreamBase.h
+++ b/services/oboeservice/AAudioServiceStreamBase.h
@@ -62,7 +62,7 @@
public:
explicit AAudioServiceStreamBase(android::AAudioService &aAudioService);
- virtual ~AAudioServiceStreamBase();
+ ~AAudioServiceStreamBase() override;
enum {
ILLEGAL_THREAD_ID = 0
@@ -254,7 +254,7 @@
RegisterAudioThreadParam(pid_t ownerPid, pid_t clientThreadId, int priority)
: AAudioCommandParam(), mOwnerPid(ownerPid),
mClientThreadId(clientThreadId), mPriority(priority) { }
- ~RegisterAudioThreadParam() = default;
+ ~RegisterAudioThreadParam() override = default;
pid_t mOwnerPid;
pid_t mClientThreadId;
@@ -265,9 +265,9 @@
class UnregisterAudioThreadParam : public AAudioCommandParam {
public:
- UnregisterAudioThreadParam(pid_t clientThreadId)
+ explicit UnregisterAudioThreadParam(pid_t clientThreadId)
: AAudioCommandParam(), mClientThreadId(clientThreadId) { }
- ~UnregisterAudioThreadParam() = default;
+ ~UnregisterAudioThreadParam() override = default;
pid_t mClientThreadId;
};
@@ -275,9 +275,9 @@
class GetDescriptionParam : public AAudioCommandParam {
public:
- GetDescriptionParam(AudioEndpointParcelable* parcelable)
+ explicit GetDescriptionParam(AudioEndpointParcelable* parcelable)
: AAudioCommandParam(), mParcelable(parcelable) { }
- ~GetDescriptionParam() = default;
+ ~GetDescriptionParam() override = default;
AudioEndpointParcelable* mParcelable;
};
@@ -324,9 +324,9 @@
}
class ExitStandbyParam : public AAudioCommandParam {
public:
- ExitStandbyParam(AudioEndpointParcelable* parcelable)
+ explicit ExitStandbyParam(AudioEndpointParcelable* parcelable)
: AAudioCommandParam(), mParcelable(parcelable) { }
- ~ExitStandbyParam() = default;
+ ~ExitStandbyParam() override = default;
AudioEndpointParcelable* mParcelable;
};
diff --git a/services/oboeservice/AAudioServiceStreamMMAP.h b/services/oboeservice/AAudioServiceStreamMMAP.h
index cd8c91e..8b8c5e6 100644
--- a/services/oboeservice/AAudioServiceStreamMMAP.h
+++ b/services/oboeservice/AAudioServiceStreamMMAP.h
@@ -47,7 +47,7 @@
public:
AAudioServiceStreamMMAP(android::AAudioService &aAudioService,
bool inService);
- virtual ~AAudioServiceStreamMMAP() = default;
+ ~AAudioServiceStreamMMAP() override = default;
aaudio_result_t open(const aaudio::AAudioStreamRequest &request) override;
diff --git a/services/oboeservice/AAudioServiceStreamShared.h b/services/oboeservice/AAudioServiceStreamShared.h
index 78f9787..0b2513a 100644
--- a/services/oboeservice/AAudioServiceStreamShared.h
+++ b/services/oboeservice/AAudioServiceStreamShared.h
@@ -44,7 +44,7 @@
public:
explicit AAudioServiceStreamShared(android::AAudioService &aAudioService);
- virtual ~AAudioServiceStreamShared() = default;
+ ~AAudioServiceStreamShared() override = default;
static std::string dumpHeader();
diff --git a/services/oboeservice/AAudioStreamTracker.cpp b/services/oboeservice/AAudioStreamTracker.cpp
index 9bbbc73..c86a7a2 100644
--- a/services/oboeservice/AAudioStreamTracker.cpp
+++ b/services/oboeservice/AAudioStreamTracker.cpp
@@ -78,7 +78,8 @@
return handle;
}
-aaudio_handle_t AAudioStreamTracker::addStreamForHandle(sp<AAudioServiceStreamBase> serviceStream) {
+aaudio_handle_t AAudioStreamTracker::addStreamForHandle(
+ const sp<AAudioServiceStreamBase>& serviceStream) {
std::lock_guard<std::mutex> lock(mHandleLock);
aaudio_handle_t handle = mPreviousHandle;
// Assign a unique handle.
diff --git a/services/oboeservice/AAudioStreamTracker.h b/services/oboeservice/AAudioStreamTracker.h
index 43870fc..99f4b6c 100644
--- a/services/oboeservice/AAudioStreamTracker.h
+++ b/services/oboeservice/AAudioStreamTracker.h
@@ -64,7 +64,7 @@
* @param serviceStream
* @return handle for identifying the stream
*/
- aaudio_handle_t addStreamForHandle(android::sp<AAudioServiceStreamBase> serviceStream);
+ aaudio_handle_t addStreamForHandle(const android::sp<AAudioServiceStreamBase>& serviceStream);
/**
* @return string that can be added to dumpsys
diff --git a/services/oboeservice/AAudioThread.h b/services/oboeservice/AAudioThread.h
index b2774e0..91ad715 100644
--- a/services/oboeservice/AAudioThread.h
+++ b/services/oboeservice/AAudioThread.h
@@ -29,7 +29,7 @@
*/
class Runnable {
public:
- Runnable() {};
+ Runnable() = default;
virtual ~Runnable() = default;
virtual void run() = 0;
diff --git a/services/oboeservice/Android.bp b/services/oboeservice/Android.bp
index 5076239..56c0dc9 100644
--- a/services/oboeservice/Android.bp
+++ b/services/oboeservice/Android.bp
@@ -21,6 +21,64 @@
default_applicable_licenses: ["frameworks_av_license"],
}
+tidy_errors = [
+ // https://clang.llvm.org/extra/clang-tidy/checks/list.html
+ // For many categories, the checks are too many to specify individually.
+ // Feel free to disable as needed - as warnings are generally ignored,
+ // we treat warnings as errors.
+ "android-*",
+ "bugprone-*",
+ "cert-*",
+ "clang-analyzer-security*",
+ "google-*",
+ "misc-*",
+ //"modernize-*", // explicitly list the modernize as they can be subjective.
+ "modernize-avoid-bind",
+ //"modernize-avoid-c-arrays", // std::array<> can be verbose
+ "modernize-concat-nested-namespaces",
+ //"modernize-deprecated-headers", // C headers still ok even if there is C++ equivalent.
+ "modernize-deprecated-ios-base-aliases",
+ "modernize-loop-convert",
+ "modernize-make-shared",
+ "modernize-make-unique",
+ "modernize-pass-by-value",
+ "modernize-raw-string-literal",
+ "modernize-redundant-void-arg",
+ "modernize-replace-auto-ptr",
+ "modernize-replace-random-shuffle",
+ "modernize-return-braced-init-list",
+ "modernize-shrink-to-fit",
+ "modernize-unary-static-assert",
+ "modernize-use-auto",
+ "modernize-use-bool-literals",
+ "modernize-use-default-member-init",
+ "modernize-use-emplace",
+ "modernize-use-equals-default",
+ "modernize-use-equals-delete",
+ // "modernize-use-nodiscard", // Maybe add this in the future
+ "modernize-use-noexcept",
+ "modernize-use-nullptr",
+ "modernize-use-override",
+ // "modernize-use-trailing-return-type", // not necessarily more readable
+ "modernize-use-transparent-functors",
+ "modernize-use-uncaught-exceptions",
+ "modernize-use-using",
+ "performance-*",
+
+ // Remove some pedantic stylistic requirements.
+ "-android-cloexec-dup", // found in AAudioServiceEndpointMMAP.cpp
+ "-bugprone-narrowing-conversions", // found in several interface from size_t to int32_t
+
+ "-google-readability-casting", // C++ casts not always necessary and may be verbose
+ "-google-readability-todo", // do not require TODO(info)
+ "-google-build-using-namespace", // Reenable and fix later.
+ "-google-global-names-in-headers", // found in several files
+
+ "-misc-non-private-member-variables-in-classes", // found in aidl generated files
+
+]
+
+
cc_library {
name: "libaaudioservice",
@@ -89,4 +147,11 @@
"frameworks/av/media/libnbaio/include_mono",
"frameworks/av/media/libnbaio/include",
],
+
+ tidy: true,
+ tidy_checks: tidy_errors,
+ tidy_checks_as_errors: tidy_errors,
+ tidy_flags: [
+ "-format-style=file",
+ ]
}
diff --git a/services/oboeservice/SharedMemoryProxy.cpp b/services/oboeservice/SharedMemoryProxy.cpp
index 78d4884..549a36e 100644
--- a/services/oboeservice/SharedMemoryProxy.cpp
+++ b/services/oboeservice/SharedMemoryProxy.cpp
@@ -58,7 +58,7 @@
}
// Get original memory address.
- mOriginalSharedMemory = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+ mOriginalSharedMemory = (uint8_t *) mmap(nullptr, mSharedMemorySizeInBytes,
PROT_READ|PROT_WRITE,
MAP_SHARED,
mOriginalFileDescriptor, 0);
diff --git a/services/oboeservice/SharedMemoryProxy.h b/services/oboeservice/SharedMemoryProxy.h
index 89eeb4b..81a0753 100644
--- a/services/oboeservice/SharedMemoryProxy.h
+++ b/services/oboeservice/SharedMemoryProxy.h
@@ -30,7 +30,7 @@
*/
class SharedMemoryProxy {
public:
- SharedMemoryProxy() {}
+ SharedMemoryProxy() = default;
~SharedMemoryProxy();
diff --git a/services/oboeservice/SharedRingBuffer.cpp b/services/oboeservice/SharedRingBuffer.cpp
index fd2a454..eebff51 100644
--- a/services/oboeservice/SharedRingBuffer.cpp
+++ b/services/oboeservice/SharedRingBuffer.cpp
@@ -62,7 +62,7 @@
// Map the fd to memory addresses. Use a temporary pointer to keep the mmap result and update
    // it to `mSharedMemory` only when mmap operates successfully.
- uint8_t* tmpPtr = (uint8_t *) mmap(0, mSharedMemorySizeInBytes,
+ auto tmpPtr = (uint8_t *) mmap(nullptr, mSharedMemorySizeInBytes,
PROT_READ|PROT_WRITE,
MAP_SHARED,
mFileDescriptor.get(), 0);
@@ -74,10 +74,8 @@
mSharedMemory = tmpPtr;
// Get addresses for our counters and data from the shared memory.
- fifo_counter_t *readCounterAddress =
- (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_READ_OFFSET];
- fifo_counter_t *writeCounterAddress =
- (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
+ auto readCounterAddress = (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_READ_OFFSET];
+ auto writeCounterAddress = (fifo_counter_t *) &mSharedMemory[SHARED_RINGBUFFER_WRITE_OFFSET];
uint8_t *dataAddress = &mSharedMemory[SHARED_RINGBUFFER_DATA_OFFSET];
mFifoBuffer = std::make_shared<FifoBufferIndirect>(bytesPerFrame, capacityInFrames,
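
As an aside, the mapping code above casts fixed offsets inside one shared mapping to the read/write counter and data addresses. A minimal, self-contained sketch of that layout trick; an anonymous mapping and made-up offsets stand in for the shared-memory fd and the SHARED_RINGBUFFER_* constants:

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using fifo_counter_t = int64_t;

    // Hypothetical layout: [read counter][write counter][data...]
    constexpr size_t READ_OFFSET  = 0;
    constexpr size_t WRITE_OFFSET = sizeof(fifo_counter_t);
    constexpr size_t DATA_OFFSET  = 2 * sizeof(fifo_counter_t);

    int main() {
        const size_t sizeInBytes = 4096;
        // The real code maps an ashmem fd; MAP_ANONYMOUS keeps this sketch standalone.
        void* raw = mmap(nullptr, sizeInBytes, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (raw == MAP_FAILED) return 1;
        auto base = static_cast<uint8_t*>(raw);

        auto readCounter  = (fifo_counter_t*) &base[READ_OFFSET];
        auto writeCounter = (fifo_counter_t*) &base[WRITE_OFFSET];
        uint8_t* data     = &base[DATA_OFFSET];

        *readCounter = 0;
        *writeCounter = 0;
        data[0] = 0x7F;
        printf("read=%lld write=%lld first=%d\n",
               (long long) *readCounter, (long long) *writeCounter, (int) data[0]);
        return munmap(base, sizeInBytes);
    }
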
diff --git a/services/oboeservice/SharedRingBuffer.h b/services/oboeservice/SharedRingBuffer.h
index cff1261..463bf11 100644
--- a/services/oboeservice/SharedRingBuffer.h
+++ b/services/oboeservice/SharedRingBuffer.h
@@ -39,7 +39,7 @@
*/
class SharedRingBuffer {
public:
- SharedRingBuffer() {}
+ SharedRingBuffer() = default;
virtual ~SharedRingBuffer();
diff --git a/services/oboeservice/TimestampScheduler.h b/services/oboeservice/TimestampScheduler.h
index baa5c41..6b5f4b1 100644
--- a/services/oboeservice/TimestampScheduler.h
+++ b/services/oboeservice/TimestampScheduler.h
@@ -31,7 +31,7 @@
class TimestampScheduler
{
public:
- TimestampScheduler() {};
+ TimestampScheduler() = default;
virtual ~TimestampScheduler() = default;
/**
diff --git a/services/tuner/Android.bp b/services/tuner/Android.bp
index 0649061..ea5139d 100644
--- a/services/tuner/Android.bp
+++ b/services/tuner/Android.bp
@@ -88,6 +88,7 @@
"libbase",
"libbinder",
"libfmq",
+ "libhidlbase",
"liblog",
"libtunerservice",
"libutils",
diff --git a/services/tuner/TunerHelper.cpp b/services/tuner/TunerHelper.cpp
index dc67110..a03386f 100644
--- a/services/tuner/TunerHelper.cpp
+++ b/services/tuner/TunerHelper.cpp
@@ -83,6 +83,22 @@
tunerRM->setFrontendInfoList(feInfos);
tunerRM->setLnbInfoList(lnbHandles);
}
+void TunerHelper::updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+ const vector<TunerDemuxInfo>& demuxInfos,
+ const vector<int32_t>& lnbHandles) {
+ ::ndk::SpAIBinder binder(AServiceManager_waitForService("tv_tuner_resource_mgr"));
+ shared_ptr<ITunerResourceManager> tunerRM = ITunerResourceManager::fromBinder(binder);
+ if (tunerRM == nullptr) {
+ return;
+ }
+
+ updateTunerResources(feInfos, lnbHandles);
+
+    // For Tuner HAL 2.0 and below, the Demux resource is not managed under the TRM.
+ if (demuxInfos.size() > 0) {
+ tunerRM->setDemuxInfoList(demuxInfos);
+ }
+}
// TODO: create a map between resource id and handles.
int TunerHelper::getResourceIdFromHandle(int resourceHandle, int /*type*/) {
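
Illustrative only: the new three-argument overload above delegates to the existing two-argument one and forwards demux data only when the HAL reported any, so Tuner 2.0 behaviour is unchanged. A self-contained sketch of that delegate-then-extend shape, with placeholder types standing in for the TRM and AIDL info structs:

    #include <cstdio>
    #include <vector>

    struct FrontendInfo { int handle; };
    struct DemuxInfo { int handle; };

    void updateTunerResources(const std::vector<FrontendInfo>& feInfos,
                              const std::vector<int>& lnbHandles) {
        printf("frontends=%zu lnbs=%zu\n", feInfos.size(), lnbHandles.size());
    }

    // The wider overload reuses the narrow one and only forwards the new data
    // when there is something to forward (older HALs report none).
    void updateTunerResources(const std::vector<FrontendInfo>& feInfos,
                              const std::vector<DemuxInfo>& demuxInfos,
                              const std::vector<int>& lnbHandles) {
        updateTunerResources(feInfos, lnbHandles);
        if (!demuxInfos.empty()) {
            printf("demuxes=%zu\n", demuxInfos.size());
        }
    }
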
diff --git a/services/tuner/TunerHelper.h b/services/tuner/TunerHelper.h
index 755df57..65a9b0b 100644
--- a/services/tuner/TunerHelper.h
+++ b/services/tuner/TunerHelper.h
@@ -17,9 +17,11 @@
#ifndef ANDROID_MEDIA_TUNERDVRHELPER_H
#define ANDROID_MEDIA_TUNERDVRHELPER_H
+#include <aidl/android/media/tv/tunerresourcemanager/TunerDemuxInfo.h>
#include <aidl/android/media/tv/tunerresourcemanager/TunerFrontendInfo.h>
#include <utils/String16.h>
+using ::aidl::android::media::tv::tunerresourcemanager::TunerDemuxInfo;
using ::aidl::android::media::tv::tunerresourcemanager::TunerFrontendInfo;
using ::android::String16;
@@ -55,6 +57,10 @@
// TODO: update Demux, Descrambler.
static void updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
const vector<int32_t>& lnbHandles);
+
+ static void updateTunerResources(const vector<TunerFrontendInfo>& feInfos,
+ const vector<TunerDemuxInfo>& demuxInfos,
+ const vector<int32_t>& lnbHandles);
// TODO: create a map between resource id and handles.
static int getResourceIdFromHandle(int resourceHandle, int type);
static int getResourceHandleFromId(int id, int resourceType);
diff --git a/services/tuner/TunerService.cpp b/services/tuner/TunerService.cpp
index d59d95f..e5bcf1f 100644
--- a/services/tuner/TunerService.cpp
+++ b/services/tuner/TunerService.cpp
@@ -82,18 +82,75 @@
return AServiceManager_addService(tunerService->asBinder().get(), getServiceName());
}
-::ndk::ScopedAStatus TunerService::openDemux(int32_t /* in_demuxHandle */,
+::ndk::ScopedAStatus TunerService::openDemux(int32_t in_demuxHandle,
shared_ptr<ITunerDemux>* _aidl_return) {
ALOGV("openDemux");
- vector<int32_t> id;
shared_ptr<IDemux> demux;
- auto status = mTuner->openDemux(&id, &demux);
- if (status.isOk()) {
- *_aidl_return =
- ::ndk::SharedRefBase::make<TunerDemux>(demux, id[0], this->ref<TunerService>());
+ bool fallBackToOpenDemux = false;
+ vector<int32_t> ids;
+
+ if (mTunerVersion <= TUNER_HAL_VERSION_2_0) {
+ fallBackToOpenDemux = true;
+ } else {
+ mTuner->getDemuxIds(&ids);
+ if (ids.size() == 0) {
+ fallBackToOpenDemux = true;
+ }
}
- return status;
+    if (fallBackToOpenDemux) {
+        auto status = mTuner->openDemux(&ids, &demux);
+        if (status.isOk()) {
+            *_aidl_return = ::ndk::SharedRefBase::make<TunerDemux>(demux, ids[0],
+                                                                   this->ref<TunerService>());
+        }
+        return status;
+    } else {
+ int id = TunerHelper::getResourceIdFromHandle(in_demuxHandle, DEMUX);
+ auto status = mTuner->openDemuxById(id, &demux);
+ if (status.isOk()) {
+ *_aidl_return =
+ ::ndk::SharedRefBase::make<TunerDemux>(demux, id, this->ref<TunerService>());
+ }
+ return status;
+ }
+}
+
+::ndk::ScopedAStatus TunerService::getDemuxInfo(int32_t in_demuxHandle, DemuxInfo* _aidl_return) {
+ if (mTunerVersion <= TUNER_HAL_VERSION_2_0) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+ int id = TunerHelper::getResourceIdFromHandle(in_demuxHandle, DEMUX);
+ return mTuner->getDemuxInfo(id, _aidl_return);
+}
+
+::ndk::ScopedAStatus TunerService::getDemuxInfoList(vector<DemuxInfo>* _aidl_return) {
+ if (mTunerVersion <= TUNER_HAL_VERSION_2_0) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+ vector<DemuxInfo> demuxInfoList;
+ vector<int32_t> ids;
+ auto status = mTuner->getDemuxIds(&ids);
+ if (!status.isOk()) {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
+
+ for (int i = 0; i < ids.size(); i++) {
+ DemuxInfo demuxInfo;
+ auto res = mTuner->getDemuxInfo(ids[i], &demuxInfo);
+ if (!res.isOk()) {
+ continue;
+ }
+ demuxInfoList.push_back(demuxInfo);
+ }
+
+ if (demuxInfoList.size() > 0) {
+ *_aidl_return = demuxInfoList;
+ return ::ndk::ScopedAStatus::ok();
+ } else {
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(Result::UNAVAILABLE));
+ }
}
::ndk::ScopedAStatus TunerService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
@@ -230,7 +287,9 @@
}
void TunerService::updateTunerResources() {
- TunerHelper::updateTunerResources(getTRMFrontendInfos(), getTRMLnbHandles());
+ TunerHelper::updateTunerResources(getTRMFrontendInfos(),
+ getTRMDemuxInfos(),
+ getTRMLnbHandles());
}
vector<TunerFrontendInfo> TunerService::getTRMFrontendInfos() {
@@ -258,6 +317,32 @@
return infos;
}
+vector<TunerDemuxInfo> TunerService::getTRMDemuxInfos() {
+ vector<TunerDemuxInfo> infos;
+ vector<int32_t> ids;
+
+ if (mTunerVersion <= TUNER_HAL_VERSION_2_0) {
+ return infos;
+ }
+
+ auto status = mTuner->getDemuxIds(&ids);
+ if (!status.isOk()) {
+ return infos;
+ }
+
+ for (int i = 0; i < ids.size(); i++) {
+ DemuxInfo demuxInfo;
+ mTuner->getDemuxInfo(ids[i], &demuxInfo);
+ TunerDemuxInfo tunerDemuxInfo{
+ .handle = TunerHelper::getResourceHandleFromId((int)ids[i], DEMUX),
+ .filterTypes = static_cast<int>(demuxInfo.filterTypes)
+ };
+ infos.push_back(tunerDemuxInfo);
+ }
+
+ return infos;
+}
+
vector<int32_t> TunerService::getTRMLnbHandles() {
vector<int32_t> lnbHandles;
if (mTuner != nullptr) {
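Condensed sketch of the demux-open policy implemented in the hunk above, assuming the same mTuner and TunerHelper interfaces (a summary, not an exact copy of the patch):

    std::vector<int32_t> ids;
    bool useLegacyOpen = (mTunerVersion <= TUNER_HAL_VERSION_2_0);
    if (!useLegacyOpen) {
        mTuner->getDemuxIds(&ids);
        useLegacyOpen = ids.empty();            // no ids reported -> fall back
    }
    std::shared_ptr<IDemux> demux;
    if (useLegacyOpen) {
        mTuner->openDemux(&ids, &demux);        // HAL picks a demux and returns its id in ids[0]
    } else {
        int id = TunerHelper::getResourceIdFromHandle(in_demuxHandle, DEMUX);
        mTuner->openDemuxById(id, &demux);      // Tuner HAL 3.0+: open the requested demux
    }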
diff --git a/services/tuner/TunerService.h b/services/tuner/TunerService.h
index 517df4a..190ccd4 100644
--- a/services/tuner/TunerService.h
+++ b/services/tuner/TunerService.h
@@ -32,6 +32,7 @@
using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxInfo;
using ::aidl::android::hardware::tv::tuner::FrontendInfo;
using ::aidl::android::hardware::tv::tuner::FrontendType;
using ::aidl::android::hardware::tv::tuner::ITuner;
@@ -71,6 +72,8 @@
::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
shared_ptr<ITunerDemux>* _aidl_return) override;
::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxInfo(int32_t in_demuxHandle, DemuxInfo* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxInfoList(vector<DemuxInfo>* _aidl_return) override;
::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
shared_ptr<ITunerDescrambler>* _aidl_return) override;
::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
@@ -90,6 +93,7 @@
private:
void updateTunerResources();
vector<TunerFrontendInfo> getTRMFrontendInfos();
+ vector<TunerDemuxInfo> getTRMDemuxInfos();
vector<int32_t> getTRMLnbHandles();
shared_ptr<ITuner> mTuner;
diff --git a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
index 8d285e3..932133e 100644
--- a/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
+++ b/services/tuner/aidl/android/media/tv/tuner/ITunerService.aidl
@@ -17,6 +17,7 @@
package android.media.tv.tuner;
import android.hardware.tv.tuner.DemuxCapabilities;
+import android.hardware.tv.tuner.DemuxInfo;
import android.hardware.tv.tuner.FrontendInfo;
import android.hardware.tv.tuner.FrontendType;
import android.media.tv.tuner.ITunerDemux;
@@ -77,6 +78,21 @@
ITunerDemux openDemux(in int demuxHandle);
/**
+ * Retrieve the supported filter main types
+ *
+ * @param demuxHandle the handle of the demux to query demux info for
+ * @return the demux info
+ */
+ DemuxInfo getDemuxInfo(in int demuxHandle);
+
+ /**
+ * Retrieve the list of demux info for all the demuxes on the system
+ *
+ * @return the list of DemuxInfo
+ */
+ DemuxInfo[] getDemuxInfoList();
+
+ /**
* Retrieve the Tuner Demux capabilities.
*
* @return the demux's capabilities.
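A hypothetical client-side sketch of the two new methods, written against the generated NDK binding for ITunerService; "service" is assumed to be an already-connected proxy and error handling is reduced to the UNAVAILABLE case:

    static void dumpDemuxInfos(const std::shared_ptr<ITunerService>& service) {
        std::vector<DemuxInfo> infos;
        ::ndk::ScopedAStatus status = service->getDemuxInfoList(&infos);
        if (!status.isOk()) {
            // Tuner HAL 2.0 and below (and the HIDL-backed service) report UNAVAILABLE here.
            return;
        }
        for (const DemuxInfo& info : infos) {
            ALOGV("demux filter main types bitmask: 0x%x", info.filterTypes);
        }
    }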
diff --git a/services/tuner/hidl/TunerHidlService.cpp b/services/tuner/hidl/TunerHidlService.cpp
index 52005c2..6bc36be 100644
--- a/services/tuner/hidl/TunerHidlService.cpp
+++ b/services/tuner/hidl/TunerHidlService.cpp
@@ -123,6 +123,20 @@
return ::ndk::ScopedAStatus::fromServiceSpecificError(static_cast<int32_t>(res));
}
+::ndk::ScopedAStatus TunerHidlService::getDemuxInfo(int32_t /* in_demuxHandle */,
+ DemuxInfo* /* _aidl_return */) {
+ ALOGE("getDemuxInfo is not supported");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+}
+
+::ndk::ScopedAStatus TunerHidlService::getDemuxInfoList(
+        vector<DemuxInfo>* /* _aidl_return */) {
+ ALOGE("getDemuxInfoList is not supported");
+ return ::ndk::ScopedAStatus::fromServiceSpecificError(
+ static_cast<int32_t>(HidlResult::UNAVAILABLE));
+}
+
::ndk::ScopedAStatus TunerHidlService::getDemuxCaps(DemuxCapabilities* _aidl_return) {
ALOGV("getDemuxCaps");
HidlResult res;
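Because the HIDL-backed service always reports UNAVAILABLE for the new demux-info calls, a caller can probe the HAL version first. A hedged sketch using the existing getTunerHalVersion() method and the same TUNER_HAL_VERSION_2_0 constant used elsewhere in this patch:

    int32_t halVersion = 0;
    service->getTunerHalVersion(&halVersion);
    if (halVersion > TUNER_HAL_VERSION_2_0) {
        DemuxInfo info;
        service->getDemuxInfo(demuxHandle, &info);  // only meaningful on Tuner HAL 3.0+
    }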
diff --git a/services/tuner/hidl/TunerHidlService.h b/services/tuner/hidl/TunerHidlService.h
index 872aefc..526c5e6 100644
--- a/services/tuner/hidl/TunerHidlService.h
+++ b/services/tuner/hidl/TunerHidlService.h
@@ -33,6 +33,7 @@
using ::aidl::android::hardware::tv::tuner::DemuxCapabilities;
using ::aidl::android::hardware::tv::tuner::DemuxFilterEvent;
using ::aidl::android::hardware::tv::tuner::DemuxFilterStatus;
+using ::aidl::android::hardware::tv::tuner::DemuxInfo;
using ::aidl::android::hardware::tv::tuner::FrontendInfo;
using ::aidl::android::hardware::tv::tuner::FrontendType;
using ::aidl::android::media::tv::tuner::ITunerDemux;
@@ -83,6 +84,8 @@
::ndk::ScopedAStatus openDemux(int32_t in_demuxHandle,
shared_ptr<ITunerDemux>* _aidl_return) override;
::ndk::ScopedAStatus getDemuxCaps(DemuxCapabilities* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxInfo(int32_t in_demuxHandle, DemuxInfo* _aidl_return) override;
+ ::ndk::ScopedAStatus getDemuxInfoList(vector<DemuxInfo>* _aidl_return) override;
::ndk::ScopedAStatus openDescrambler(int32_t in_descramblerHandle,
shared_ptr<ITunerDescrambler>* _aidl_return) override;
::ndk::ScopedAStatus getTunerHalVersion(int32_t* _aidl_return) override;
diff --git a/services/tuner/main_tunerservice.cpp b/services/tuner/main_tunerservice.cpp
index f8311ff..90f1731 100644
--- a/services/tuner/main_tunerservice.cpp
+++ b/services/tuner/main_tunerservice.cpp
@@ -18,6 +18,7 @@
#include <binder/IPCThreadState.h>
#include <binder/IServiceManager.h>
#include <utils/Log.h>
+#include <hidl/HidlTransportSupport.h>
#include "TunerService.h"
#include "hidl/TunerHidlService.h"
@@ -32,7 +33,8 @@
sp<ProcessState> proc(ProcessState::self());
sp<IServiceManager> sm = defaultServiceManager();
- ProcessState::self()->setThreadPoolMaxThreadCount(8);
+ hardware::configureRpcThreadpool(16, true);
+ ProcessState::self()->setThreadPoolMaxThreadCount(16);
// Check the legacy HIDL HAL first. If it does not exist, use the AIDL HAL.
binder_status_t status = TunerHidlService::instantiate();
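The startup change above sizes both thread pools to 16 because this one process can host either the legacy HIDL service or the AIDL one. A minimal sketch of that sequence under the same headers (a sketch, not the full main()):

    // Both the hwbinder (HIDL) and binder (AIDL) pools are configured before any
    // service is registered, so incoming HAL callbacks have threads to land on.
    android::hardware::configureRpcThreadpool(16 /* maxThreads */, true /* callerWillJoin */);
    android::ProcessState::self()->setThreadPoolMaxThreadCount(16);
    android::ProcessState::self()->startThreadPool();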