Merge "codec2: add get consumeusage for C2BufferQueueBlockPool"
diff --git a/METADATA b/METADATA
index aabda36..146bfcb 100644
--- a/METADATA
+++ b/METADATA
@@ -2,22 +2,22 @@
# CONSULT THE OWNERS AND opensource-licensing@google.com BEFORE
# DEPENDING ON IT IN YOUR PROJECT. ***
third_party {
- # would be NOTICE save for Widevine Master License Agreement in:
- # drm/mediadrm/plugins/clearkey/hidl/DeviceFiles.cpp
- # drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp
- # drm/mediadrm/plugins/clearkey/hidl/include/DeviceFiles.h
- # drm/mediadrm/plugins/clearkey/hidl/protos/DeviceFiles.proto
- # drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h
- # and patent disclaimers in:
- # media/codec2/components/aac/patent_disclaimer.txt
- # media/codec2/components/amr_nb_wb/patent_disclaimer.txt
- # media/codec2/components/mp3/patent_disclaimer.txt
- # media/codec2/components/mpeg4_h263/patent_disclaimer.txt
- # media/codecs/amrnb/patent_disclaimer.txt
- # media/codecs/amrwb/dec/patent_disclaimer.txt
- # media/codecs/amrwb/enc/patent_disclaimer.txt
- # media/codecs/m4v_h263/patent_disclaimer.txt
- # media/codecs/mp3dec/patent_disclaimer.txt
- # media/libstagefright/codecs/aacenc/patent_disclaimer.txt
+ license_note: "would be NOTICE save for Widevine Master License Agreement in:\n"
+ " drm/mediadrm/plugins/clearkey/hidl/DeviceFiles.cpp\n"
+ " drm/mediadrm/plugins/clearkey/hidl/MemoryFileSystem.cpp\n"
+ " drm/mediadrm/plugins/clearkey/hidl/include/DeviceFiles.h\n"
+ " drm/mediadrm/plugins/clearkey/hidl/protos/DeviceFiles.proto\n"
+ " drm/mediadrm/plugins/clearkey/hidl/include/MemoryFileSystem.h\n"
+ " and patent disclaimers in:\n"
+ " media/codec2/components/aac/patent_disclaimer.txt\n"
+ " media/codec2/components/amr_nb_wb/patent_disclaimer.txt\n"
+ " media/codec2/components/mp3/patent_disclaimer.txt\n"
+ " media/codec2/components/mpeg4_h263/patent_disclaimer.txt\n"
+ " media/codecs/amrnb/patent_disclaimer.txt\n"
+ " media/codecs/amrwb/dec/patent_disclaimer.txt\n"
+ " media/codecs/amrwb/enc/patent_disclaimer.txt\n"
+ " media/codecs/m4v_h263/patent_disclaimer.txt\n"
+ " media/codecs/mp3dec/patent_disclaimer.txt\n"
+ " media/libstagefright/codecs/aacenc/patent_disclaimer.txt"
license_type: BY_EXCEPTION_ONLY
}
diff --git a/OWNERS b/OWNERS
index 0be1196..40c65e7 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,7 +1,6 @@
# Bug component: 1344
elaurent@google.com
etalvala@google.com
-hkuang@google.com
lajos@google.com
# go/android-fwk-media-solutions for info on areas of ownership.
diff --git a/apex/Android.bp b/apex/Android.bp
index b9abd12..b9b9bde 100644
--- a/apex/Android.bp
+++ b/apex/Android.bp
@@ -23,7 +23,6 @@
apex_defaults {
name: "com.android.media-defaults",
- updatable: true,
bootclasspath_fragments: ["com.android.media-bootclasspath-fragment"],
systemserverclasspath_fragments: ["com.android.media-systemserverclasspath-fragment"],
multilib: {
@@ -67,14 +66,13 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media-androidManifest",
- // IMPORTANT: For the APEX to be installed on Android 10 (API 29),
- // min_sdk_version should be 29. This enables the build system to make
+ // IMPORTANT: q-launched-apex-module enables the build system to make
// sure the package is compatible with Android 10 in two ways:
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- min_sdk_version: "29",
+ defaults: ["q-launched-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
@@ -126,6 +124,26 @@
// modified by the Soong or platform compat team.
hidden_api: {
max_target_o_low_priority: ["hiddenapi/hiddenapi-max-target-o-low-priority.txt"],
+
+ // The following packages contain classes from other modules on the
+ // bootclasspath. That means that the hidden API flags for this module
+ # have to explicitly list every single class this module provides in
+ // that package to differentiate them from the classes provided by other
+ // modules. That can include private classes that are not part of the
+ // API.
+ split_packages: [
+ "android.media",
+ ],
+
+ // The following packages and all their subpackages currently only
+ // contain classes from this bootclasspath_fragment. Listing a package
+ // here won't prevent other bootclasspath modules from adding classes in
+ // any of those packages but it will prevent them from adding those
+ # classes into an API surface, e.g. public, system, etc. Doing so will
+ // result in a build failure due to inconsistent flags.
+ package_prefixes: [
+ "android.media.internal",
+ ],
},
}
@@ -148,7 +166,6 @@
apex_defaults {
name: "com.android.media.swcodec-defaults",
- updatable: true,
binaries: [
"mediaswcodec",
],
@@ -172,14 +189,13 @@
// Use a custom AndroidManifest.xml used for API targeting.
androidManifest: ":com.android.media.swcodec-androidManifest",
- // IMPORTANT: For the APEX to be installed on Android 10 (API 29),
- // min_sdk_version should be 29. This enables the build system to make
+ // IMPORTANT: q-launched-apex-module enables the build system to make
// sure the package is compatible with Android 10 in two ways:
// - build the APEX package compatible to Android 10
// so that the package can be installed.
// - build artifacts (lib/javalib/bin) against Android 10 SDK
// so that the artifacts can run.
- min_sdk_version: "29",
+ defaults: ["q-launched-apex-module"],
// Indicates that pre-installed version of this apex can be compressed.
// Whether it actually will be compressed is controlled on per-device basis.
compressible: true,
diff --git a/camera/OWNERS b/camera/OWNERS
index 385c163..2a1d523 100644
--- a/camera/OWNERS
+++ b/camera/OWNERS
@@ -1,3 +1,4 @@
+
# Bug component: 41727
etalvala@google.com
arakesh@google.com
diff --git a/camera/aidl/android/hardware/ICameraServiceProxy.aidl b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
index bbb0289..f5d0120 100644
--- a/camera/aidl/android/hardware/ICameraServiceProxy.aidl
+++ b/camera/aidl/android/hardware/ICameraServiceProxy.aidl
@@ -37,8 +37,11 @@
oneway void notifyCameraState(in CameraSessionStats cameraSessionStats);
/**
- * Reports whether the top activity needs a rotate and crop override.
+ * Returns the necessary rotate and crop override for the top activity which
+ * will be one of ({@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_NONE},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_90},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_180},
+ * {@link android.hardware.camera2.CameraMetadata#SCALER_ROTATE_AND_CROP_270}).
*/
- boolean isRotateAndCropOverrideNeeded(String packageName, int sensorOrientation,
- int lensFacing);
+ int getRotateAndCropOverride(String packageName, int lensFacing, int userId);
}
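The AIDL change above replaces the boolean query with a method that returns one of the `SCALER_ROTATE_AND_CROP_*` metadata values directly. A minimal caller-side sketch (illustrative only, not part of this change; it assumes the `ANDROID_SCALER_ROTATE_AND_CROP_*` constants from `system/camera_metadata_tags.h`) of mapping the returned override to a rotation in degrees:

```cpp
#include <system/camera_metadata_tags.h>

// Hypothetical helper: translate the override returned by
// getRotateAndCropOverride() into a rotation in degrees.
static int rotateAndCropToDegrees(int32_t rotateAndCrop) {
    switch (rotateAndCrop) {
        case ANDROID_SCALER_ROTATE_AND_CROP_90:  return 90;
        case ANDROID_SCALER_ROTATE_AND_CROP_180: return 180;
        case ANDROID_SCALER_ROTATE_AND_CROP_270: return 270;
        case ANDROID_SCALER_ROTATE_AND_CROP_NONE:
        default:                                 return 0;
    }
}
```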
diff --git a/camera/ndk/include/camera/NdkCameraMetadataTags.h b/camera/ndk/include/camera/NdkCameraMetadataTags.h
index 86781e5..816303c 100644
--- a/camera/ndk/include/camera/NdkCameraMetadataTags.h
+++ b/camera/ndk/include/camera/NdkCameraMetadataTags.h
@@ -3600,7 +3600,8 @@
* YUV_420_888 | all output sizes available for JPEG, up to the maximum video size | LIMITED |
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device declares to be
- * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CDOES/MEDIA_PERFORMANCE_CLASS.html">media performance class</a> S,
+ * media performance class 12 or higher by setting
+ * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
* the primary camera devices (first rear/front camera in the camera ID list) will not
* support JPEG sizes smaller than 1080p. If the application configures a JPEG stream
* smaller than 1080p, the camera device will round up the JPEG image size to at least
@@ -3618,9 +3619,11 @@
* YUV_420_888 | all output sizes available for FULL hardware level, up to the maximum video size | LIMITED |
* IMPLEMENTATION_DEFINED | same as YUV_420_888 | Any |</p>
* <p>For applications targeting SDK version 31 or newer, if the mobile device doesn't declare
- * to be media performance class S, or if the camera device isn't a primary rear/front
- * camera, the minimum required output stream configurations are the same as for applications
- * targeting SDK version older than 31.</p>
+ * to be media performance class 12 or better by setting
+ * <a href="https://developer.android.com/reference/android/os/Build/VERSION_CODES/MEDIA_PERFORMANCE_CLASS.html">MEDIA_PERFORMANCE_CLASS</a> to be 31 or larger,
+ * or if the camera device isn't a primary rear/front camera, the minimum required output
+ * stream configurations are the same as for applications targeting SDK version older than
+ * 31.</p>
* <p>Refer to ACAMERA_REQUEST_AVAILABLE_CAPABILITIES for additional
* mandatory stream configurations on a per-capability basis.</p>
* <p>Exception on 176x144 (QCIF) resolution: camera devices usually have a fixed capability for
@@ -4578,6 +4581,25 @@
*
* <p>Also defines the direction of rolling shutter readout, which is from top to bottom in
* the sensor's coordinate system.</p>
+ * <p>Starting with Android API level 32, camera clients that query the orientation via
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/CameraCharacteristics.html#get">CameraCharacteristics#get</a> on foldable devices which
+ * include logical cameras can receive a value that can dynamically change depending on the
+ * device/fold state.
+ * Clients are advised to not cache or store the orientation value of such logical sensors.
+ * In case repeated queries to CameraCharacteristics are not preferred, then clients can
+ * also access the entire mapping from device state to sensor orientation in
+ * <a href="https://developer.android.com/reference/android/hardware/camera2/params/DeviceStateSensorOrientationMap.html">DeviceStateSensorOrientationMap</a>.
+ * Do note that a dynamically changing sensor orientation value in camera characteristics
+ * will not be the best way to establish the orientation per frame. Clients that want to
+ * know the sensor orientation of a particular captured frame should query the
+ * ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID from the corresponding capture result and
+ * check the respective physical camera orientation.</p>
+ * <p>Native camera clients must query ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS for the mapping
+ * between device state and camera sensor orientation. Dynamic updates to the sensor
+ * orientation are not supported in this code path.</p>
+ *
+ * @see ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS
+ * @see ACAMERA_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID
*/
ACAMERA_SENSOR_ORIENTATION = // int32
ACAMERA_SENSOR_START + 14,
@@ -6284,6 +6306,21 @@
*/
ACAMERA_INFO_VERSION = // byte
ACAMERA_INFO_START + 1,
+ /**
+ *
+ * <p>Type: int64[2*n]</p>
+ *
+ * <p>This tag may appear in:
+ * <ul>
+ * <li>ACameraMetadata from ACameraManager_getCameraCharacteristics</li>
+ * </ul></p>
+ *
+ * <p>HAL must populate the array with
+ * (hardware::camera::provider::V2_5::DeviceState, sensorOrientation) pairs for each
+ * supported device state bitwise combination.</p>
+ */
+ ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS = // int64[2*n]
+ ACAMERA_INFO_START + 3,
ACAMERA_INFO_END,
/**
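The new `ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS` tag stores flattened (deviceState, sensorOrientation) pairs. A minimal sketch (an assumption, not code from this change) of how a native client might walk those pairs with the existing NDK metadata API:

```cpp
#include <cinttypes>
#include <cstdio>
#include <camera/NdkCameraMetadata.h>

// Walk the int64[2*n] array: even indices hold the device state bitmask,
// odd indices the matching sensor orientation in degrees.
void dumpDeviceStateOrientations(const ACameraMetadata* chars) {
    ACameraMetadata_const_entry entry;
    if (ACameraMetadata_getConstEntry(
            chars, ACAMERA_INFO_DEVICE_STATE_ORIENTATIONS, &entry) != ACAMERA_OK) {
        return;  // tag not present on this device
    }
    for (uint32_t i = 0; i + 1 < entry.count; i += 2) {
        printf("state 0x%" PRIx64 " -> orientation %" PRId64 " degrees\n",
               entry.data.i64[i], entry.data.i64[i + 1]);
    }
}
```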
diff --git a/cmds/stagefright/stagefright.cpp b/cmds/stagefright/stagefright.cpp
index c430f05..6d1263e 100644
--- a/cmds/stagefright/stagefright.cpp
+++ b/cmds/stagefright/stagefright.cpp
@@ -70,6 +70,10 @@
using namespace android;
+namespace {
+ constexpr static int PIXEL_FORMAT_RGBA_1010102_AS_8888 = -HAL_PIXEL_FORMAT_RGBA_1010102;
+}
+
static long gNumRepetitions;
static long gMaxNumFrames; // 0 means decode all available.
static long gReproduceBug; // if not -1.
@@ -629,7 +633,14 @@
fprintf(stderr, " -m max-number-of-frames-to-decode in each pass\n");
fprintf(stderr, " -b bug to reproduce\n");
fprintf(stderr, " -i(nfo) dump codec info (profiles and color formats supported, details)\n");
- fprintf(stderr, " -t(humbnail) extract video thumbnail or album art\n");
+ fprintf(stderr, " -t(humbnail) extract video thumbnail or album art (/sdcard/out.jpg)\n");
+ fprintf(stderr, " -P(ixelFormat) pixel format to use for raw thumbnail "
+ "(/sdcard/out.raw)\n");
+ fprintf(stderr, " %d: RGB_565\n", HAL_PIXEL_FORMAT_RGB_565);
+ fprintf(stderr, " %d: RGBA_8888\n", HAL_PIXEL_FORMAT_RGBA_8888);
+ fprintf(stderr, " %d: BGRA_8888\n", HAL_PIXEL_FORMAT_BGRA_8888);
+ fprintf(stderr, " %d: RGBA_1010102\n", HAL_PIXEL_FORMAT_RGBA_1010102);
+ fprintf(stderr, " %d: RGBA_1010102 as RGBA_8888\n", PIXEL_FORMAT_RGBA_1010102_AS_8888);
fprintf(stderr, " -s(oftware) prefer software codec\n");
fprintf(stderr, " -r(hardware) force to use hardware codec\n");
fprintf(stderr, " -o playback audio\n");
@@ -787,6 +798,7 @@
bool useSurfaceTexAlloc = false;
bool dumpStream = false;
bool dumpPCMStream = false;
+ int32_t pixelFormat = 0; // thumbnail pixel format
String8 dumpStreamFilename;
gNumRepetitions = 1;
gMaxNumFrames = 0;
@@ -800,7 +812,7 @@
sp<android::ALooper> looper;
int res;
- while ((res = getopt(argc, argv, "vhaqn:lm:b:itsrow:kN:xSTd:D:")) >= 0) {
+ while ((res = getopt(argc, argv, "vhaqn:lm:b:itsrow:kN:xSTd:D:P:")) >= 0) {
switch (res) {
case 'a':
{
@@ -841,6 +853,7 @@
break;
}
+ case 'P':
case 'm':
case 'n':
case 'b':
@@ -856,6 +869,8 @@
gNumRepetitions = x;
} else if (res == 'm') {
gMaxNumFrames = x;
+ } else if (res == 'P') {
+ pixelFormat = x;
} else {
CHECK_EQ(res, 'b');
gReproduceBug = x;
@@ -978,24 +993,71 @@
close(fd);
fd = -1;
+ uint32_t retrieverPixelFormat = HAL_PIXEL_FORMAT_RGB_565;
+ if (pixelFormat == PIXEL_FORMAT_RGBA_1010102_AS_8888) {
+ retrieverPixelFormat = HAL_PIXEL_FORMAT_RGBA_1010102;
+ } else if (pixelFormat) {
+ retrieverPixelFormat = pixelFormat;
+ }
sp<IMemory> mem =
retriever->getFrameAtTime(-1,
MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC,
- HAL_PIXEL_FORMAT_RGB_565,
- false /*metaOnly*/);
+ retrieverPixelFormat, false /*metaOnly*/);
if (mem != NULL) {
failed = false;
- printf("getFrameAtTime(%s) => OK\n", filename);
+ printf("getFrameAtTime(%s) format=%d => OK\n", filename, retrieverPixelFormat);
VideoFrame *frame = (VideoFrame *)mem->unsecurePointer();
- CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
- frame->getFlattenedData(),
- frame->mWidth, frame->mHeight), 0);
+ if (pixelFormat) {
+ int bpp = 0;
+ switch (pixelFormat) {
+ case HAL_PIXEL_FORMAT_RGB_565:
+ bpp = 2;
+ break;
+ case PIXEL_FORMAT_RGBA_1010102_AS_8888:
+ // convert RGBA_1010102 to RGBA_8888
+ {
+ uint32_t *data = (uint32_t *)frame->getFlattenedData();
+ uint32_t *end = data + frame->mWidth * frame->mHeight;
+ for (; data < end; ++data) {
+ *data =
+ // pick out 8-bit R, G, B values and move them to the
+ // correct position
+ ( (*data & 0x3fc) >> 2) | // R
+ ( (*data & 0xff000) >> 4) | // G
+ ( (*data & 0x3fc00000) >> 6) | // B
+ // pick out 2-bit A and expand to 8-bits
+ (((*data & 0xc0000000) >> 6) * 0x55);
+ }
+ }
+
+ FALLTHROUGH_INTENDED;
+
+ case HAL_PIXEL_FORMAT_RGBA_1010102:
+ case HAL_PIXEL_FORMAT_RGBA_8888:
+ case HAL_PIXEL_FORMAT_BGRA_8888:
+ bpp = 4;
+ break;
+ }
+ if (bpp) {
+ FILE *out = fopen("/sdcard/out.raw", "wb");
+ fwrite(frame->getFlattenedData(), bpp * frame->mWidth, frame->mHeight, out);
+ fclose(out);
+
+ printf("write out %d x %d x %dbpp\n", frame->mWidth, frame->mHeight, bpp);
+ } else {
+ printf("unknown pixel format.\n");
+ }
+ } else {
+ CHECK_EQ(writeJpegFile("/sdcard/out.jpg",
+ frame->getFlattenedData(),
+ frame->mWidth, frame->mHeight), 0);
+ }
}
- {
+ if (!pixelFormat) {
mem = retriever->extractAlbumArt();
if (mem != NULL) {
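The `-P` path above packs RGBA_1010102 pixels down to RGBA_8888 by keeping the top 8 bits of each 10-bit channel and expanding the 2-bit alpha with a multiply by 0x55. A standalone sanity check of that bit math (illustrative only, mirroring the masks used in the conversion loop above):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    // RGBA_1010102 sample: R = 0x3FF (max), G = 0x200 (mid), B = 0, A = 3 (opaque)
    uint32_t px = (0x3u << 30) | (0x200u << 10) | 0x3FFu;
    uint32_t out = ((px & 0x3fc) >> 2) |               // R: bits 9..2  -> 7..0
                   ((px & 0xff000) >> 4) |             // G: bits 19..12 -> 15..8
                   ((px & 0x3fc00000) >> 6) |          // B: bits 29..22 -> 23..16
                   (((px & 0xc0000000) >> 6) * 0x55);  // A: 2 bits expanded to 8
    assert((out & 0xff) == 0xff);          // R
    assert(((out >> 8) & 0xff) == 0x80);   // G
    assert(((out >> 16) & 0xff) == 0x00);  // B
    assert((out >> 24) == 0xff);           // A
    return 0;
}
```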
diff --git a/drm/OWNERS b/drm/OWNERS
index e788754..090c021 100644
--- a/drm/OWNERS
+++ b/drm/OWNERS
@@ -1 +1,3 @@
jtinker@google.com
+kelzhan@google.com
+robertshih@google.com
diff --git a/drm/mediadrm/plugins/TEST_MAPPING b/drm/mediadrm/plugins/TEST_MAPPING
index 7bd1568..9919e90 100644
--- a/drm/mediadrm/plugins/TEST_MAPPING
+++ b/drm/mediadrm/plugins/TEST_MAPPING
@@ -1,16 +1,10 @@
{
"presubmit": [
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaDrmFrameworkTestCases",
"options" : [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
- },
- {
- "include-filter": "android.media.cts.MediaDrmClearkeyTest"
- },
- {
- "include-filter": "android.media.cts.MediaDrmMetricsTest"
}
]
}
diff --git a/include/OWNERS b/include/OWNERS
index 88de595..e1d4db7 100644
--- a/include/OWNERS
+++ b/include/OWNERS
@@ -3,3 +3,4 @@
jtinker@google.com
lajos@google.com
essick@google.com
+philburk@google.com
diff --git a/media/Android.mk b/media/Android.mk
new file mode 100644
index 0000000..220a358
--- /dev/null
+++ b/media/Android.mk
@@ -0,0 +1,5 @@
+LOCAL_PATH := $(call my-dir)
+
+$(eval $(call declare-1p-copy-files,frameworks/av/media/libeffects,audio_effects.conf))
+$(eval $(call declare-1p-copy-files,frameworks/av/media/libeffects,audio_effects.xml))
+$(eval $(call declare-1p-copy-files,frameworks/av/media/libstagefright,))
diff --git a/media/OWNERS b/media/OWNERS
index 099729f..4a25b68 100644
--- a/media/OWNERS
+++ b/media/OWNERS
@@ -1,7 +1,6 @@
# Bug component: 1344
elaurent@google.com
essick@google.com
-hkuang@google.com
hunga@google.com
jiabin@google.com
jmtrivi@google.com
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 5bc7262..41fe080 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -3,18 +3,18 @@
"presubmit-large": [
// runs whenever we change something in this tree
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaCodecTestCases",
"options": [
{
- "include-filter": "android.media.cts.EncodeDecodeTest"
+ "include-filter": "android.media.codec.cts.EncodeDecodeTest"
}
]
},
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaCodecTestCases",
"options": [
{
- "include-filter": "android.media.cts.DecodeEditEncodeTest"
+ "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
}
]
}
@@ -46,18 +46,18 @@
// runs regularly, independent of changes in this tree.
// signals if changes elsewhere break media functionality
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaCodecTestCases",
"options": [
{
- "include-filter": "android.media.cts.EncodeDecodeTest"
+ "include-filter": "android.media.codec.cts.EncodeDecodeTest"
}
]
},
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaCodecTestCases",
"options": [
{
- "include-filter": "android.media.cts.DecodeEditEncodeTest"
+ "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
}
]
}
diff --git a/media/bufferpool/2.0/tests/Android.bp b/media/bufferpool/2.0/tests/Android.bp
index 803a813..5e26e3a 100644
--- a/media/bufferpool/2.0/tests/Android.bp
+++ b/media/bufferpool/2.0/tests/Android.bp
@@ -80,3 +80,22 @@
],
compile_multilib: "both",
}
+
+cc_test {
+ name: "BufferpoolUnitTest",
+ test_suites: ["device-tests"],
+ defaults: ["VtsHalTargetTestDefaults"],
+ srcs: [
+ "allocator.cpp",
+ "BufferpoolUnitTest.cpp",
+ ],
+ static_libs: [
+ "android.hardware.media.bufferpool@2.0",
+ "libcutils",
+ "libstagefright_bufferpool@2.0.1",
+ ],
+ shared_libs: [
+ "libfmq",
+ ],
+ compile_multilib: "both",
+}
diff --git a/media/bufferpool/2.0/tests/AndroidTest.xml b/media/bufferpool/2.0/tests/AndroidTest.xml
new file mode 100644
index 0000000..b027ad0
--- /dev/null
+++ b/media/bufferpool/2.0/tests/AndroidTest.xml
@@ -0,0 +1,23 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2021 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Test module config for bufferpool unit tests">
+ <option name="test-suite-tag" value="BufferpoolUnitTest" />
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="BufferpoolUnitTest" />
+ </test>
+</configuration>
diff --git a/media/bufferpool/2.0/tests/BufferpoolUnitTest.cpp b/media/bufferpool/2.0/tests/BufferpoolUnitTest.cpp
new file mode 100644
index 0000000..b448405
--- /dev/null
+++ b/media/bufferpool/2.0/tests/BufferpoolUnitTest.cpp
@@ -0,0 +1,541 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "BufferpoolUnitTest"
+#include <utils/Log.h>
+
+#include <binder/ProcessState.h>
+#include <bufferpool/ClientManager.h>
+#include <gtest/gtest.h>
+#include <hidl/LegacySupport.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unordered_set>
+#include <vector>
+#include "allocator.h"
+
+using android::hardware::configureRpcThreadpool;
+using android::hardware::media::bufferpool::BufferPoolData;
+using android::hardware::media::bufferpool::V2_0::IClientManager;
+using android::hardware::media::bufferpool::V2_0::ResultStatus;
+using android::hardware::media::bufferpool::V2_0::implementation::BufferId;
+using android::hardware::media::bufferpool::V2_0::implementation::ClientManager;
+using android::hardware::media::bufferpool::V2_0::implementation::ConnectionId;
+using android::hardware::media::bufferpool::V2_0::implementation::TransactionId;
+
+using namespace android;
+
+// communication message types between processes.
+enum PipeCommand : int32_t {
+ INIT,
+ TRANSFER,
+ STOP,
+
+ INIT_OK,
+ INIT_ERROR,
+ TRANSFER_OK,
+ TRANSFER_ERROR,
+ STOP_OK,
+ STOP_ERROR,
+};
+
+// communication message between processes.
+union PipeMessage {
+ struct {
+ int32_t command;
+ int32_t memsetValue;
+ BufferId bufferId;
+ ConnectionId connectionId;
+ TransactionId transactionId;
+ int64_t timestampUs;
+ } data;
+ char array[0];
+};
+
+static int32_t kNumIterationCount = 10;
+
+class BufferpoolTest {
+ public:
+ BufferpoolTest() : mConnectionValid(false), mManager(nullptr), mAllocator(nullptr) {
+ mConnectionId = -1;
+ mReceiverId = -1;
+ }
+
+ ~BufferpoolTest() {
+ if (mConnectionValid) {
+ mManager->close(mConnectionId);
+ }
+ }
+
+ protected:
+ bool mConnectionValid;
+ ConnectionId mConnectionId;
+ ConnectionId mReceiverId;
+
+ android::sp<ClientManager> mManager;
+ std::shared_ptr<BufferPoolAllocator> mAllocator;
+
+ void setupBufferpoolManager();
+};
+
+void BufferpoolTest::setupBufferpoolManager() {
+ // retrieving per process bufferpool object sp<ClientManager>
+ mManager = ClientManager::getInstance();
+ ASSERT_NE(mManager, nullptr) << "unable to get ClientManager\n";
+
+ mAllocator = std::make_shared<TestBufferPoolAllocator>();
+ ASSERT_NE(mAllocator, nullptr) << "unable to create TestBufferPoolAllocator\n";
+
+ // set-up local bufferpool connection for sender
+ ResultStatus status = mManager->create(mAllocator, &mConnectionId);
+ ASSERT_EQ(status, ResultStatus::OK)
+ << "unable to set-up local bufferpool connection for sender\n";
+ mConnectionValid = true;
+}
+
+class BufferpoolUnitTest : public BufferpoolTest, public ::testing::Test {
+ public:
+ virtual void SetUp() override { setupBufferpoolManager(); }
+
+ virtual void TearDown() override {}
+};
+
+class BufferpoolFunctionalityTest : public BufferpoolTest, public ::testing::Test {
+ public:
+ virtual void SetUp() override {
+ mReceiverPid = -1;
+
+ ASSERT_TRUE(pipe(mCommandPipeFds) == 0) << "pipe connection failed for commandPipe\n";
+ ASSERT_TRUE(pipe(mResultPipeFds) == 0) << "pipe connection failed for resultPipe\n";
+
+ mReceiverPid = fork();
+ ASSERT_TRUE(mReceiverPid >= 0) << "fork failed\n";
+
+ if (mReceiverPid == 0) {
+ doReceiver();
+ // Keep the child process out of gtest's flow: wait here until TearDown kills it
+ pause();
+ }
+ setupBufferpoolManager();
+ }
+
+ virtual void TearDown() override {
+ if (mReceiverPid > 0) {
+ kill(mReceiverPid, SIGKILL);
+ int wstatus;
+ wait(&wstatus);
+ }
+ }
+
+ protected:
+ pid_t mReceiverPid;
+ int mCommandPipeFds[2];
+ int mResultPipeFds[2];
+
+ bool sendMessage(int* pipes, const PipeMessage& message) {
+ int ret = write(pipes[1], message.array, sizeof(PipeMessage));
+ return ret == sizeof(PipeMessage);
+ }
+
+ bool receiveMessage(int* pipes, PipeMessage* message) {
+ int ret = read(pipes[0], message->array, sizeof(PipeMessage));
+ return ret == sizeof(PipeMessage);
+ }
+
+ void doReceiver();
+};
+
+void BufferpoolFunctionalityTest::doReceiver() {
+ // Configures the threadpool used for handling incoming RPC calls in this process.
+ configureRpcThreadpool(1 /*threads*/, false /*willJoin*/);
+ bool receiverRunning = true;
+ while (receiverRunning) {
+ PipeMessage message;
+ receiveMessage(mCommandPipeFds, &message);
+ ResultStatus err = ResultStatus::OK;
+ switch (message.data.command) {
+ case PipeCommand::INIT: {
+ // receiver manager creation
+ mManager = ClientManager::getInstance();
+ if (!mManager) {
+ message.data.command = PipeCommand::INIT_ERROR;
+ sendMessage(mResultPipeFds, message);
+ return;
+ }
+
+ android::status_t status = mManager->registerAsService();
+ if (status != android::OK) {
+ message.data.command = PipeCommand::INIT_ERROR;
+ sendMessage(mResultPipeFds, message);
+ return;
+ }
+ message.data.command = PipeCommand::INIT_OK;
+ sendMessage(mResultPipeFds, message);
+ break;
+ }
+ case PipeCommand::TRANSFER: {
+ native_handle_t* receiveHandle = nullptr;
+ std::shared_ptr<BufferPoolData> receiveBuffer;
+ err = mManager->receive(message.data.connectionId, message.data.transactionId,
+ message.data.bufferId, message.data.timestampUs,
+ &receiveHandle, &receiveBuffer);
+ if (err != ResultStatus::OK) {
+ message.data.command = PipeCommand::TRANSFER_ERROR;
+ sendMessage(mResultPipeFds, message);
+ return;
+ }
+ if (!TestBufferPoolAllocator::Verify(receiveHandle, message.data.memsetValue)) {
+ message.data.command = PipeCommand::TRANSFER_ERROR;
+ sendMessage(mResultPipeFds, message);
+ return;
+ }
+ if (receiveHandle) {
+ native_handle_close(receiveHandle);
+ native_handle_delete(receiveHandle);
+ }
+ receiveHandle = nullptr;
+ receiveBuffer.reset();
+ message.data.command = PipeCommand::TRANSFER_OK;
+ sendMessage(mResultPipeFds, message);
+ break;
+ }
+ case PipeCommand::STOP: {
+ err = mManager->close(message.data.connectionId);
+ if (err != ResultStatus::OK) {
+ message.data.command = PipeCommand::STOP_ERROR;
+ sendMessage(mResultPipeFds, message);
+ return;
+ }
+ message.data.command = PipeCommand::STOP_OK;
+ sendMessage(mResultPipeFds, message);
+ receiverRunning = false;
+ break;
+ }
+ default:
+ ALOGE("unknown command. try again");
+ break;
+ }
+ }
+}
+
+// Buffer allocation test.
+// Check whether each buffer allocation is done successfully with unique buffer id.
+TEST_F(BufferpoolUnitTest, AllocateBuffer) {
+ std::vector<uint8_t> vecParams;
+ getTestAllocatorParams(&vecParams);
+
+ std::vector<std::shared_ptr<BufferPoolData>> buffers{};
+ std::vector<native_handle_t*> allocHandle{};
+ ResultStatus status;
+ for (int i = 0; i < kNumIterationCount; ++i) {
+ native_handle_t* handle = nullptr;
+ std::shared_ptr<BufferPoolData> buffer{};
+ status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << " iteration";
+
+ buffers.push_back(std::move(buffer));
+ if (handle) {
+ allocHandle.push_back(std::move(handle));
+ }
+ }
+
+ for (int i = 0; i < kNumIterationCount; ++i) {
+ for (int j = i + 1; j < kNumIterationCount; ++j) {
+ ASSERT_TRUE(buffers[i]->mId != buffers[j]->mId) << "allocated buffers are not unique";
+ }
+ }
+ // delete the buffer handles
+ for (auto handle : allocHandle) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ // clear the vectors
+ buffers.clear();
+ allocHandle.clear();
+}
+
+// Buffer recycle test.
+// Check whether de-allocated buffers are recycled.
+TEST_F(BufferpoolUnitTest, RecycleBuffer) {
+ std::vector<uint8_t> vecParams;
+ getTestAllocatorParams(&vecParams);
+
+ ResultStatus status;
+ std::vector<BufferId> bid{};
+ std::vector<native_handle_t*> allocHandle{};
+ for (int i = 0; i < kNumIterationCount; ++i) {
+ native_handle_t* handle = nullptr;
+ std::shared_ptr<BufferPoolData> buffer;
+ status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << " iteration";
+
+ bid.push_back(buffer->mId);
+ if (handle) {
+ allocHandle.push_back(std::move(handle));
+ }
+ buffer.reset();
+ }
+
+ std::unordered_set<BufferId> set(bid.begin(), bid.end());
+ ASSERT_EQ(set.size(), 1) << "buffers are not recycled properly";
+
+ // delete the buffer handles
+ for (auto handle : allocHandle) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ allocHandle.clear();
+}
+
+// Validate cache evict and invalidate APIs.
+TEST_F(BufferpoolUnitTest, FlushTest) {
+ std::vector<uint8_t> vecParams;
+ getTestAllocatorParams(&vecParams);
+
+ ResultStatus status = mManager->registerSender(mManager, mConnectionId, &mReceiverId);
+ ASSERT_TRUE(status == ResultStatus::ALREADY_EXISTS && mReceiverId == mConnectionId);
+
+ // testing empty flush
+ status = mManager->flush(mConnectionId);
+ ASSERT_EQ(status, ResultStatus::OK) << "failed to flush connection : " << mConnectionId;
+
+ std::vector<std::shared_ptr<BufferPoolData>> senderBuffer{};
+ std::vector<native_handle_t*> allocHandle{};
+ std::vector<TransactionId> tid{};
+ std::vector<int64_t> timestampUs{};
+
+ std::map<TransactionId, BufferId> bufferMap{};
+
+ for (int i = 0; i < kNumIterationCount; i++) {
+ int64_t postUs;
+ TransactionId transactionId;
+ native_handle_t* handle = nullptr;
+ std::shared_ptr<BufferPoolData> buffer{};
+ status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << " iteration";
+
+ ASSERT_TRUE(TestBufferPoolAllocator::Fill(handle, i));
+
+ status = mManager->postSend(mReceiverId, buffer, &transactionId, &postUs);
+ ASSERT_EQ(status, ResultStatus::OK) << "unable to post send transaction on bufferpool";
+
+ timestampUs.push_back(postUs);
+ tid.push_back(transactionId);
+ bufferMap.insert({transactionId, buffer->mId});
+
+ senderBuffer.push_back(std::move(buffer));
+ if (handle) {
+ allocHandle.push_back(std::move(handle));
+ }
+ buffer.reset();
+ }
+
+ status = mManager->flush(mConnectionId);
+ ASSERT_EQ(status, ResultStatus::OK) << "failed to flush connection : " << mConnectionId;
+
+ std::shared_ptr<BufferPoolData> receiverBuffer{};
+ native_handle_t* recvHandle = nullptr;
+ for (int i = 0; i < kNumIterationCount; i++) {
+ status = mManager->receive(mReceiverId, tid[i], senderBuffer[i]->mId, timestampUs[i],
+ &recvHandle, &receiverBuffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "receive failed for buffer " << senderBuffer[i]->mId;
+
+ // find the buffer id from transaction id
+ auto findIt = bufferMap.find(tid[i]);
+ ASSERT_NE(findIt, bufferMap.end()) << "inconsistent buffer mapping";
+
+ // buffer id received must be same as the buffer id sent
+ ASSERT_EQ(findIt->second, receiverBuffer->mId) << "invalid buffer received";
+
+ ASSERT_TRUE(TestBufferPoolAllocator::Verify(recvHandle, i))
+ << "Message received not same as that sent";
+
+ bufferMap.erase(findIt);
+ if (recvHandle) {
+ native_handle_close(recvHandle);
+ native_handle_delete(recvHandle);
+ }
+ recvHandle = nullptr;
+ receiverBuffer.reset();
+ }
+
+ ASSERT_EQ(bufferMap.size(), 0) << "some sent buffers were never received";
+
+ for (auto handle : allocHandle) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ allocHandle.clear();
+ senderBuffer.clear();
+ timestampUs.clear();
+}
+
+// Buffer transfer test between processes.
+TEST_F(BufferpoolFunctionalityTest, TransferBuffer) {
+ // initialize the receiver
+ PipeMessage message;
+ message.data.command = PipeCommand::INIT;
+ sendMessage(mCommandPipeFds, message);
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::INIT_OK) << "receiver init failed";
+
+ android::sp<IClientManager> receiver = IClientManager::getService();
+ ASSERT_NE(receiver, nullptr) << "getService failed for receiver\n";
+
+ ConnectionId receiverId;
+ ResultStatus status = mManager->registerSender(receiver, mConnectionId, &receiverId);
+ ASSERT_EQ(status, ResultStatus::OK)
+ << "registerSender failed for connection id " << mConnectionId << "\n";
+
+ std::vector<uint8_t> vecParams;
+ getTestAllocatorParams(&vecParams);
+
+ for (int i = 0; i < kNumIterationCount; ++i) {
+ native_handle_t* handle = nullptr;
+ std::shared_ptr<BufferPoolData> buffer;
+ status = mManager->allocate(mConnectionId, vecParams, &handle, &buffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for " << i << " iteration";
+
+ ASSERT_TRUE(TestBufferPoolAllocator::Fill(handle, i))
+ << "Fill fail for buffer handle " << handle << "\n";
+
+ // send the buffer to the receiver
+ int64_t postUs;
+ TransactionId transactionId;
+ status = mManager->postSend(receiverId, buffer, &transactionId, &postUs);
+ ASSERT_EQ(status, ResultStatus::OK)
+ << "postSend failed for receiver " << receiverId << "\n";
+
+ // PipeMessage message;
+ message.data.command = PipeCommand::TRANSFER;
+ message.data.memsetValue = i;
+ message.data.bufferId = buffer->mId;
+ message.data.connectionId = receiverId;
+ message.data.transactionId = transactionId;
+ message.data.timestampUs = postUs;
+ sendMessage(mCommandPipeFds, message);
+ // delete buffer handle
+ if (handle) {
+ native_handle_close(handle);
+ native_handle_delete(handle);
+ }
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::TRANSFER_OK)
+ << "received error during buffer transfer\n";
+ }
+ message.data.command = PipeCommand::STOP;
+ sendMessage(mCommandPipeFds, message);
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::STOP_OK)
+ << "received error during buffer transfer\n";
+}
+
+/* Validate bufferpool for following corner cases:
+ 1. invalid connectionID
+ 2. invalid receiver
+ 3. when sender is not registered
+ 4. when connection is closed
+*/
+// TODO: Enable when the issue in b/212196495 is fixed
+TEST_F(BufferpoolFunctionalityTest, DISABLED_ValidityTest) {
+ std::vector<uint8_t> vecParams;
+ getTestAllocatorParams(&vecParams);
+
+ std::shared_ptr<BufferPoolData> senderBuffer;
+ native_handle_t* allocHandle = nullptr;
+
+ // call allocate() on a random connection id
+ ConnectionId randomId = rand();
+ ResultStatus status = mManager->allocate(randomId, vecParams, &allocHandle, &senderBuffer);
+ EXPECT_TRUE(status == ResultStatus::NOT_FOUND);
+
+ // initialize the receiver
+ PipeMessage message;
+ message.data.command = PipeCommand::INIT;
+ sendMessage(mCommandPipeFds, message);
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::INIT_OK) << "receiver init failed";
+
+ allocHandle = nullptr;
+ senderBuffer.reset();
+ status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &senderBuffer);
+
+ ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x77));
+
+ // send buffers w/o registering sender
+ int64_t postUs;
+ TransactionId transactionId;
+
+ // random receiver
+ status = mManager->postSend(randomId, senderBuffer, &transactionId, &postUs);
+ ASSERT_NE(status, ResultStatus::OK) << "bufferpool shouldn't allow send on random receiver";
+
+ // establish connection
+ android::sp<IClientManager> receiver = IClientManager::getService();
+ ASSERT_NE(receiver, nullptr) << "getService failed for receiver\n";
+
+ ConnectionId receiverId;
+ status = mManager->registerSender(receiver, mConnectionId, &receiverId);
+ ASSERT_EQ(status, ResultStatus::OK)
+ << "registerSender failed for connection id " << mConnectionId << "\n";
+
+ allocHandle = nullptr;
+ senderBuffer.reset();
+ status = mManager->allocate(mConnectionId, vecParams, &allocHandle, &senderBuffer);
+ ASSERT_EQ(status, ResultStatus::OK) << "allocate failed for connection " << mConnectionId;
+
+ ASSERT_TRUE(TestBufferPoolAllocator::Fill(allocHandle, 0x88));
+
+ // send the buffer to the receiver
+ status = mManager->postSend(receiverId, senderBuffer, &transactionId, &postUs);
+ ASSERT_EQ(status, ResultStatus::OK) << "postSend failed for receiver " << receiverId << "\n";
+
+ // PipeMessage message;
+ message.data.command = PipeCommand::TRANSFER;
+ message.data.memsetValue = 0x88;
+ message.data.bufferId = senderBuffer->mId;
+ message.data.connectionId = receiverId;
+ message.data.transactionId = transactionId;
+ message.data.timestampUs = postUs;
+ sendMessage(mCommandPipeFds, message);
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::TRANSFER_OK)
+ << "received error during buffer transfer\n";
+
+ if (allocHandle) {
+ native_handle_close(allocHandle);
+ native_handle_delete(allocHandle);
+ }
+
+ message.data.command = PipeCommand::STOP;
+ sendMessage(mCommandPipeFds, message);
+ ASSERT_TRUE(receiveMessage(mResultPipeFds, &message)) << "receiveMessage failed\n";
+ ASSERT_EQ(message.data.command, PipeCommand::STOP_OK)
+ << "received error during buffer transfer\n";
+
+ // try to send msg to closed connection
+ status = mManager->postSend(receiverId, senderBuffer, &transactionId, &postUs);
+ ASSERT_NE(status, ResultStatus::OK) << "bufferpool shouldn't allow send on closed connection";
+}
+
+int main(int argc, char** argv) {
+ android::hardware::details::setTrebleTestingOverride(true);
+ ::testing::InitGoogleTest(&argc, argv);
+ int status = RUN_ALL_TESTS();
+ ALOGV("Test result = %d\n", status);
+ return status;
+}
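The tests above exercise `ClientManager` across two processes; the single-process lifecycle they build on is short. A condensed sketch (assuming the same headers, using-declarations, and `getTestAllocatorParams()` helper as the test file above):

```cpp
void bufferpoolSmokeTest(const std::shared_ptr<BufferPoolAllocator>& allocator) {
    android::sp<ClientManager> manager = ClientManager::getInstance();
    ConnectionId connectionId = -1;
    if (manager->create(allocator, &connectionId) != ResultStatus::OK) return;

    std::vector<uint8_t> vecParams;
    getTestAllocatorParams(&vecParams);

    native_handle_t* handle = nullptr;
    std::shared_ptr<BufferPoolData> buffer;
    if (manager->allocate(connectionId, vecParams, &handle, &buffer) == ResultStatus::OK) {
        // ... fill and use the buffer ...
        if (handle) {
            native_handle_close(handle);
            native_handle_delete(handle);
        }
        buffer.reset();  // dropping the last reference lets the pool recycle it
    }
    manager->close(connectionId);
}
```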
diff --git a/media/bufferpool/2.0/tests/README.md b/media/bufferpool/2.0/tests/README.md
new file mode 100644
index 0000000..5efd966
--- /dev/null
+++ b/media/bufferpool/2.0/tests/README.md
@@ -0,0 +1,33 @@
+## Media Testing ##
+---
+#### Bufferpool :
+The Bufferpool Test Suite validates the bufferpool library in Android.
+
+Run the following steps to build the test suite:
+```
+m BufferpoolUnitTest
+```
+
+The 32-bit binaries will be created in the following path: ${OUT}/data/nativetest/
+
+The 64-bit binaries will be created in the following path: ${OUT}/data/nativetest64/
+
+To test the 64-bit binary, push it from nativetest64.
+```
+adb push ${OUT}/data/nativetest64/BufferpoolUnitTest/BufferpoolUnitTest /data/local/tmp/
+```
+
+To test the 32-bit binary, push it from nativetest.
+```
+adb push ${OUT}/data/nativetest/BufferpoolUnitTest/BufferpoolUnitTest /data/local/tmp/
+```
+
+usage: BufferpoolUnitTest
+```
+adb shell /data/local/tmp/BufferpoolUnitTest
+```
+Alternatively, the test can be run using the atest command.
+
+```
+atest BufferpoolUnitTest
+```
diff --git a/media/codec2/TEST_MAPPING b/media/codec2/TEST_MAPPING
index 2b96055..90bb054 100644
--- a/media/codec2/TEST_MAPPING
+++ b/media/codec2/TEST_MAPPING
@@ -8,7 +8,7 @@
],
"presubmit-large": [
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaMiscTestCases",
"options": [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
@@ -37,6 +37,17 @@
]
},
{
+ "name": "CtsMediaDecoderTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaEncoderTestCases",
"options": [
{
@@ -48,6 +59,17 @@
]
},
{
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaPlayerTestCases",
"options": [
{
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
index bb63e1f..7afea91 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.cpp
@@ -225,7 +225,7 @@
work->result = C2_CORRUPTED;
return;
}
- uint64_t outTimeStamp =
+ int64_t outTimeStamp =
mProcessedSamples * 1000000ll / mIntf->getSampleRate();
size_t inPos = 0;
size_t outPos = 0;
@@ -266,7 +266,7 @@
ALOGV("causal sample size %d", mFilledLen);
if (mIsFirst && outPos != 0) {
mIsFirst = false;
- mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekll();
}
fillEmptyWork(work);
if (outPos != 0) {
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
index 6ab14db..4920b23 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrNbEnc.h
@@ -54,7 +54,7 @@
bool mIsFirst;
bool mSignalledError;
bool mSignalledOutputEos;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
int32_t mFilledLen;
int16_t mInputFrame[kNumSamplesPerFrame];
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
index 84728ae..29b1040 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.cpp
@@ -307,7 +307,7 @@
work->result = wView.error();
return;
}
- uint64_t outTimeStamp =
+ int64_t outTimeStamp =
mProcessedSamples * 1000000ll / mIntf->getSampleRate();
size_t inPos = 0;
size_t outPos = 0;
@@ -341,7 +341,7 @@
ALOGV("causal sample size %d", mFilledLen);
if (mIsFirst && outPos != 0) {
mIsFirst = false;
- mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekll();
}
fillEmptyWork(work);
if (outPos != 0) {
diff --git a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
index 0cc9e9f..72990c3 100644
--- a/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
+++ b/media/codec2/components/amr_nb_wb/C2SoftAmrWbEnc.h
@@ -55,7 +55,7 @@
bool mIsFirst;
bool mSignalledError;
bool mSignalledOutputEos;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
int32_t mFilledLen;
int16_t mInputFrame[kNumSamplesPerFrame];
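Why the switch from `uint64_t`/`peekull()` to `int64_t`/`peekll()` in the AMR encoders above: Codec2 work timestamps are signed and can legitimately be negative (e.g., pre-roll), and anchoring them in an unsigned variable silently wraps. A small illustration (not from the patch):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    int64_t anchorSigned = -20000;           // e.g. a negative pre-roll timestamp, in us
    uint64_t anchorUnsigned = anchorSigned;  // what an unsigned anchor would have stored
    int64_t outTimeStamp = 10000;            // encoder's offset for some output frame
    printf("signed anchor:   %" PRId64 "\n", anchorSigned + outTimeStamp);    // -10000
    printf("unsigned anchor: %" PRIu64 "\n", anchorUnsigned + outTimeStamp);  // wraps near 2^64
    return 0;
}
```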
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index c08cd59..c7985ca 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -505,124 +505,6 @@
}
}
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width, size_t height) {
-
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *) dst;
- uint32_t *dstBot = (uint32_t *) (dst + dstStride);
- uint16_t *ySrcTop = (uint16_t*) srcY;
- uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
- uint16_t *uSrc = (uint16_t*) srcU;
- uint16_t *vSrc = (uint16_t*) srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
-
- u01 = *((uint32_t*)uSrc); uSrc += 2;
- v01 = *((uint32_t*)vSrc); vSrc += 2;
-
- y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
- y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t*)ySrcTop);
- y45 = *((uint32_t*)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-
- return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
- return;
-}
bool C2SoftAomDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
@@ -711,21 +593,16 @@
dstYStride / sizeof(uint32_t),
mWidth, mHeight);
} else {
- convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride / 2, srcUStride / 2, srcVStride / 2,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ mWidth, mHeight);
}
} else {
const uint8_t *srcY = (const uint8_t *)img->planes[AOM_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)img->planes[AOM_PLANE_U];
const uint8_t *srcV = (const uint8_t *)img->planes[AOM_PLANE_V];
- copyOutputBufferToYuvPlanarFrame(
- dstY, dstU, dstV, srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
}
finishWork(*(int64_t*)img->user_priv, work, std::move(block));
block = nullptr;
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index 6c4b7d9..434246f 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -29,7 +29,179 @@
#include <SimpleC2Component.h>
namespace android {
+constexpr uint8_t kNeutralUVBitDepth8 = 128;
+constexpr uint16_t kNeutralUVBitDepth10 = 512;
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+ const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, uint32_t width, uint32_t height,
+ bool isMonochrome) {
+ for (size_t i = 0; i < height; ++i) {
+ memcpy(dstY, srcY, width);
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t i = 0; i < height / 2; ++i) {
+ memset(dstV, kNeutralUVBitDepth8, width / 2);
+ memset(dstU, kNeutralUVBitDepth8, width / 2);
+ dstV += dstUVStride;
+ dstU += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dstV, srcV, width / 2);
+ srcV += srcVStride;
+ dstV += dstUVStride;
+ }
+
+ for (size_t i = 0; i < height / 2; ++i) {
+ memcpy(dstU, srcU, width / 2);
+ srcU += srcUStride;
+ dstU += dstUVStride;
+ }
+}
+
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height) {
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < height; y += 2) {
+ uint32_t *dstTop = (uint32_t *)dst;
+ uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+ uint16_t *ySrcTop = (uint16_t *)srcY;
+ uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+ uint16_t *uSrc = (uint16_t *)srcU;
+ uint16_t *vSrc = (uint16_t *)srcV;
+
+ uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
+ size_t x = 0;
+ for (; x < width - 3; x += 4) {
+ u01 = *((uint32_t *)uSrc);
+ uSrc += 2;
+ v01 = *((uint32_t *)vSrc);
+ vSrc += 2;
+
+ y01 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y23 = *((uint32_t *)ySrcTop);
+ ySrcTop += 2;
+ y45 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+ y67 = *((uint32_t *)ySrcBot);
+ ySrcBot += 2;
+
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
+
+ *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
+ *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
+ *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
+
+ *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
+ *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
+ *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
+ }
+
+ // There should be at most 2 more pixels to process. Note that we don't
+ // need to consider the odd case as the buffer is always aligned to an even width.
+ if (x < width) {
+ u01 = *uSrc;
+ v01 = *vSrc;
+ y01 = *((uint32_t *)ySrcTop);
+ y45 = *((uint32_t *)ySrcBot);
+ uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
+ *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
+ *dstTop++ = ((y01 >> 16) << 10) | uv0;
+ *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
+ *dstBot++ = ((y45 >> 16) << 10) | uv0;
+ }
+
+ srcY += srcYStride * 2;
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dst += dstStride * 2;
+ }
+}
+
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome) {
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = (uint8_t)(srcY[x] >> 2);
+ }
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ memset(dstV, kNeutralUVBitDepth8, (width + 1) / 2);
+ memset(dstU, kNeutralUVBitDepth8, (width + 1) / 2);
+ dstV += dstUVStride;
+ dstU += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstU[x] = (uint8_t)(srcU[x] >> 2);
+ dstV[x] = (uint8_t)(srcV[x] >> 2);
+ }
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstU += dstUVStride;
+ dstV += dstUVStride;
+ }
+}
+
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome) {
+ for (size_t y = 0; y < height; ++y) {
+ for (size_t x = 0; x < width; ++x) {
+ dstY[x] = srcY[x] << 6;
+ }
+ srcY += srcYStride;
+ dstY += dstYStride;
+ }
+
+ if (isMonochrome) {
+ // Fill with neutral U/V values.
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstUV[2 * x] = kNeutralUVBitDepth10 << 6;
+ dstUV[2 * x + 1] = kNeutralUVBitDepth10 << 6;
+ }
+ dstUV += dstUVStride;
+ }
+ return;
+ }
+
+ for (size_t y = 0; y < (height + 1) / 2; ++y) {
+ for (size_t x = 0; x < (width + 1) / 2; ++x) {
+ dstUV[2 * x] = srcU[x] << 6;
+ dstUV[2 * x + 1] = srcV[x] << 6;
+ }
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dstUV += dstUVStride;
+ }
+}
std::unique_ptr<C2Work> SimpleC2Component::WorkQueue::pop_front() {
std::unique_ptr<C2Work> work = std::move(mQueue.front().work);
mQueue.pop_front();
@@ -591,6 +763,43 @@
return hasQueuedWork;
}
+int SimpleC2Component::getHalPixelFormatForBitDepth10(bool allowRGBA1010102) {
+ // Save supported hal pixel formats for bit depth of 10, the first time this is called
+ if (!mBitDepth10HalPixelFormats.size()) {
+ std::vector<int> halPixelFormats;
+ // TODO(b/178229371) Enable HAL_PIXEL_FORMAT_YCBCR_P010 once framework supports it
+ // halPixelFormats.push_back(HAL_PIXEL_FORMAT_YCBCR_P010);
+
+ // since allowRGBA1010102 can change in each call, but mBitDepth10HalPixelFormats
+ // is populated only once, allowRGBA1010102 is not considered at this stage.
+ halPixelFormats.push_back(HAL_PIXEL_FORMAT_RGBA_1010102);
+
+ for (int halPixelFormat : halPixelFormats) {
+ std::shared_ptr<C2GraphicBlock> block;
+
+ uint32_t gpuConsumerFlags = halPixelFormat == HAL_PIXEL_FORMAT_RGBA_1010102
+ ? C2AndroidMemoryUsage::HW_TEXTURE_READ
+ : 0;
+ C2MemoryUsage usage = {C2MemoryUsage::CPU_READ | gpuConsumerFlags,
+ C2MemoryUsage::CPU_WRITE};
+ // TODO(b/214411172) Use AHardwareBuffer_isSupported once it supports P010
+ c2_status_t status =
+ mOutputBlockPool->fetchGraphicBlock(320, 240, halPixelFormat, usage, &block);
+ if (status == C2_OK) {
+ mBitDepth10HalPixelFormats.push_back(halPixelFormat);
+ }
+ }
+ // Add YV12 at the end as a fallback option
+ mBitDepth10HalPixelFormats.push_back(HAL_PIXEL_FORMAT_YV12);
+ }
+ // When RGBA1010102 is not allowed and the first supported hal pixel format is
+ // HAL_PIXEL_FORMAT_RGBA_1010102, then return HAL_PIXEL_FORMAT_YV12
+ if (!allowRGBA1010102 && mBitDepth10HalPixelFormats[0] == HAL_PIXEL_FORMAT_RGBA_1010102) {
+ return HAL_PIXEL_FORMAT_YV12;
+ }
+ // Return the first entry from supported formats
+ return mBitDepth10HalPixelFormats[0];
+}
std::shared_ptr<C2Buffer> SimpleC2Component::createLinearBuffer(
const std::shared_ptr<C2LinearBlock> &block, size_t offset, size_t size) {
return C2Buffer::CreateLinearBuffer(block->share(offset, size, ::C2Fence()));
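`convertYUV420Planar16ToP010` above stores each 10-bit sample in the top 10 bits of a 16-bit word (shift left by 6), which is the P010 layout; monochrome input gets the neutral chroma value 512 in the same packing. A tiny self-check of that rule (illustrative only):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    uint16_t sample10 = 1023;        // maximum 10-bit value
    uint16_t p010 = sample10 << 6;   // packed into the high bits
    assert(p010 == 0xFFC0);
    assert((p010 & 0x3F) == 0);      // low 6 bits of P010 samples are always zero
    // Neutral chroma for monochrome content: 512 << 6 == 0x8000 (mid level)
    assert(static_cast<uint16_t>(512 << 6) == 0x8000);
    return 0;
}
```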
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index e5e16d8..d244f45 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -28,6 +28,24 @@
namespace android {
+void convertYUV420Planar8ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint8_t *srcY,
+ const uint8_t *srcU, const uint8_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, uint32_t width, uint32_t height,
+ bool isMonochrome = false);
+void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width, size_t height);
+void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome = false);
+void convertYUV420Planar16ToP010(uint16_t *dstY, uint16_t *dstUV, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
+ size_t srcUStride, size_t srcVStride, size_t dstYStride,
+ size_t dstUVStride, size_t width, size_t height,
+ bool isMonochrome = false);
class SimpleC2Component
: public C2Component, public std::enable_shared_from_this<SimpleC2Component> {
public:
@@ -149,6 +167,7 @@
static constexpr uint32_t NO_DRAIN = ~0u;
C2ReadView mDummyReadView;
+ int getHalPixelFormatForBitDepth10(bool allowRGBA1010102);
private:
const std::shared_ptr<C2ComponentInterface> mIntf;
@@ -232,6 +251,7 @@
class BlockingBlockPool;
std::shared_ptr<BlockingBlockPool> mOutputBlockPool;
+ std::vector<int> mBitDepth10HalPixelFormats;
SimpleC2Component() = delete;
};
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index 475d863..0f59d76 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -28,11 +28,6 @@
#include <media/stagefright/foundation/MediaDefs.h>
namespace android {
-namespace {
-
-constexpr uint8_t NEUTRAL_UV_VALUE = 128;
-
-} // namespace
// codecname set and passed in as a compile flag from Android.bp
constexpr char COMPONENT_NAME[] = CODECNAME;
@@ -339,7 +334,6 @@
std::make_shared<SimpleInterface<IntfImpl>>(name, id, intfImpl)),
mIntf(intfImpl),
mCodecCtx(nullptr) {
- mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
gettimeofday(&mTimeStart, nullptr);
gettimeofday(&mTimeEnd, nullptr);
}
@@ -545,150 +539,6 @@
}
}
-static void copyOutputBufferToYV12Frame(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height,
- bool isMonochrome) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- if (isMonochrome) {
- // Fill with neutral U/V values.
- for (size_t i = 0; i < height / 2; ++i) {
- memset(dstV, NEUTRAL_UV_VALUE, width / 2);
- memset(dstU, NEUTRAL_UV_VALUE, width / 2);
- dstV += dstUVStride;
- dstU += dstUVStride;
- }
- return;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY,
- const uint16_t *srcU,
- const uint16_t *srcV, size_t srcYStride,
- size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width,
- size_t height) {
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *)dst;
- uint32_t *dstBot = (uint32_t *)(dst + dstStride);
- uint16_t *ySrcTop = (uint16_t *)srcY;
- uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
- uint16_t *uSrc = (uint16_t *)srcU;
- uint16_t *vSrc = (uint16_t *)srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
- u01 = *((uint32_t *)uSrc);
- uSrc += 2;
- v01 = *((uint32_t *)vSrc);
- vSrc += 2;
-
- y01 = *((uint32_t *)ySrcTop);
- ySrcTop += 2;
- y23 = *((uint32_t *)ySrcTop);
- ySrcTop += 2;
- y45 = *((uint32_t *)ySrcBot);
- ySrcBot += 2;
- y67 = *((uint32_t *)ySrcBot);
- ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t *)ySrcTop);
- y45 = *((uint32_t *)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height, bool isMonochrome) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- if (isMonochrome) {
- // Fill with neutral U/V values.
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- memset(dstV, NEUTRAL_UV_VALUE, (width + 1) / 2);
- memset(dstU, NEUTRAL_UV_VALUE, (width + 1) / 2);
- dstV += dstUVStride;
- dstU += dstUVStride;
- }
- return;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
-}
-
void C2SoftGav1Dec::getVuiParams(const libgav1::DecoderBuffer *buffer) {
VuiColorAspects vuiColorAspects;
vuiColorAspects.primaries = buffer->color_primary;
@@ -781,25 +631,20 @@
IntfImpl::Lock lock = mIntf->lock();
std::shared_ptr<C2StreamColorAspectsInfo::output> codedColorAspects =
mIntf->getColorAspects_l();
-
+ bool allowRGBA1010102 = false;
if (codedColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
codedColorAspects->matrix == C2Color::MATRIX_BT2020 &&
codedColorAspects->transfer == C2Color::TRANSFER_ST2084) {
- if (buffer->image_format != libgav1::kImageFormatYuv420) {
+ allowRGBA1010102 = true;
+ }
+ format = getHalPixelFormatForBitDepth10(allowRGBA1010102);
+ if ((format == HAL_PIXEL_FORMAT_RGBA_1010102) &&
+ (buffer->image_format != libgav1::kImageFormatYuv420)) {
ALOGE("Only YUV420 output is supported when targeting RGBA_1010102");
- mSignalledError = true;
- work->result = C2_OMITTED;
- work->workletsProcessed = 1u;
- return false;
- }
- // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
- // HAL_PIXEL_FORMAT_YV12 is used as a temporary work around.
- if (!mIsFormatR10G10B10A2Supported) {
- ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
- format = HAL_PIXEL_FORMAT_YV12;
- } else {
- format = HAL_PIXEL_FORMAT_RGBA_1010102;
- }
+ mSignalledError = true;
+ work->result = C2_OMITTED;
+ work->workletsProcessed = 1u;
+ return false;
}
}
C2MemoryUsage usage = {C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE};
@@ -841,22 +686,24 @@
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410(
- (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
+ convertYUV420Planar16ToY410((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
+ mWidth, mHeight);
+ } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+ convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+ srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
+ dstUVStride / 2, mWidth, mHeight, isMonochrome);
} else {
- convertYUV420Planar16ToYUV420Planar(
- dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
- srcVStride / 2, dstYStride, dstUVStride, mWidth, mHeight,
- isMonochrome);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride, mWidth,
+ mHeight, isMonochrome);
}
} else {
const uint8_t *srcY = (const uint8_t *)buffer->plane[0];
const uint8_t *srcU = (const uint8_t *)buffer->plane[1];
const uint8_t *srcV = (const uint8_t *)buffer->plane[2];
- copyOutputBufferToYV12Frame(
- dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight, isMonochrome);
}
finishWork(buffer->user_private_data, work, std::move(block));
block = nullptr;
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.h b/media/codec2/components/gav1/C2SoftGav1Dec.h
index f82992d..134fa0d 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.h
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.h
@@ -82,7 +82,6 @@
struct timeval mTimeStart; // Time at the start of decode()
struct timeval mTimeEnd; // Time at the end of decode()
- bool mIsFormatR10G10B10A2Supported;
bool initDecoder();
void getVuiParams(const libgav1::DecoderBuffer *buffer);
diff --git a/media/codec2/components/hevc/C2SoftHevcEnc.cpp b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
index b7a5686..4f5caec 100644
--- a/media/codec2/components/hevc/C2SoftHevcEnc.cpp
+++ b/media/codec2/components/hevc/C2SoftHevcEnc.cpp
@@ -123,7 +123,7 @@
// matches size limits in codec library
addParameter(
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
- .withDefault(new C2StreamPictureSizeInfo::input(0u, 320, 240))
+ .withDefault(new C2StreamPictureSizeInfo::input(0u, 64, 64))
.withFields({
C2F(mSize, width).inRange(2, 1920, 2),
C2F(mSize, height).inRange(2, 1088, 2),
@@ -133,7 +133,7 @@
addParameter(
DefineParam(mFrameRate, C2_PARAMKEY_FRAME_RATE)
- .withDefault(new C2StreamFrameRateInfo::output(0u, 30.))
+ .withDefault(new C2StreamFrameRateInfo::output(0u, 1.))
.withFields({C2F(mFrameRate, value).greaterThan(0.)})
.withSetter(
Setter<decltype(*mFrameRate)>::StrictValueWithNoDeps)
diff --git a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
index 81f4679..54a1d0e 100644
--- a/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
+++ b/media/codec2/components/mpeg4_h263/C2SoftMpeg4Dec.cpp
@@ -419,40 +419,6 @@
return resChanged;
}
-/* TODO: can remove temporary copy after library supports writing to display
- * buffer Y, U and V plane pointers using stride info. */
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, uint8_t *src,
- size_t dstYStride, size_t dstUVStride,
- size_t srcYStride, uint32_t width,
- uint32_t height) {
- size_t srcUVStride = srcYStride / 2;
- uint8_t *srcStart = src;
-
- size_t vStride = align(height, 16);
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, src, width);
- src += srcYStride;
- dstY += dstYStride;
- }
-
- /* U buffer */
- src = srcStart + vStride * srcYStride;
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, src, width / 2);
- src += srcUVStride;
- dstU += dstUVStride;
- }
-
- /* V buffer */
- src = srcStart + vStride * srcYStride * 5 / 4;
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, src, width / 2);
- src += srcUVStride;
- dstV += dstUVStride;
- }
-}
-
void C2SoftMpeg4Dec::process(
const std::unique_ptr<C2Work> &work,
const std::shared_ptr<C2BlockPool> &pool) {
@@ -636,11 +602,17 @@
C2PlanarLayout layout = wView.layout();
size_t dstYStride = layout.planes[C2PlanarLayout::PLANE_Y].rowInc;
size_t dstUVStride = layout.planes[C2PlanarLayout::PLANE_U].rowInc;
- (void)copyOutputBufferToYuvPlanarFrame(
- outputBufferY, outputBufferU, outputBufferV,
- mOutputBuffer[mNumSamplesOutput & 1],
- dstYStride, dstUVStride,
- align(mWidth, 16), mWidth, mHeight);
+ size_t srcYStride = align(mWidth, 16);
+ size_t srcUStride = srcYStride / 2;
+ size_t srcVStride = srcYStride / 2;
+ size_t vStride = align(mHeight, 16);
+ const uint8_t *srcY = (const uint8_t *)mOutputBuffer[mNumSamplesOutput & 1];
+ const uint8_t *srcU = (const uint8_t *)srcY + vStride * srcYStride;
+ const uint8_t *srcV = (const uint8_t *)srcY + vStride * srcYStride * 5 / 4;
+
+ convertYUV420Planar8ToYV12(outputBufferY, outputBufferU, outputBufferV, srcY, srcU, srcV,
+ srcYStride, srcUStride, srcVStride, dstYStride, dstUVStride,
+ mWidth, mHeight);
inPos += inSize - (size_t)tmpInSize;
finishWork(workIndex, work);
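The replaced helper's pointer math is now explicit at the call site: the decoder's internal buffer is a 16-aligned Y plane followed by the U and V planes. For a hypothetical QCIF (176x144) stream, srcYStride = align(176, 16) = 176 and vStride = align(144, 16) = 144, so srcU starts at byte offset 144 * 176 = 25344 and srcV at 25344 * 5/4 = 31680, i.e. right after the 25344 / 4 = 6336-byte U plane.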
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.cpp b/media/codec2/components/opus/C2SoftOpusEnc.cpp
index 370d33c..cdc3be0 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.cpp
+++ b/media/codec2/components/opus/C2SoftOpusEnc.cpp
@@ -245,7 +245,7 @@
mIsFirstFrame = true;
mEncoderFlushed = false;
mBufferAvailable = false;
- mAnchorTimeStamp = 0ull;
+ mAnchorTimeStamp = 0;
mProcessedSamples = 0;
mFilledLen = 0;
mFrameDurationMs = DEFAULT_FRAME_DURATION_MS;
@@ -266,7 +266,7 @@
mIsFirstFrame = true;
mEncoderFlushed = false;
mBufferAvailable = false;
- mAnchorTimeStamp = 0ull;
+ mAnchorTimeStamp = 0;
mProcessedSamples = 0u;
mFilledLen = 0;
if (mEncoder) {
@@ -363,7 +363,7 @@
}
}
if (mIsFirstFrame && inSize > 0) {
- mAnchorTimeStamp = work->input.ordinal.timestamp.peekull();
+ mAnchorTimeStamp = work->input.ordinal.timestamp.peekll();
mIsFirstFrame = false;
}
@@ -386,7 +386,7 @@
size_t inPos = 0;
size_t processSize = 0;
mBytesEncoded = 0;
- uint64_t outTimeStamp = 0u;
+ int64_t outTimeStamp = 0;
std::shared_ptr<C2Buffer> buffer;
uint64_t inputIndex = work->input.ordinal.frameIndex.peeku();
const uint8_t* inPtr = rView.data() + inOffset;
@@ -584,7 +584,7 @@
mOutputBlock.reset();
}
mProcessedSamples += (mNumPcmBytesPerInputFrame / sizeof(int16_t));
- uint64_t outTimeStamp =
+ int64_t outTimeStamp =
mProcessedSamples * 1000000ll / mChannelCount / mSampleRate;
outOrdinal.frameIndex = mOutIndex++;
outOrdinal.timestamp = mAnchorTimeStamp + outTimeStamp;
@@ -612,7 +612,7 @@
return C2_OMITTED;
}
mIsFirstFrame = true;
- mAnchorTimeStamp = 0ull;
+ mAnchorTimeStamp = 0;
mProcessedSamples = 0u;
return drainInternal(pool, nullptr);
}
diff --git a/media/codec2/components/opus/C2SoftOpusEnc.h b/media/codec2/components/opus/C2SoftOpusEnc.h
index 2b4d8f2..733a6bc 100644
--- a/media/codec2/components/opus/C2SoftOpusEnc.h
+++ b/media/codec2/components/opus/C2SoftOpusEnc.h
@@ -67,7 +67,7 @@
uint32_t mSampleRate;
uint32_t mChannelCount;
uint32_t mFrameDurationMs;
- uint64_t mAnchorTimeStamp;
+ int64_t mAnchorTimeStamp;
uint64_t mProcessedSamples;
// Codec delay in ns
uint64_t mCodecDelay;
diff --git a/media/codec2/components/tests/Android.bp b/media/codec2/components/tests/Android.bp
index 3c68eee..be2abf2 100644
--- a/media/codec2/components/tests/Android.bp
+++ b/media/codec2/components/tests/Android.bp
@@ -9,44 +9,13 @@
cc_defaults {
name: "C2SoftCodecTest-defaults",
+ defaults: [ "libcodec2-static-defaults" ],
gtest: true,
host_supported: false,
srcs: [
"C2SoftCodecTest.cpp",
],
- static_libs: [
- "liblog",
- "libion",
- "libfmq",
- "libbase",
- "libutils",
- "libcutils",
- "libcodec2",
- "libhidlbase",
- "libdmabufheap",
- "libcodec2_vndk",
- "libnativewindow",
- "libcodec2_soft_common",
- "libsfplugin_ccodec_utils",
- "libstagefright_foundation",
- "libstagefright_bufferpool@2.0.1",
- "android.hardware.graphics.mapper@2.0",
- "android.hardware.graphics.mapper@3.0",
- "android.hardware.media.bufferpool@2.0",
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.allocator@3.0",
- "android.hardware.graphics.bufferqueue@2.0",
- ],
-
- shared_libs: [
- "libui",
- "libdl",
- "libhardware",
- "libvndksupport",
- "libprocessgroup",
- ],
-
cflags: [
"-Wall",
"-Werror",
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index 2da9d5b..5fc89be 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -352,7 +352,6 @@
mCodecCtx(nullptr),
mCoreCount(1),
mQueue(new Mutexed<ConversionQueue>) {
- mIsFormatR10G10B10A2Supported = IsFormatR10G10B10A2SupportedForLegacyRendering();
}
C2SoftVpxDec::~C2SoftVpxDec() {
@@ -640,125 +639,6 @@
}
}
-static void copyOutputBufferToYuvPlanarFrame(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- uint32_t width, uint32_t height) {
-
- for (size_t i = 0; i < height; ++i) {
- memcpy(dstY, srcY, width);
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstV, srcV, width / 2);
- srcV += srcVStride;
- dstV += dstUVStride;
- }
-
- for (size_t i = 0; i < height / 2; ++i) {
- memcpy(dstU, srcU, width / 2);
- srcU += srcUStride;
- dstU += dstUVStride;
- }
-
-}
-
-static void convertYUV420Planar16ToY410(uint32_t *dst,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstStride, size_t width, size_t height) {
-
- // Converting two lines at a time, slightly faster
- for (size_t y = 0; y < height; y += 2) {
- uint32_t *dstTop = (uint32_t *) dst;
- uint32_t *dstBot = (uint32_t *) (dst + dstStride);
- uint16_t *ySrcTop = (uint16_t*) srcY;
- uint16_t *ySrcBot = (uint16_t*) (srcY + srcYStride);
- uint16_t *uSrc = (uint16_t*) srcU;
- uint16_t *vSrc = (uint16_t*) srcV;
-
- uint32_t u01, v01, y01, y23, y45, y67, uv0, uv1;
- size_t x = 0;
- for (; x < width - 3; x += 4) {
-
- u01 = *((uint32_t*)uSrc); uSrc += 2;
- v01 = *((uint32_t*)vSrc); vSrc += 2;
-
- y01 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y23 = *((uint32_t*)ySrcTop); ySrcTop += 2;
- y45 = *((uint32_t*)ySrcBot); ySrcBot += 2;
- y67 = *((uint32_t*)ySrcBot); ySrcBot += 2;
-
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- uv1 = (u01 >> 16) | ((v01 >> 16) << 20);
-
- *dstTop++ = 3 << 30 | ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y01 >> 16) << 10) | uv0;
- *dstTop++ = 3 << 30 | ((y23 & 0x3FF) << 10) | uv1;
- *dstTop++ = 3 << 30 | ((y23 >> 16) << 10) | uv1;
-
- *dstBot++ = 3 << 30 | ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y45 >> 16) << 10) | uv0;
- *dstBot++ = 3 << 30 | ((y67 & 0x3FF) << 10) | uv1;
- *dstBot++ = 3 << 30 | ((y67 >> 16) << 10) | uv1;
- }
-
- // There should be at most 2 more pixels to process. Note that we don't
- // need to consider odd case as the buffer is always aligned to even.
- if (x < width) {
- u01 = *uSrc;
- v01 = *vSrc;
- y01 = *((uint32_t*)ySrcTop);
- y45 = *((uint32_t*)ySrcBot);
- uv0 = (u01 & 0x3FF) | ((v01 & 0x3FF) << 20);
- *dstTop++ = ((y01 & 0x3FF) << 10) | uv0;
- *dstTop++ = ((y01 >> 16) << 10) | uv0;
- *dstBot++ = ((y45 & 0x3FF) << 10) | uv0;
- *dstBot++ = ((y45 >> 16) << 10) | uv0;
- }
-
- srcY += srcYStride * 2;
- srcU += srcUStride;
- srcV += srcVStride;
- dst += dstStride * 2;
- }
-
- return;
-}
-
-static void convertYUV420Planar16ToYUV420Planar(
- uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
- const uint16_t *srcY, const uint16_t *srcU, const uint16_t *srcV,
- size_t srcYStride, size_t srcUStride, size_t srcVStride,
- size_t dstYStride, size_t dstUVStride,
- size_t width, size_t height) {
-
- for (size_t y = 0; y < height; ++y) {
- for (size_t x = 0; x < width; ++x) {
- dstY[x] = (uint8_t)(srcY[x] >> 2);
- }
-
- srcY += srcYStride;
- dstY += dstYStride;
- }
-
- for (size_t y = 0; y < (height + 1) / 2; ++y) {
- for (size_t x = 0; x < (width + 1) / 2; ++x) {
- dstU[x] = (uint8_t)(srcU[x] >> 2);
- dstV[x] = (uint8_t)(srcV[x] >> 2);
- }
-
- srcU += srcUStride;
- srcV += srcVStride;
- dstU += dstUVStride;
- dstV += dstUVStride;
- }
- return;
-}
status_t C2SoftVpxDec::outputBuffer(
const std::shared_ptr<C2BlockPool> &pool,
const std::unique_ptr<C2Work> &work)
@@ -802,19 +682,13 @@
if (img->fmt == VPX_IMG_FMT_I42016) {
IntfImpl::Lock lock = mIntf->lock();
std::shared_ptr<C2StreamColorAspectsTuning::output> defaultColorAspects = mIntf->getDefaultColorAspects_l();
-
+ bool allowRGBA1010102 = false;
if (defaultColorAspects->primaries == C2Color::PRIMARIES_BT2020 &&
defaultColorAspects->matrix == C2Color::MATRIX_BT2020 &&
defaultColorAspects->transfer == C2Color::TRANSFER_ST2084) {
- // TODO (b/201787956) For devices that do not support HAL_PIXEL_FORMAT_RGBA_1010102,
- // HAL_PIXEL_FORMAT_YV12 is used as a temporary work around.
- if (!mIsFormatR10G10B10A2Supported) {
- ALOGE("HAL_PIXEL_FORMAT_RGBA_1010102 isn't supported");
- format = HAL_PIXEL_FORMAT_YV12;
- } else {
- format = HAL_PIXEL_FORMAT_RGBA_1010102;
- }
+ allowRGBA1010102 = true;
}
+ format = getHalPixelFormatForBitDepth10(allowRGBA1010102);
}
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format, usage, &block);
@@ -876,24 +750,22 @@
queue->cond.signal();
queue.waitForCondition(queue->cond);
}
+ } else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
+ convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
+ srcYStride / 2, srcUStride / 2, srcVStride / 2,
+ dstYStride / 2, dstUVStride / 2, mWidth, mHeight);
} else {
- convertYUV420Planar16ToYUV420Planar(dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride / 2, srcUStride / 2, srcVStride / 2,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
+ mWidth, mHeight);
}
} else {
const uint8_t *srcY = (const uint8_t *)img->planes[VPX_PLANE_Y];
const uint8_t *srcU = (const uint8_t *)img->planes[VPX_PLANE_U];
const uint8_t *srcV = (const uint8_t *)img->planes[VPX_PLANE_V];
- copyOutputBufferToYuvPlanarFrame(
- dstY, dstU, dstV,
- srcY, srcU, srcV,
- srcYStride, srcUStride, srcVStride,
- dstYStride, dstUVStride,
- mWidth, mHeight);
+ convertYUV420Planar8ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstYStride, dstUVStride, mWidth, mHeight);
}
finishWork(((c2_cntr64_t *)img->user_priv)->peekull(), work, std::move(block));
return OK;
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.h b/media/codec2/components/vpx/C2SoftVpxDec.h
index ade162d..2065165 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.h
+++ b/media/codec2/components/vpx/C2SoftVpxDec.h
@@ -80,7 +80,7 @@
};
std::shared_ptr<Mutexed<ConversionQueue>> mQueue;
std::vector<sp<ConverterThread>> mConverterThreads;
- bool mIsFormatR10G10B10A2Supported;
+
status_t initDecoder();
status_t destroyDecoder();
void finishWork(uint64_t index, const std::unique_ptr<C2Work> &work,
diff --git a/media/codec2/core/include/C2Config.h b/media/codec2/core/include/C2Config.h
index feaa98c..70e742c 100644
--- a/media/codec2/core/include/C2Config.h
+++ b/media/codec2/core/include/C2Config.h
@@ -59,6 +59,7 @@
enum drc_compression_mode_t : int32_t; ///< DRC compression mode
enum drc_effect_type_t : int32_t; ///< DRC effect type
enum drc_album_mode_t : int32_t; ///< DRC album mode
+ enum hdr_dynamic_metadata_type_t : uint32_t; ///< HDR dynamic metadata type
enum intra_refresh_mode_t : uint32_t; ///< intra refresh modes
enum level_t : uint32_t; ///< coding level
enum ordinal_key_t : uint32_t; ///< work ordering keys
@@ -189,10 +190,13 @@
kParamIndexPictureTypeMask,
kParamIndexPictureType,
+ // deprecated
kParamIndexHdr10PlusMetadata,
kParamIndexPictureQuantization,
+ kParamIndexHdrDynamicMetadata,
+
/* ------------------------------------ video components ------------------------------------ */
kParamIndexFrameRate = C2_PARAM_INDEX_VIDEO_PARAM_START,
@@ -270,6 +274,9 @@
// encoding quality requirements
kParamIndexEncodingQualityLevel, // encoders, enum
+
+ // encoding statistics, average block QP of a frame
+ kParamIndexAverageBlockQuantization, // int32
};
}
@@ -680,6 +687,9 @@
LEVEL_DV_MAIN_UHD_30, ///< Dolby Vision main tier uhd30
LEVEL_DV_MAIN_UHD_48, ///< Dolby Vision main tier uhd48
LEVEL_DV_MAIN_UHD_60, ///< Dolby Vision main tier uhd60
+ LEVEL_DV_MAIN_UHD_120, ///< Dolby Vision main tier uhd120
+ LEVEL_DV_MAIN_8K_30, ///< Dolby Vision main tier 8k30
+ LEVEL_DV_MAIN_8K_60, ///< Dolby Vision main tier 8k60
LEVEL_DV_HIGH_HD_24 = _C2_PL_DV_BASE + 0x100, ///< Dolby Vision high tier hd24
LEVEL_DV_HIGH_HD_30, ///< Dolby Vision high tier hd30
@@ -690,6 +700,9 @@
LEVEL_DV_HIGH_UHD_30, ///< Dolby Vision high tier uhd30
LEVEL_DV_HIGH_UHD_48, ///< Dolby Vision high tier uhd48
LEVEL_DV_HIGH_UHD_60, ///< Dolby Vision high tier uhd60
+ LEVEL_DV_HIGH_UHD_120, ///< Dolby Vision high tier uhd120
+ LEVEL_DV_HIGH_8K_30, ///< Dolby Vision high tier 8k30
+ LEVEL_DV_HIGH_8K_60, ///< Dolby Vision high tier 8k60
// AV1 levels
LEVEL_AV1_2 = _C2_PL_AV1_BASE , ///< AV1 Level 2
@@ -1602,16 +1615,54 @@
C2FIELD(maxFall, "max-fall")
};
typedef C2StreamParam<C2Info, C2HdrStaticMetadataStruct, kParamIndexHdrStaticMetadata>
- C2StreamHdrStaticInfo;
+ C2StreamHdrStaticMetadataInfo;
+typedef C2StreamParam<C2Info, C2HdrStaticMetadataStruct, kParamIndexHdrStaticMetadata>
+ C2StreamHdrStaticInfo; // deprecated
constexpr char C2_PARAMKEY_HDR_STATIC_INFO[] = "raw.hdr-static-info";
/**
* HDR10+ Metadata Info.
+ *
+ * Deprecated. Use C2StreamHdrDynamicMetadataInfo with
+ * HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40
*/
typedef C2StreamParam<C2Info, C2BlobValue, kParamIndexHdr10PlusMetadata>
- C2StreamHdr10PlusInfo;
-constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info";
-constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info";
+ C2StreamHdr10PlusInfo; // deprecated
+constexpr char C2_PARAMKEY_INPUT_HDR10_PLUS_INFO[] = "input.hdr10-plus-info"; // deprecated
+constexpr char C2_PARAMKEY_OUTPUT_HDR10_PLUS_INFO[] = "output.hdr10-plus-info"; // deprecated
+
+/**
+ * HDR dynamic metadata types
+ */
+C2ENUM(C2Config::hdr_dynamic_metadata_type_t, uint32_t,
+ HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10, ///< SMPTE ST 2094-10
+ HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40, ///< SMPTE ST 2094-40
+)
+
+struct C2HdrDynamicMetadataStruct {
+ inline C2HdrDynamicMetadataStruct() { memset(this, 0, sizeof(*this)); }
+
+ inline C2HdrDynamicMetadataStruct(
+ size_t flexCount, C2Config::hdr_dynamic_metadata_type_t type)
+ : type_(type) {
+ memset(data, 0, flexCount);
+ }
+
+ C2Config::hdr_dynamic_metadata_type_t type_;
+ uint8_t data[];
+
+ DEFINE_AND_DESCRIBE_FLEX_C2STRUCT(HdrDynamicMetadata, data)
+ C2FIELD(type_, "type")
+ C2FIELD(data, "data")
+};
+
+/**
+ * Dynamic HDR Metadata Info.
+ */
+typedef C2StreamParam<C2Info, C2HdrDynamicMetadataStruct, kParamIndexHdrDynamicMetadata>
+ C2StreamHdrDynamicMetadataInfo;
+constexpr char C2_PARAMKEY_INPUT_HDR_DYNAMIC_INFO[] = "input.hdr-dynamic-info";
+constexpr char C2_PARAMKEY_OUTPUT_HDR_DYNAMIC_INFO[] = "output.hdr-dynamic-info";
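A hedged sketch of populating the new flexible param; it mirrors how CCodecBufferChannel (later in this change) wraps a legacy HDR10+ blob, with payload/payloadSize as hypothetical stand-ins for an SMPTE ST 2094-40 payload:

    std::shared_ptr<C2StreamHdrDynamicMetadataInfo::output> info =
            C2StreamHdrDynamicMetadataInfo::output::AllocShared(
                    payloadSize, 0u,
                    C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
    memcpy(info->m.data, payload, payloadSize);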
/* ------------------------------------ block-based coding ----------------------------------- */
@@ -2411,6 +2462,17 @@
S_HANDHELD = 1 // corresponds to VMAF=70
);
+/**
+ * Video Encoding Statistics Export
+ */
+
+/**
+ * Average block QP exported from video encoder.
+ */
+typedef C2StreamParam<C2Info, C2SimpleValueStruct<int32_t>, kParamIndexAverageBlockQuantization>
+ C2AndroidStreamAverageBlockQuantizationInfo;
+constexpr char C2_PARAMKEY_AVERAGE_QP[] = "coded.average-qp";
+
/// @}
#endif // C2CONFIG_H_
diff --git a/media/codec2/fuzzer/Android.bp b/media/codec2/fuzzer/Android.bp
index bd1fac6..3adc212 100644
--- a/media/codec2/fuzzer/Android.bp
+++ b/media/codec2/fuzzer/Android.bp
@@ -28,43 +28,12 @@
cc_defaults {
name: "C2Fuzzer-defaults",
+ defaults: [ "libcodec2-static-defaults" ],
+
srcs: [
"C2Fuzzer.cpp",
],
- static_libs: [
- "liblog",
- "libion",
- "libfmq",
- "libbase",
- "libutils",
- "libcutils",
- "libcodec2",
- "libhidlbase",
- "libdmabufheap",
- "libcodec2_vndk",
- "libnativewindow",
- "libcodec2_soft_common",
- "libsfplugin_ccodec_utils",
- "libstagefright_foundation",
- "libstagefright_bufferpool@2.0.1",
- "android.hardware.graphics.mapper@2.0",
- "android.hardware.graphics.mapper@3.0",
- "android.hardware.media.bufferpool@2.0",
- "android.hardware.graphics.allocator@2.0",
- "android.hardware.graphics.allocator@3.0",
- "android.hardware.graphics.bufferqueue@2.0",
- ],
-
- shared_libs: [
- "libui",
- "libdl",
- "libbinder",
- "libhardware",
- "libvndksupport",
- "libprocessgroup",
- ],
-
cflags: [
"-Wall",
"-Werror",
diff --git a/media/codec2/hidl/1.0/utils/Component.cpp b/media/codec2/hidl/1.0/utils/Component.cpp
index 082c5e3..df30dba 100644
--- a/media/codec2/hidl/1.0/utils/Component.cpp
+++ b/media/codec2/hidl/1.0/utils/Component.cpp
@@ -482,6 +482,37 @@
if (res != C2_OK) {
mInit = res;
}
+
+ struct ListenerDeathRecipient : public HwDeathRecipient {
+ ListenerDeathRecipient(const wp<Component>& comp)
+ : mComponent{comp} {
+ }
+
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */
+ ) override {
+ auto strongComponent = mComponent.promote();
+ if (strongComponent) {
+ LOG(INFO) << "Client died ! release the component !!";
+ strongComponent->release();
+ } else {
+ LOG(ERROR) << "Client died ! no component to release !!";
+ }
+ }
+
+ wp<Component> mComponent;
+ };
+
+ mDeathRecipient = new ListenerDeathRecipient(self);
+ Return<bool> transStatus = mListener->linkToDeath(
+ mDeathRecipient, 0);
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "Listener linkToDeath() transaction failed.";
+ }
+ if (!static_cast<bool>(transStatus)) {
+ LOG(DEBUG) << "Listener linkToDeath() call failed.";
+ }
}
Component::~Component() {
diff --git a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
index 86dccd0..e343655 100644
--- a/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
+++ b/media/codec2/hidl/1.0/utils/include/codec2/hidl/1.0/Component.h
@@ -132,6 +132,9 @@
friend struct ComponentStore;
struct Listener;
+
+ using HwDeathRecipient = ::android::hardware::hidl_death_recipient;
+ sp<HwDeathRecipient> mDeathRecipient;
};
} // namespace utils
diff --git a/media/codec2/hidl/1.1/utils/Component.cpp b/media/codec2/hidl/1.1/utils/Component.cpp
index 1d7d3d8..2dd922f 100644
--- a/media/codec2/hidl/1.1/utils/Component.cpp
+++ b/media/codec2/hidl/1.1/utils/Component.cpp
@@ -489,6 +489,37 @@
if (res != C2_OK) {
mInit = res;
}
+
+ struct ListenerDeathRecipient : public HwDeathRecipient {
+ ListenerDeathRecipient(const wp<Component>& comp)
+ : component{comp} {
+ }
+
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */
+ ) override {
+ auto strongComponent = component.promote();
+ if (strongComponent) {
+ LOG(INFO) << "Client died ! release the component !!";
+ strongComponent->release();
+ } else {
+ LOG(ERROR) << "Client died ! no component to release !!";
+ }
+ }
+
+ wp<Component> component;
+ };
+
+ mDeathRecipient = new ListenerDeathRecipient(self);
+ Return<bool> transStatus = mListener->linkToDeath(
+ mDeathRecipient, 0);
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "Listener linkToDeath() transaction failed.";
+ }
+ if (!static_cast<bool>(transStatus)) {
+ LOG(DEBUG) << "Listener linkToDeath() call failed.";
+ }
}
Component::~Component() {
diff --git a/media/codec2/hidl/1.1/utils/include/codec2/hidl/1.1/Component.h b/media/codec2/hidl/1.1/utils/include/codec2/hidl/1.1/Component.h
index 16c81d4..1c8c20c 100644
--- a/media/codec2/hidl/1.1/utils/include/codec2/hidl/1.1/Component.h
+++ b/media/codec2/hidl/1.1/utils/include/codec2/hidl/1.1/Component.h
@@ -137,6 +137,9 @@
friend struct ComponentStore;
struct Listener;
+
+ using HwDeathRecipient = ::android::hardware::hidl_death_recipient;
+ sp<HwDeathRecipient> mDeathRecipient;
};
} // namespace utils
diff --git a/media/codec2/hidl/1.2/utils/Component.cpp b/media/codec2/hidl/1.2/utils/Component.cpp
index 8924e6d..7994d32 100644
--- a/media/codec2/hidl/1.2/utils/Component.cpp
+++ b/media/codec2/hidl/1.2/utils/Component.cpp
@@ -520,6 +520,37 @@
if (res != C2_OK) {
mInit = res;
}
+
+ struct ListenerDeathRecipient : public HwDeathRecipient {
+ ListenerDeathRecipient(const wp<Component>& comp)
+ : component{comp} {
+ }
+
+ virtual void serviceDied(
+ uint64_t /* cookie */,
+ const wp<::android::hidl::base::V1_0::IBase>& /* who */
+ ) override {
+ auto strongComponent = component.promote();
+ if (strongComponent) {
+ LOG(INFO) << "Client died ! release the component !!";
+ strongComponent->release();
+ } else {
+ LOG(ERROR) << "Client died ! no component to release !!";
+ }
+ }
+
+ wp<Component> component;
+ };
+
+ mDeathRecipient = new ListenerDeathRecipient(self);
+ Return<bool> transStatus = mListener->linkToDeath(
+ mDeathRecipient, 0);
+ if (!transStatus.isOk()) {
+ LOG(ERROR) << "Listener linkToDeath() transaction failed.";
+ }
+ if (!static_cast<bool>(transStatus)) {
+ LOG(DEBUG) << "Listener linkToDeath() call failed.";
+ }
}
Component::~Component() {
diff --git a/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h b/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
index 7937664..d0972ee 100644
--- a/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
+++ b/media/codec2/hidl/1.2/utils/include/codec2/hidl/1.2/Component.h
@@ -142,6 +142,10 @@
friend struct ComponentStore;
struct Listener;
+
+ using HwDeathRecipient = ::android::hardware::hidl_death_recipient;
+ sp<HwDeathRecipient> mDeathRecipient;
+
};
} // namespace utils
diff --git a/media/codec2/hidl/plugin/FilterWrapper.cpp b/media/codec2/hidl/plugin/FilterWrapper.cpp
index b6024ff..d5124fd 100644
--- a/media/codec2/hidl/plugin/FilterWrapper.cpp
+++ b/media/codec2/hidl/plugin/FilterWrapper.cpp
@@ -49,6 +49,11 @@
std::weak_ptr<FilterWrapper> filterWrapper)
: mIntf(intf), mFilterWrapper(filterWrapper) {
takeFilters(std::move(filters));
+ for (size_t i = 0; i < mFilters.size(); ++i) {
+ mControlParamTypes.insert(
+ mFilters[i].desc.controlParams.begin(),
+ mFilters[i].desc.controlParams.end());
+ }
}
~WrappedDecoderInterface() override = default;
@@ -187,7 +192,12 @@
}
std::vector<C2Param *> stackParamsForIntf;
- std::copy_n(stackParamsList.begin(), stackParamsList.size(), stackParamsForIntf.begin());
+ for (C2Param *param : stackParamsList) {
+ if (mControlParamTypes.count(param->type()) != 0) {
+ continue;
+ }
+ stackParamsForIntf.push_back(param);
+ }
// Gather heap params that did not get queried from the filter interfaces above.
// These need to be queried from the decoder interface.
@@ -197,6 +207,9 @@
if (mTypeToIndexForQuery.find(type) != mTypeToIndexForQuery.end()) {
continue;
}
+ if (mControlParamTypes.count(type) != 0) {
+ continue;
+ }
heapParamIndicesForIntf.push_back(heapParamIndices[j]);
}
@@ -251,11 +264,14 @@
std::vector<C2Param *> paramsForFilter;
for (C2Param* param : params) {
auto it = mTypeToIndexForConfig.find(param->type().type());
- if (it != mTypeToIndexForConfig.end() && it->second != i) {
+ if (it == mTypeToIndexForConfig.end() || it->second != i) {
continue;
}
paramsForFilter.push_back(param);
}
+ if (paramsForFilter.empty()) {
+ continue;
+ }
c2_status_t err = filter->config_vb(paramsForFilter, mayBlock, &filterFailures);
if (err != C2_OK) {
LOG(err == C2_BAD_INDEX ? VERBOSE : WARNING)
@@ -356,6 +372,7 @@
std::weak_ptr<FilterWrapper> mFilterWrapper;
std::map<uint32_t, size_t> mTypeToIndexForQuery;
std::map<uint32_t, size_t> mTypeToIndexForConfig;
+ std::set<C2Param::Type> mControlParamTypes;
c2_status_t transferParams_l(
const std::shared_ptr<C2ComponentInterface> &curr,
@@ -598,6 +615,8 @@
}
}
mRunningFilters.clear();
+ std::vector<FilterWrapper::Component> filters(mFilters);
+ mIntf->takeFilters(std::move(filters));
return result;
}
diff --git a/media/codec2/sfplugin/Android.bp b/media/codec2/sfplugin/Android.bp
index 2bc748f..feeddb5 100644
--- a/media/codec2/sfplugin/Android.bp
+++ b/media/codec2/sfplugin/Android.bp
@@ -42,12 +42,14 @@
"android.hardware.drm@1.0",
"android.hardware.media.c2@1.0",
"android.hardware.media.omx@1.0",
+ "android.hardware.graphics.mapper@4.0",
"libbase",
"libbinder",
"libcodec2",
"libcodec2_client",
"libcodec2_vndk",
"libcutils",
+ "libgralloctypes",
"libgui",
"libhidlallocatorutils",
"libhidlbase",
diff --git a/media/codec2/sfplugin/C2OMXNode.cpp b/media/codec2/sfplugin/C2OMXNode.cpp
index c049187..ed7d69c 100644
--- a/media/codec2/sfplugin/C2OMXNode.cpp
+++ b/media/codec2/sfplugin/C2OMXNode.cpp
@@ -42,6 +42,7 @@
#include "utils/Codec2Mapper.h"
#include "C2OMXNode.h"
+#include "Codec2Buffer.h"
namespace android {
@@ -466,6 +467,18 @@
new Buffer2D(block->share(
C2Rect(block->width(), block->height()), ::C2Fence())));
work->input.buffers.push_back(c2Buffer);
+ std::shared_ptr<C2StreamHdrStaticInfo::input> staticInfo;
+ std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> dynamicInfo;
+ GetHdrMetadataFromGralloc4Handle(
+ block->handle(),
+ &staticInfo,
+ &dynamicInfo);
+ if (staticInfo && *staticInfo) {
+ c2Buffer->setInfo(staticInfo);
+ }
+ if (dynamicInfo && *dynamicInfo) {
+ c2Buffer->setInfo(dynamicInfo);
+ }
}
work->worklets.clear();
work->worklets.emplace_back(new C2Worklet);
diff --git a/media/codec2/sfplugin/CCodec.cpp b/media/codec2/sfplugin/CCodec.cpp
index 5389339..2b9ec7d 100644
--- a/media/codec2/sfplugin/CCodec.cpp
+++ b/media/codec2/sfplugin/CCodec.cpp
@@ -212,9 +212,8 @@
(OMX_INDEXTYPE)OMX_IndexParamConsumerUsageBits,
&usage, sizeof(usage));
- mSource->configure(
- mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace));
- return OK;
+ return GetStatus(mSource->configure(
+ mOmxNode, static_cast<hardware::graphics::common::V1_0::Dataspace>(mDataSpace)));
}
void disconnect() override {
@@ -1018,29 +1017,31 @@
} else {
pixelFormatInfo = nullptr;
}
- std::optional<uint32_t> flexPixelFormat{};
- std::optional<uint32_t> flexPlanarPixelFormat{};
- std::optional<uint32_t> flexSemiPlanarPixelFormat{};
+ // bit depth -> format
+ std::map<uint32_t, uint32_t> flexPixelFormat;
+ std::map<uint32_t, uint32_t> flexPlanarPixelFormat;
+ std::map<uint32_t, uint32_t> flexSemiPlanarPixelFormat;
if (pixelFormatInfo && *pixelFormatInfo) {
for (size_t i = 0; i < pixelFormatInfo->flexCount(); ++i) {
const C2FlexiblePixelFormatDescriptorStruct &desc =
pixelFormatInfo->m.values[i];
- if (desc.bitDepth != 8
- || desc.subsampling != C2Color::YUV_420
+ if (desc.subsampling != C2Color::YUV_420
// TODO(b/180076105): some device report wrong layout
// || desc.layout == C2Color::INTERLEAVED_PACKED
// || desc.layout == C2Color::INTERLEAVED_ALIGNED
|| desc.layout == C2Color::UNKNOWN_LAYOUT) {
continue;
}
- if (!flexPixelFormat) {
- flexPixelFormat = desc.pixelFormat;
+ if (flexPixelFormat.count(desc.bitDepth) == 0) {
+ flexPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
}
- if (desc.layout == C2Color::PLANAR_PACKED && !flexPlanarPixelFormat) {
- flexPlanarPixelFormat = desc.pixelFormat;
+ if (desc.layout == C2Color::PLANAR_PACKED
+ && flexPlanarPixelFormat.count(desc.bitDepth) == 0) {
+ flexPlanarPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
}
- if (desc.layout == C2Color::SEMIPLANAR_PACKED && !flexSemiPlanarPixelFormat) {
- flexSemiPlanarPixelFormat = desc.pixelFormat;
+ if (desc.layout == C2Color::SEMIPLANAR_PACKED
+ && flexSemiPlanarPixelFormat.count(desc.bitDepth) == 0) {
+ flexSemiPlanarPixelFormat.emplace(desc.bitDepth, desc.pixelFormat);
}
}
}
@@ -1050,7 +1051,7 @@
if (!(config->mDomain & Config::IS_ENCODER)) {
if (surface == nullptr) {
const char *prefix = "";
- if (flexSemiPlanarPixelFormat) {
+ if (flexSemiPlanarPixelFormat.count(8) != 0) {
format = COLOR_FormatYUV420SemiPlanar;
prefix = "semi-";
} else {
@@ -1067,17 +1068,34 @@
if ((config->mDomain & Config::IS_ENCODER) || !surface) {
switch (format) {
case COLOR_FormatYUV420Flexible:
- format = flexPixelFormat.value_or(COLOR_FormatYUV420Planar);
+ format = COLOR_FormatYUV420Planar;
+ if (flexPixelFormat.count(8) != 0) {
+ format = flexPixelFormat[8];
+ }
break;
case COLOR_FormatYUV420Planar:
case COLOR_FormatYUV420PackedPlanar:
- format = flexPlanarPixelFormat.value_or(
- flexPixelFormat.value_or(format));
+ if (flexPlanarPixelFormat.count(8) != 0) {
+ format = flexPlanarPixelFormat[8];
+ } else if (flexPixelFormat.count(8) != 0) {
+ format = flexPixelFormat[8];
+ }
break;
case COLOR_FormatYUV420SemiPlanar:
case COLOR_FormatYUV420PackedSemiPlanar:
- format = flexSemiPlanarPixelFormat.value_or(
- flexPixelFormat.value_or(format));
+ if (flexSemiPlanarPixelFormat.count(8) != 0) {
+ format = flexSemiPlanarPixelFormat[8];
+ } else if (flexPixelFormat.count(8) != 0) {
+ format = flexPixelFormat[8];
+ }
+ break;
+ case COLOR_FormatYUVP010:
+ format = COLOR_FormatYUVP010;
+ if (flexSemiPlanarPixelFormat.count(10) != 0) {
+ format = flexSemiPlanarPixelFormat[10];
+ } else if (flexPixelFormat.count(10) != 0) {
+ format = flexPixelFormat[10];
+ }
break;
default:
// No-op
@@ -1443,6 +1461,27 @@
config->mOutputFormat->setInt32("android._tunneled", 1);
}
+ // Convert an encoding statistics level to corresponding encoding statistics
+ // kinds
+ int32_t encodingStatisticsLevel = VIDEO_ENCODING_STATISTICS_LEVEL_NONE;
+ if ((config->mDomain & Config::IS_ENCODER)
+ && (config->mDomain & Config::IS_VIDEO)
+ && msg->findInt32(KEY_VIDEO_ENCODING_STATISTICS_LEVEL, &encodingStatisticsLevel)) {
+ // Higher levels include all the encoding statistics that belong to lower levels.
+ switch (encodingStatisticsLevel) {
+ // case VIDEO_ENCODING_STATISTICS_LEVEL_2: // reserved for a future level 2
+ // with more encoding statistics kinds
+ // Future extended encoding statistics for level 2 should be added here
+ case VIDEO_ENCODING_STATISTICS_LEVEL_1:
+ config->subscribeToConfigUpdate(comp,
+ {kParamIndexAverageBlockQuantization, kParamIndexPictureType});
+ break;
+ case VIDEO_ENCODING_STATISTICS_LEVEL_NONE:
+ break;
+ }
+ }
+ ALOGD("encoding statistics level = %d", encodingStatisticsLevel);
+
ALOGD("setup formats input: %s",
config->mInputFormat->debugString().c_str());
ALOGD("setup formats output: %s",
@@ -2579,7 +2618,10 @@
std::vector<std::unique_ptr<C2Param>> params;
err = intf->query(
{&mApiFeatures},
- {C2PortAllocatorsTuning::input::PARAM_TYPE},
+ {
+ C2StreamBufferTypeSetting::input::PARAM_TYPE,
+ C2PortAllocatorsTuning::input::PARAM_TYPE
+ },
C2_MAY_BLOCK,
¶ms);
if (err != C2_OK && err != C2_BAD_INDEX) {
@@ -2592,7 +2634,10 @@
if (!param) {
continue;
}
- if (param->type() == C2PortAllocatorsTuning::input::PARAM_TYPE) {
+ if (param->type() == C2StreamBufferTypeSetting::input::PARAM_TYPE) {
+ mInputStreamFormat.reset(
+ C2StreamBufferTypeSetting::input::From(param));
+ } else if (param->type() == C2PortAllocatorsTuning::input::PARAM_TYPE) {
mInputAllocators.reset(
C2PortAllocatorsTuning::input::From(param));
}
@@ -2612,6 +2657,16 @@
return mApiFeatures;
}
+ const C2StreamBufferTypeSetting::input &getInputStreamFormat() const {
+ static std::unique_ptr<C2StreamBufferTypeSetting::input> sInvalidated = []{
+ std::unique_ptr<C2StreamBufferTypeSetting::input> param;
+ param.reset(new C2StreamBufferTypeSetting::input(0u, C2BufferData::INVALID));
+ param->invalidate();
+ return param;
+ }();
+ return mInputStreamFormat ? *mInputStreamFormat : *sInvalidated;
+ }
+
const C2PortAllocatorsTuning::input &getInputAllocators() const {
static std::unique_ptr<C2PortAllocatorsTuning::input> sInvalidated = []{
std::unique_ptr<C2PortAllocatorsTuning::input> param =
@@ -2627,6 +2682,7 @@
std::vector<C2FieldSupportedValuesQuery> mFields;
C2ApiFeaturesSetting mApiFeatures;
+ std::unique_ptr<C2StreamBufferTypeSetting::input> mInputStreamFormat;
std::unique_ptr<C2PortAllocatorsTuning::input> mInputAllocators;
};
@@ -2668,6 +2724,24 @@
if (intfCache.initCheck() != OK) {
continue;
}
+ const C2StreamBufferTypeSetting::input &streamFormat = intfCache.getInputStreamFormat();
+ if (streamFormat) {
+ C2Allocator::type_t allocatorType = C2Allocator::LINEAR;
+ if (streamFormat.value == C2BufferData::GRAPHIC
+ || streamFormat.value == C2BufferData::GRAPHIC_CHUNKS) {
+ allocatorType = C2Allocator::GRAPHIC;
+ }
+
+ if (type != allocatorType) {
+ // the requested type is not supported by the input allocators
+ ids->clear();
+ ids->insert(defaultAllocatorId);
+ ALOGV("name(%s) does not support type(0x%x) as an input allocator;"
+ " using default allocator id(%d)", name.c_str(), type, defaultAllocatorId);
+ break;
+ }
+ }
+
const C2PortAllocatorsTuning::input &allocators = intfCache.getInputAllocators();
if (firstIteration) {
firstIteration = false;
diff --git a/media/codec2/sfplugin/CCodecBufferChannel.cpp b/media/codec2/sfplugin/CCodecBufferChannel.cpp
index 0de0b77..99aa593 100644
--- a/media/codec2/sfplugin/CCodecBufferChannel.cpp
+++ b/media/codec2/sfplugin/CCodecBufferChannel.cpp
@@ -840,6 +840,35 @@
hdr10PlusInfo.reset();
}
+ // HDR dynamic info
+ std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> hdrDynamicInfo =
+ std::static_pointer_cast<const C2StreamHdrDynamicMetadataInfo::output>(
+ c2Buffer->getInfo(C2StreamHdrDynamicMetadataInfo::output::PARAM_TYPE));
+ // TODO: make this sticky & enable unset
+ if (hdrDynamicInfo && hdrDynamicInfo->flexCount() == 0) {
+ hdrDynamicInfo.reset();
+ }
+
+ if (hdr10PlusInfo) {
+ // C2StreamHdr10PlusInfo is deprecated; components should use
+ // C2StreamHdrDynamicMetadataInfo
+ // TODO: #metric
+ if (hdrDynamicInfo) {
+ // It is unexpected that C2StreamHdr10PlusInfo and
+ // C2StreamHdrDynamicMetadataInfo are both present.
+ // C2StreamHdrDynamicMetadataInfo takes priority.
+ // TODO: #metric
+ } else {
+ std::shared_ptr<C2StreamHdrDynamicMetadataInfo::output> info =
+ C2StreamHdrDynamicMetadataInfo::output::AllocShared(
+ hdr10PlusInfo->flexCount(),
+ 0u,
+ C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+ memcpy(info->m.data, hdr10PlusInfo->m.value, hdr10PlusInfo->flexCount());
+ hdrDynamicInfo = info;
+ }
+ }
+
std::vector<C2ConstGraphicBlock> blocks = c2Buffer->data().graphicBlocks();
if (blocks.size() != 1u) {
ALOGD("[%s] expected 1 graphic block, but got %zu", mName, blocks.size());
@@ -859,7 +888,7 @@
videoScalingMode,
transform,
Fence::NO_FENCE, 0);
- if (hdrStaticInfo || hdr10PlusInfo) {
+ if (hdrStaticInfo || hdrDynamicInfo) {
HdrMetadata hdr;
if (hdrStaticInfo) {
// If mastering max and min luminance fields are 0, do not use them.
@@ -896,13 +925,16 @@
hdr.cta8613 = cta861_meta;
}
}
- if (hdr10PlusInfo) {
+ if (hdrDynamicInfo
+ && hdrDynamicInfo->m.type_ == C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40) {
hdr.validTypes |= HdrMetadata::HDR10PLUS;
hdr.hdr10plus.assign(
- hdr10PlusInfo->m.value,
- hdr10PlusInfo->m.value + hdr10PlusInfo->flexCount());
+ hdrDynamicInfo->m.data,
+ hdrDynamicInfo->m.data + hdrDynamicInfo->flexCount());
}
qbi.setHdrMetadata(hdr);
+
+ SetHdrMetadataToGralloc4Handle(hdrStaticInfo, hdrDynamicInfo, block.handle());
}
// we don't have dirty regions
qbi.setSurfaceDamage(Region::INVALID_REGION);
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 03418d9..242eeaf 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -953,6 +953,12 @@
return value == 0 ? C2_FALSE : C2_TRUE;
}));
+ add(ConfigMapper(KEY_VIDEO_QP_AVERAGE, C2_PARAMKEY_AVERAGE_QP, "value")
+ .limitTo(D::ENCODER & D::VIDEO & D::READ));
+
+ add(ConfigMapper(KEY_PICTURE_TYPE, C2_PARAMKEY_PICTURE_TYPE, "value")
+ .limitTo(D::ENCODER & D::VIDEO & D::READ));
+
/* still to do
constexpr char KEY_PUSH_BLANK_BUFFERS_ON_STOP[] = "push-blank-buffers-on-shutdown";
@@ -1887,7 +1893,9 @@
names->clear();
// TODO: expand to standard params
for (const auto &[key, desc] : mVendorParams) {
- names->push_back(key);
+ if (desc->isVisible()) {
+ names->push_back(key);
+ }
}
return OK;
}
diff --git a/media/codec2/sfplugin/CCodecConfig.h b/media/codec2/sfplugin/CCodecConfig.h
index 417b773..88e6239 100644
--- a/media/codec2/sfplugin/CCodecConfig.h
+++ b/media/codec2/sfplugin/CCodecConfig.h
@@ -363,11 +363,6 @@
const std::vector<std::string> &names,
c2_blocking_t blocking = C2_DONT_BLOCK);
-private:
-
- /// initializes the standard MediaCodec to Codec 2.0 params mapping
- void initializeStandardParams();
-
/// Adds indices to the subscribed indices, and updated subscription to component
/// \param blocking blocking mode to use with the component
status_t subscribeToConfigUpdate(
@@ -375,6 +370,11 @@
const std::vector<C2Param::Index> &indices,
c2_blocking_t blocking = C2_DONT_BLOCK);
+private:
+
+ /// initializes the standard MediaCodec to Codec 2.0 params mapping
+ void initializeStandardParams();
+
/// Gets SDK format from codec 2.0 reflected configuration
/// \param domain input/output bitmask
sp<AMessage> getFormatForDomain(
diff --git a/media/codec2/sfplugin/Codec2Buffer.cpp b/media/codec2/sfplugin/Codec2Buffer.cpp
index 4070478..2d3c70a 100644
--- a/media/codec2/sfplugin/Codec2Buffer.cpp
+++ b/media/codec2/sfplugin/Codec2Buffer.cpp
@@ -18,9 +18,14 @@
#define LOG_TAG "Codec2Buffer"
#include <utils/Log.h>
+#include <aidl/android/hardware/graphics/common/Cta861_3.h>
+#include <aidl/android/hardware/graphics/common/Smpte2086.h>
#include <android-base/properties.h>
#include <android/hardware/cas/native/1.0/types.h>
#include <android/hardware/drm/1.0/types.h>
+#include <android/hardware/graphics/common/1.2/types.h>
+#include <android/hardware/graphics/mapper/4.0/IMapper.h>
+#include <gralloctypes/Gralloc4.h>
#include <hidlmemory/FrameworkUtils.h>
#include <media/hardware/HardwareAPI.h>
#include <media/stagefright/CodecBase.h>
@@ -358,21 +363,22 @@
break;
case COLOR_FormatYUVP010:
+ // stride is in bytes
mediaImage->mPlane[mediaImage->Y].mOffset = 0;
mediaImage->mPlane[mediaImage->Y].mColInc = 2;
- mediaImage->mPlane[mediaImage->Y].mRowInc = stride * 2;
+ mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
- mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride * 2;
+ mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
mediaImage->mPlane[mediaImage->U].mColInc = 4;
- mediaImage->mPlane[mediaImage->U].mRowInc = stride * 2;
+ mediaImage->mPlane[mediaImage->U].mRowInc = stride;
mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
- mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 2 + 2;
+ mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
mediaImage->mPlane[mediaImage->V].mColInc = 4;
- mediaImage->mPlane[mediaImage->V].mRowInc = stride * 2;
+ mediaImage->mPlane[mediaImage->V].mRowInc = stride;
mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
if (tryWrapping) {
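With stride now interpreted in bytes, the plane layout for a hypothetical 320x240 P010 buffer (stride = 640 bytes, vStride = 240) works out to: Y at offset 0 with rowInc 640 and colInc 2; U at offset 640 * 240 = 153600 with colInc 4 (one interleaved UV pair per chroma sample); V at 153602. The previous code multiplied by 2 again, double-counting the two bytes per sample.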
@@ -533,8 +539,8 @@
mInitCheck = BAD_VALUE;
return;
}
- bufferSize += stride * vStride
- / plane.rowSampling / plane.colSampling * divUp(mAllocatedDepth, 8u);
+ // stride is in bytes
+ bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
}
mBackBufferSize = bufferSize;
@@ -787,8 +793,14 @@
ALOGD("format had no width / height");
return nullptr;
}
- // NOTE: we currently only support YUV420 formats for byte-buffer mode.
- sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * 3 / 2));
+ int32_t colorFormat = COLOR_FormatYUV420Flexible;
+ int32_t bpp = 12; // 8(Y) + 2(U) + 2(V)
+ if (format->findInt32(KEY_COLOR_FORMAT, &colorFormat)) {
+ if (colorFormat == COLOR_FormatYUVP010) {
+ bpp = 24; // 16(Y) + 4(U) + 4(V)
+ }
+ }
+ sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * bpp / 8));
return new ConstGraphicBlockBuffer(
format,
aBuffer,
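Worked numbers for the new size computation: a 1920x1080 P010 frame allocates align(1920, 16) * align(1080, 16) * 24 / 8 = 1920 * 1088 * 3 = 6,266,880 bytes, exactly twice the 12-bpp 8-bit YUV420 case (3,133,440 bytes).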
@@ -941,4 +953,218 @@
return const_cast<native_handle_t *>(mBlock->handle());
}
+using ::aidl::android::hardware::graphics::common::Cta861_3;
+using ::aidl::android::hardware::graphics::common::Smpte2086;
+
+using ::android::gralloc4::MetadataType_Cta861_3;
+using ::android::gralloc4::MetadataType_Smpte2086;
+using ::android::gralloc4::MetadataType_Smpte2094_40;
+
+using ::android::hardware::Return;
+using ::android::hardware::hidl_vec;
+
+using Error4 = ::android::hardware::graphics::mapper::V4_0::Error;
+using IMapper4 = ::android::hardware::graphics::mapper::V4_0::IMapper;
+
+namespace {
+
+sp<IMapper4> GetMapper4() {
+ static sp<IMapper4> sMapper = IMapper4::getService();
+ return sMapper;
+}
+
+class NativeHandleDeleter {
+public:
+ explicit NativeHandleDeleter(native_handle_t *handle) : mHandle(handle) {}
+ ~NativeHandleDeleter() {
+ if (mHandle) {
+ native_handle_delete(mHandle);
+ }
+ }
+private:
+ native_handle_t *mHandle;
+};
+
+} // namespace
+
+c2_status_t GetHdrMetadataFromGralloc4Handle(
+ const C2Handle *const handle,
+ std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
+ std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo) {
+ c2_status_t err = C2_OK;
+ native_handle_t *nativeHandle = UnwrapNativeCodec2GrallocHandle(handle);
+ if (nativeHandle == nullptr) {
+ // Nothing to do
+ return err;
+ }
+ // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
+ // does not clone the fds. Thus we need to delete the handle
+ // without closing it when going out of scope.
+ // NativeHandle cannot solve this problem, as it would both close and
+ // delete the handle, whereas we only need to delete it.
+ NativeHandleDeleter nhd(nativeHandle);
+ sp<IMapper4> mapper = GetMapper4();
+ if (!mapper) {
+ // Gralloc4 not supported; nothing to do
+ return err;
+ }
+ Error4 mapperErr = Error4::NONE;
+ if (staticInfo) {
+ staticInfo->reset(new C2StreamHdrStaticMetadataInfo::input(0u));
+ memset(&(*staticInfo)->mastering, 0, sizeof((*staticInfo)->mastering));
+ (*staticInfo)->maxCll = 0;
+ (*staticInfo)->maxFall = 0;
+ IMapper4::get_cb cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+ mapperErr = err;
+ if (err != Error4::NONE) {
+ return;
+ }
+
+ std::optional<Smpte2086> smpte2086;
+ gralloc4::decodeSmpte2086(vec, &smpte2086);
+ if (smpte2086) {
+ (*staticInfo)->mastering.red.x = smpte2086->primaryRed.x;
+ (*staticInfo)->mastering.red.y = smpte2086->primaryRed.y;
+ (*staticInfo)->mastering.green.x = smpte2086->primaryGreen.x;
+ (*staticInfo)->mastering.green.y = smpte2086->primaryGreen.y;
+ (*staticInfo)->mastering.blue.x = smpte2086->primaryBlue.x;
+ (*staticInfo)->mastering.blue.y = smpte2086->primaryBlue.y;
+ (*staticInfo)->mastering.white.x = smpte2086->whitePoint.x;
+ (*staticInfo)->mastering.white.y = smpte2086->whitePoint.y;
+
+ (*staticInfo)->mastering.maxLuminance = smpte2086->maxLuminance;
+ (*staticInfo)->mastering.minLuminance = smpte2086->minLuminance;
+ } else {
+ mapperErr = Error4::BAD_VALUE;
+ }
+ };
+ Return<void> ret = mapper->get(nativeHandle, MetadataType_Smpte2086, cb);
+ if (!ret.isOk()) {
+ err = C2_REFUSED;
+ } else if (mapperErr != Error4::NONE) {
+ err = C2_CORRUPTED;
+ }
+ cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+ mapperErr = err;
+ if (err != Error4::NONE) {
+ return;
+ }
+
+ std::optional<Cta861_3> cta861_3;
+ gralloc4::decodeCta861_3(vec, &cta861_3);
+ if (cta861_3) {
+ (*staticInfo)->maxCll = cta861_3->maxContentLightLevel;
+ (*staticInfo)->maxFall = cta861_3->maxFrameAverageLightLevel;
+ } else {
+ mapperErr = Error4::BAD_VALUE;
+ }
+ };
+ ret = mapper->get(nativeHandle, MetadataType_Cta861_3, cb);
+ if (!ret.isOk()) {
+ err = C2_REFUSED;
+ } else if (mapperErr != Error4::NONE) {
+ err = C2_CORRUPTED;
+ }
+ }
+ if (dynamicInfo) {
+ dynamicInfo->reset();
+ IMapper4::get_cb cb = [&mapperErr, dynamicInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
+ mapperErr = err;
+ if (err != Error4::NONE) {
+ return;
+ }
+ if (!dynamicInfo) {
+ return;
+ }
+ *dynamicInfo = C2StreamHdrDynamicMetadataInfo::input::AllocShared(
+ vec.size(), 0u, C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
+ memcpy((*dynamicInfo)->m.data, vec.data(), vec.size());
+ };
+ Return<void> ret = mapper->get(nativeHandle, MetadataType_Smpte2094_40, cb);
+ if (!ret.isOk() || mapperErr != Error4::NONE) {
+ dynamicInfo->reset();
+ }
+ }
+
+ return err;
+}
+
+c2_status_t SetHdrMetadataToGralloc4Handle(
+ const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
+ const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
+ const C2Handle *const handle) {
+ c2_status_t err = C2_OK;
+ native_handle_t *nativeHandle = UnwrapNativeCodec2GrallocHandle(handle);
+ if (nativeHandle == nullptr) {
+ // Nothing to do
+ return err;
+ }
+ // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
+ // does not clone the fds. Thus we need to delete the handle
+ // without closing it when going out of scope.
+ NativeHandleDeleter nhd(nativeHandle);
+ sp<IMapper4> mapper = GetMapper4();
+ if (!mapper) {
+ // Gralloc4 not supported; nothing to do
+ return err;
+ }
+ if (staticInfo && *staticInfo) {
+ std::optional<Smpte2086> smpte2086 = Smpte2086{
+ {staticInfo->mastering.red.x, staticInfo->mastering.red.y},
+ {staticInfo->mastering.green.x, staticInfo->mastering.green.y},
+ {staticInfo->mastering.blue.x, staticInfo->mastering.blue.y},
+ {staticInfo->mastering.white.x, staticInfo->mastering.white.y},
+ staticInfo->mastering.maxLuminance,
+ staticInfo->mastering.minLuminance,
+ };
+ hidl_vec<uint8_t> vec;
+ if (gralloc4::encodeSmpte2086(smpte2086, &vec) == OK) {
+ Return<Error4> ret = mapper->set(nativeHandle, MetadataType_Smpte2086, vec);
+ if (!ret.isOk()) {
+ err = C2_REFUSED;
+ } else if (ret != Error4::NONE) {
+ err = C2_CORRUPTED;
+ }
+ }
+ std::optional<Cta861_3> cta861_3 = Cta861_3{
+ staticInfo->maxCll,
+ staticInfo->maxFall,
+ };
+ if (gralloc4::encodeCta861_3(cta861_3, &vec) == OK) {
+ Return<Error4> ret = mapper->set(nativeHandle, MetadataType_Cta861_3, vec);
+ if (!ret.isOk()) {
+ err = C2_REFUSED;
+ } else if (ret != Error4::NONE) {
+ err = C2_CORRUPTED;
+ }
+ }
+ }
+ if (dynamicInfo && *dynamicInfo) {
+ hidl_vec<uint8_t> vec;
+ vec.resize(dynamicInfo->flexCount());
+ memcpy(vec.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
+ std::optional<IMapper4::MetadataType> metadataType;
+ switch (dynamicInfo->m.type_) {
+ case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10:
+ // TODO
+ break;
+ case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40:
+ metadataType = MetadataType_Smpte2094_40;
+ break;
+ }
+ if (metadataType) {
+ Return<Error4> ret = mapper->set(nativeHandle, *metadataType, vec);
+ if (!ret.isOk()) {
+ err = C2_REFUSED;
+ } else if (ret != Error4::NONE) {
+ err = C2_CORRUPTED;
+ }
+ } else {
+ err = C2_BAD_VALUE;
+ }
+ }
+
+ return err;
+}
+
} // namespace android
diff --git a/media/codec2/sfplugin/Codec2Buffer.h b/media/codec2/sfplugin/Codec2Buffer.h
index dc788cd..b02b042 100644
--- a/media/codec2/sfplugin/Codec2Buffer.h
+++ b/media/codec2/sfplugin/Codec2Buffer.h
@@ -19,6 +19,7 @@
#define CODEC2_BUFFER_H_
#include <C2Buffer.h>
+#include <C2Config.h>
#include <binder/IMemory.h>
#include <media/hardware/VideoAPI.h>
@@ -391,6 +392,36 @@
int32_t mHeapSeqNum;
};
+/**
+ * Get HDR metadata from Gralloc4 handle.
+ *
+ * \param[in] handle handle of the allocation
+ * \param[out] staticInfo HDR static info to be filled. Ignored if null;
+ * if |handle| is invalid or does not contain the metadata,
+ * the shared_ptr is reset.
+ * \param[out] dynamicInfo HDR dynamic info to be filled. Ignored if null;
+ * if |handle| is invalid or does not contain the metadata,
+ * the shared_ptr is reset.
+ * \return C2_OK if successful
+ */
+c2_status_t GetHdrMetadataFromGralloc4Handle(
+ const C2Handle *const handle,
+ std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
+ std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo);
+
+/**
+ * Set HDR metadata to Gralloc4 handle.
+ *
+ * \param[in] staticInfo HDR static info to set. Ignored if null or invalid.
+ * \param[in] dynamicInfo HDR dynamic info to set. Ignored if null or invalid.
+ * \param[out] handle handle of the allocation.
+ * \return C2_OK if successful
+ */
+c2_status_t SetHdrMetadataToGralloc4Handle(
+ const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
+ const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
+ const C2Handle *const handle);
+
} // namespace android
#endif // CODEC2_BUFFER_H_
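
For context, a minimal sketch of how a caller might use the two helpers declared
above (hypothetical caller code, not part of the patch; |block| stands for any
C2 graphic block):

    std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> staticInfo;
    std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> dynamicInfo;
    // Read whatever HDR metadata the producer attached to the buffer.
    if (GetHdrMetadataFromGralloc4Handle(
            block->handle(), &staticInfo, &dynamicInfo) == C2_OK && staticInfo) {
        // e.g. forward the mastering display and CLL/FALL values to the codec config
    }

Note the asymmetric stream directions: the getter fills ::input infos while the
setter consumes ::output infos, matching the ports on which each is used.
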
diff --git a/media/codec2/sfplugin/Codec2InfoBuilder.cpp b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
index 67d7ed2..2b8a160 100644
--- a/media/codec2/sfplugin/Codec2InfoBuilder.cpp
+++ b/media/codec2/sfplugin/Codec2InfoBuilder.cpp
@@ -54,6 +54,9 @@
using Traits = C2Component::Traits;
+// HAL pixel format -> framework color format
+typedef std::map<uint32_t, int32_t> PixelFormatMap;
+
namespace /* unnamed */ {
bool hasPrefix(const std::string& s, const char* prefix) {
@@ -67,6 +70,26 @@
s.compare(s.size() - suffixLen, suffixLen, suffix) == 0;
}
+std::optional<int32_t> findFrameworkColorFormat(
+ const C2FlexiblePixelFormatDescriptorStruct &desc) {
+ switch (desc.bitDepth) {
+ case 8u:
+ if (desc.layout == C2Color::PLANAR_PACKED
+ || desc.layout == C2Color::SEMIPLANAR_PACKED) {
+ return COLOR_FormatYUV420Flexible;
+ }
+ break;
+ case 10u:
+ if (desc.layout == C2Color::SEMIPLANAR_PACKED) {
+ return COLOR_FormatYUVP010;
+ }
+ break;
+ default:
+ break;
+ }
+ return std::nullopt;
+}
+
// returns true if component advertised supported profile level(s)
bool addSupportedProfileLevels(
std::shared_ptr<Codec2Client::Interface> intf,
@@ -211,27 +234,73 @@
void addSupportedColorFormats(
std::shared_ptr<Codec2Client::Interface> intf,
MediaCodecInfo::CapabilitiesWriter *caps,
- const Traits& trait, const std::string &mediaType) {
- (void)intf;
-
+ const Traits& trait, const std::string &mediaType,
+ const PixelFormatMap &pixelFormatMap) {
// TODO: get this from intf() as well, but how do we map them to
// MediaCodec color formats?
bool encoder = trait.kind == C2Component::KIND_ENCODER;
if (mediaType.find("video") != std::string::npos
|| mediaType.find("image") != std::string::npos) {
+
+ std::vector<C2FieldSupportedValuesQuery> query;
+ if (encoder) {
+ C2StreamPixelFormatInfo::input pixelFormat;
+ query.push_back(C2FieldSupportedValuesQuery::Possible(
+ C2ParamField::Make(pixelFormat, pixelFormat.value)));
+ } else {
+ C2StreamPixelFormatInfo::output pixelFormat;
+ query.push_back(C2FieldSupportedValuesQuery::Possible(
+ C2ParamField::Make(pixelFormat, pixelFormat.value)));
+ }
+ std::list<int32_t> supportedColorFormats;
+ if (intf->querySupportedValues(query, C2_DONT_BLOCK) == C2_OK) {
+ if (query[0].status == C2_OK) {
+ const C2FieldSupportedValues &fsv = query[0].values;
+ if (fsv.type == C2FieldSupportedValues::VALUES) {
+ for (C2Value::Primitive value : fsv.values) {
+ auto it = pixelFormatMap.find(value.u32);
+ if (it != pixelFormatMap.end()) {
+ auto it2 = std::find(
+ supportedColorFormats.begin(),
+ supportedColorFormats.end(),
+ it->second);
+ if (it2 == supportedColorFormats.end()) {
+ supportedColorFormats.push_back(it->second);
+ }
+ }
+ }
+ }
+ }
+ }
+ auto addDefaultColorFormat = [caps, &supportedColorFormats](int32_t colorFormat) {
+ caps->addColorFormat(colorFormat);
+ auto it = std::find(
+ supportedColorFormats.begin(), supportedColorFormats.end(), colorFormat);
+ if (it != supportedColorFormats.end()) {
+ supportedColorFormats.erase(it);
+ }
+ };
+
+ // Color formats are ordered by preference. The intention here is to advertise:
+ // c2.android.* codecs: YUV420s, Surface, <the rest>
+ // all other codecs: Surface, YUV420s, <the rest>
+ // TODO: get this preference via Codec2 API
+
// vendor video codecs prefer opaque format
if (trait.name.find("android") == std::string::npos) {
- caps->addColorFormat(COLOR_FormatSurface);
+ addDefaultColorFormat(COLOR_FormatSurface);
}
- caps->addColorFormat(COLOR_FormatYUV420Flexible);
- caps->addColorFormat(COLOR_FormatYUV420Planar);
- caps->addColorFormat(COLOR_FormatYUV420SemiPlanar);
- caps->addColorFormat(COLOR_FormatYUV420PackedPlanar);
- caps->addColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
- // framework video encoders must support surface format, though it is unclear
- // that they will be able to map it if it is opaque
- if (encoder && trait.name.find("android") != std::string::npos) {
- caps->addColorFormat(COLOR_FormatSurface);
+ addDefaultColorFormat(COLOR_FormatYUV420Flexible);
+ addDefaultColorFormat(COLOR_FormatYUV420Planar);
+ addDefaultColorFormat(COLOR_FormatYUV420SemiPlanar);
+ addDefaultColorFormat(COLOR_FormatYUV420PackedPlanar);
+ addDefaultColorFormat(COLOR_FormatYUV420PackedSemiPlanar);
+ // Android video codecs prefer CPU-readable formats
+ if (trait.name.find("android") != std::string::npos) {
+ addDefaultColorFormat(COLOR_FormatSurface);
+ }
+ for (int32_t colorFormat : supportedColorFormats) {
+ caps->addColorFormat(colorFormat);
}
}
}
@@ -423,6 +492,7 @@
}
}
+ std::map<std::string, PixelFormatMap> nameToPixelFormatMap;
for (const Traits& trait : traits) {
C2Component::rank_t rank = trait.rank;
@@ -436,8 +506,9 @@
nameAndAliases.insert(nameAndAliases.begin(), trait.name);
for (const std::string &nameOrAlias : nameAndAliases) {
bool isAlias = trait.name != nameOrAlias;
+ std::shared_ptr<Codec2Client> client;
std::shared_ptr<Codec2Client::Interface> intf =
- Codec2Client::CreateInterfaceByName(nameOrAlias.c_str());
+ Codec2Client::CreateInterfaceByName(nameOrAlias.c_str(), &client);
if (!intf) {
ALOGD("could not create interface for %s'%s'",
isAlias ? "alias " : "",
@@ -631,7 +702,40 @@
caps->addProfileLevel(VP8ProfileMain, VP8Level_Version0);
}
}
- addSupportedColorFormats(intf, caps.get(), trait, mediaType);
+
+ auto it = nameToPixelFormatMap.find(client->getServiceName());
+ if (it == nameToPixelFormatMap.end()) {
+ it = nameToPixelFormatMap.try_emplace(client->getServiceName()).first;
+ PixelFormatMap &pixelFormatMap = it->second;
+ pixelFormatMap[HAL_PIXEL_FORMAT_YCBCR_420_888] = COLOR_FormatYUV420Flexible;
+ pixelFormatMap[HAL_PIXEL_FORMAT_YCBCR_P010] = COLOR_FormatYUVP010;
+ pixelFormatMap[HAL_PIXEL_FORMAT_RGBA_1010102] = COLOR_Format32bitABGR2101010;
+ pixelFormatMap[HAL_PIXEL_FORMAT_RGBA_FP16] = COLOR_Format64bitABGRFloat;
+
+ std::shared_ptr<C2StoreFlexiblePixelFormatDescriptorsInfo> pixelFormatInfo;
+ std::vector<std::unique_ptr<C2Param>> heapParams;
+ if (client->query(
+ {},
+ {C2StoreFlexiblePixelFormatDescriptorsInfo::PARAM_TYPE},
+ C2_MAY_BLOCK,
+ &heapParams) == C2_OK
+ && heapParams.size() == 1u) {
+ pixelFormatInfo.reset(C2StoreFlexiblePixelFormatDescriptorsInfo::From(
+ heapParams[0].release()));
+ }
+ if (pixelFormatInfo && *pixelFormatInfo) {
+ for (size_t i = 0; i < pixelFormatInfo->flexCount(); ++i) {
+ C2FlexiblePixelFormatDescriptorStruct &desc =
+ pixelFormatInfo->m.values[i];
+ std::optional<int32_t> colorFormat = findFrameworkColorFormat(desc);
+ if (colorFormat) {
+ pixelFormatMap[desc.pixelFormat] = *colorFormat;
+ }
+ }
+ }
+ }
+ addSupportedColorFormats(
+ intf, caps.get(), trait, mediaType, it->second);
}
}
}
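
With addDefaultColorFormat() de-duplicating against the queried list, the
advertised order comes out as the comment above intends. An illustrative result
for a codec whose pixel-format query reports YCBCR_420_888 and YCBCR_P010:

    c2.android.* codec: YUV420Flexible, YUV420Planar, YUV420SemiPlanar,
                        YUV420PackedPlanar, YUV420PackedSemiPlanar, Surface, YUVP010
    vendor codec:       Surface, YUV420Flexible, YUV420Planar, YUV420SemiPlanar,
                        YUV420PackedPlanar, YUV420PackedSemiPlanar, YUVP010

YUVP010 lands last because it is appended from whatever remains of the queried
formats after the defaults have been emitted and erased from the list.
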
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
index 2213001..bff9db5 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.cpp
@@ -88,7 +88,7 @@
uint32_t planeW = img->mWidth / plane.colSampling;
uint32_t planeH = img->mHeight / plane.rowSampling;
- bool canCopyByRow = (plane.colInc == 1) && (img->mPlane[i].mColInc == 1);
+ bool canCopyByRow = (plane.colInc == bpp) && (img->mPlane[i].mColInc == bpp);
bool canCopyByPlane = canCopyByRow && (plane.rowInc == img->mPlane[i].mRowInc);
if (canCopyByPlane) {
MemCopier<ToMediaImage, 0>::copy(imgRow, viewRow, plane.rowInc * planeH);
@@ -118,22 +118,6 @@
} // namespace
-bool IsFormatR10G10B10A2SupportedForLegacyRendering() {
- const AHardwareBuffer_Desc desc = {
- .width = 320,
- .height = 240,
- .format = AHARDWAREBUFFER_FORMAT_R10G10B10A2_UNORM,
- .layers = 1,
- .usage = AHARDWAREBUFFER_USAGE_CPU_READ_RARELY | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
- AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE,
- .stride = 0,
- .rfu0 = 0,
- .rfu1 = 0,
- };
-
- return AHardwareBuffer_isSupported(&desc);
-}
-
status_t ImageCopy(uint8_t *imgBase, const MediaImage2 *img, const C2GraphicView &view) {
if (view.crop().width != img->mWidth || view.crop().height != img->mHeight) {
return BAD_VALUE;
diff --git a/media/codec2/sfplugin/utils/Codec2BufferUtils.h b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
index c4651a4..9fa642d 100644
--- a/media/codec2/sfplugin/utils/Codec2BufferUtils.h
+++ b/media/codec2/sfplugin/utils/Codec2BufferUtils.h
@@ -27,11 +27,6 @@
namespace android {
/**
- * Check if R10G10B10A2 is supported in legacy rendering path that involves GPU
- */
-bool IsFormatR10G10B10A2SupportedForLegacyRendering();
-
-/**
* Converts an RGB view to planar YUV 420 media image.
*
* \param dstY pointer to media image buffer
diff --git a/media/codec2/sfplugin/utils/Codec2Mapper.cpp b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
index f557830..93f29ca 100644
--- a/media/codec2/sfplugin/utils/Codec2Mapper.cpp
+++ b/media/codec2/sfplugin/utils/Codec2Mapper.cpp
@@ -18,6 +18,9 @@
#define LOG_TAG "Codec2Mapper"
#include <utils/Log.h>
+#include <map>
+#include <optional>
+
#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/SurfaceUtils.h>
#include <media/stagefright/foundation/ALookup.h>
@@ -167,6 +170,9 @@
{ C2Config::LEVEL_DV_MAIN_UHD_30, DolbyVisionLevelUhd30 },
{ C2Config::LEVEL_DV_MAIN_UHD_48, DolbyVisionLevelUhd48 },
{ C2Config::LEVEL_DV_MAIN_UHD_60, DolbyVisionLevelUhd60 },
+ { C2Config::LEVEL_DV_MAIN_UHD_120, DolbyVisionLevelUhd120 },
+ { C2Config::LEVEL_DV_MAIN_8K_30, DolbyVisionLevel8k30 },
+ { C2Config::LEVEL_DV_MAIN_8K_60, DolbyVisionLevel8k60 },
// high tiers are not yet supported on android, for now map them to main tier
{ C2Config::LEVEL_DV_HIGH_HD_24, DolbyVisionLevelHd24 },
@@ -178,6 +184,9 @@
{ C2Config::LEVEL_DV_HIGH_UHD_30, DolbyVisionLevelUhd30 },
{ C2Config::LEVEL_DV_HIGH_UHD_48, DolbyVisionLevelUhd48 },
{ C2Config::LEVEL_DV_HIGH_UHD_60, DolbyVisionLevelUhd60 },
+ { C2Config::LEVEL_DV_HIGH_UHD_120, DolbyVisionLevelUhd120 },
+ { C2Config::LEVEL_DV_HIGH_8K_30, DolbyVisionLevel8k30 },
+ { C2Config::LEVEL_DV_HIGH_8K_60, DolbyVisionLevel8k60 },
};
ALookup<C2Config::profile_t, int32_t> sDolbyVisionProfiles = {
@@ -402,6 +411,30 @@
{ C2Config::PROFILE_AV1_0, AV1ProfileMain10HDR10Plus },
};
+// HAL_PIXEL_FORMAT_* -> COLOR_Format*
+ALookup<uint32_t, int32_t> sPixelFormats = {
+ { HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, COLOR_FormatSurface },
+
+ // YCBCR_420_888 maps to YUV420Flexible and vice versa
+ { HAL_PIXEL_FORMAT_YCBCR_420_888, COLOR_FormatYUV420Flexible },
+
+ // Fallback matches for YCBCR_420_888
+ { HAL_PIXEL_FORMAT_YCBCR_420_888, COLOR_FormatYUV420Planar },
+ { HAL_PIXEL_FORMAT_YCBCR_420_888, COLOR_FormatYUV420SemiPlanar },
+ { HAL_PIXEL_FORMAT_YCBCR_420_888, COLOR_FormatYUV420PackedPlanar },
+ { HAL_PIXEL_FORMAT_YCBCR_420_888, COLOR_FormatYUV420PackedSemiPlanar },
+
+ // Fallback matches for YUV420Flexible
+ { HAL_PIXEL_FORMAT_YCRCB_420_SP, COLOR_FormatYUV420Flexible },
+ { HAL_PIXEL_FORMAT_YV12, COLOR_FormatYUV420Flexible },
+
+ { HAL_PIXEL_FORMAT_YCBCR_422_SP, COLOR_FormatYUV422PackedSemiPlanar },
+ { HAL_PIXEL_FORMAT_YCBCR_422_I, COLOR_FormatYUV422PackedPlanar },
+ { HAL_PIXEL_FORMAT_YCBCR_P010, COLOR_FormatYUVP010 },
+ { HAL_PIXEL_FORMAT_RGBA_1010102, COLOR_Format32bitABGR2101010 },
+ { HAL_PIXEL_FORMAT_RGBA_FP16, COLOR_Format64bitABGRFloat },
+};
+
/**
* A helper that passes through vendor extension profile and level values.
*/
@@ -975,41 +1008,19 @@
// static
bool C2Mapper::mapPixelFormatFrameworkToCodec(
int32_t frameworkValue, uint32_t *c2Value) {
- switch (frameworkValue) {
- case COLOR_FormatSurface:
- *c2Value = HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED;
- return true;
- case COLOR_FormatYUV420Flexible:
- case COLOR_FormatYUV420Planar:
- case COLOR_FormatYUV420SemiPlanar:
- case COLOR_FormatYUV420PackedPlanar:
- case COLOR_FormatYUV420PackedSemiPlanar:
- *c2Value = HAL_PIXEL_FORMAT_YCBCR_420_888;
- return true;
- default:
- // Passthrough
- *c2Value = uint32_t(frameworkValue);
- return true;
+ if (!sPixelFormats.map(frameworkValue, c2Value)) {
+ // passthrough if not mapped
+ *c2Value = uint32_t(frameworkValue);
}
+ return true;
}
// static
bool C2Mapper::mapPixelFormatCodecToFramework(
uint32_t c2Value, int32_t *frameworkValue) {
- switch (c2Value) {
- case HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED:
- *frameworkValue = COLOR_FormatSurface;
- return true;
- case HAL_PIXEL_FORMAT_YCBCR_422_SP:
- case HAL_PIXEL_FORMAT_YCRCB_420_SP:
- case HAL_PIXEL_FORMAT_YCBCR_422_I:
- case HAL_PIXEL_FORMAT_YCBCR_420_888:
- case HAL_PIXEL_FORMAT_YV12:
- *frameworkValue = COLOR_FormatYUV420Flexible;
- return true;
- default:
- // Passthrough
- *frameworkValue = int32_t(c2Value);
- return true;
+ if (!sPixelFormats.map(c2Value, frameworkValue)) {
+ // passthrough if not mapped
+ *frameworkValue = int32_t(c2Value);
}
+ return true;
}
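
Because ALookup returns the first matching entry, the ordering of sPixelFormats
encodes the fallbacks in both directions. Illustrative lookups, assuming
first-match semantics:

    uint32_t fmt;
    C2Mapper::mapPixelFormatFrameworkToCodec(COLOR_FormatYUV420Planar, &fmt);
    // fmt == HAL_PIXEL_FORMAT_YCBCR_420_888 (fallback entry for YUV420 variants)

    int32_t cf;
    C2Mapper::mapPixelFormatCodecToFramework(HAL_PIXEL_FORMAT_YV12, &cf);
    // cf == COLOR_FormatYUV420Flexible (fallback entry for legacy YUV formats)

Values absent from the table still pass through unchanged, preserving the old
behavior for vendor-defined formats.
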
diff --git a/media/codec2/vndk/Android.bp b/media/codec2/vndk/Android.bp
index be81c84..27cd1f8 100644
--- a/media/codec2/vndk/Android.bp
+++ b/media/codec2/vndk/Android.bp
@@ -73,11 +73,12 @@
"libbase",
"libcutils",
"libdl",
+ "libdmabufheap",
+ "libfmq",
+ "libgralloctypes",
"libhardware",
"libhidlbase",
"libion",
- "libdmabufheap",
- "libfmq",
"liblog",
"libnativewindow",
"libstagefright_foundation",
@@ -92,6 +93,44 @@
],
}
+// public dependency for statically linking to libcodec2_vndk for unit tests
+cc_defaults {
+ name: "libcodec2-static-defaults",
+
+ static_libs: [
+ "liblog",
+ "libion",
+ "libfmq",
+ "libbase",
+ "libutils",
+ "libcutils",
+ "libcodec2",
+ "libhidlbase",
+ "libdmabufheap",
+ "libcodec2_vndk",
+ "libnativewindow",
+ "libcodec2_soft_common",
+ "libsfplugin_ccodec_utils",
+ "libstagefright_foundation",
+ "libstagefright_bufferpool@2.0.1",
+ "libgralloctypes",
+ "android.hardware.graphics.mapper@2.0",
+ "android.hardware.graphics.mapper@3.0",
+ "android.hardware.media.bufferpool@2.0",
+ "android.hardware.graphics.allocator@2.0",
+ "android.hardware.graphics.allocator@3.0",
+ "android.hardware.graphics.bufferqueue@2.0",
+ ],
+
+ shared_libs: [
+ "libui",
+ "libdl",
+ "libhardware",
+ "libvndksupport",
+ "libprocessgroup",
+ ],
+}
+
// public dependency for implementing Codec 2 components
cc_defaults {
name: "libcodec2-impl-defaults",
diff --git a/media/codec2/vndk/C2AllocatorGralloc.cpp b/media/codec2/vndk/C2AllocatorGralloc.cpp
index 6a7f19c..bc4053d 100644
--- a/media/codec2/vndk/C2AllocatorGralloc.cpp
+++ b/media/codec2/vndk/C2AllocatorGralloc.cpp
@@ -20,8 +20,10 @@
#include <mutex>
+#include <aidl/android/hardware/graphics/common/PlaneLayoutComponentType.h>
#include <android/hardware/graphics/common/1.2/types.h>
#include <cutils/native_handle.h>
+#include <gralloctypes/Gralloc4.h>
#include <hardware/gralloc.h>
#include <ui/GraphicBufferAllocator.h>
#include <ui/GraphicBufferMapper.h>
@@ -29,6 +31,7 @@
#include <C2AllocatorGralloc.h>
#include <C2Buffer.h>
+#include <C2Debug.h>
#include <C2PlatformSupport.h>
using ::android::hardware::hidl_handle;
@@ -230,8 +233,90 @@
}
};
+static
+c2_status_t Gralloc4Mapper_lock(native_handle_t *handle, uint64_t usage, const Rect& bounds,
+ C2PlanarLayout *layout, uint8_t **addr) {
+ GraphicBufferMapper &mapper = GraphicBufferMapper::get();
+
+ std::vector<ui::PlaneLayout> planes;
+ // this method is only supported on Gralloc 4 or later
+ status_t err = mapper.getPlaneLayouts(handle, &planes);
+ if (err != NO_ERROR || planes.empty()) {
+ return C2_CANNOT_DO;
+ }
+
+ uint8_t *pointer = nullptr;
+ err = mapper.lock(handle, usage, bounds, (void **)&pointer, nullptr, nullptr);
+ if (err != NO_ERROR || pointer == nullptr) {
+ return C2_CORRUPTED;
+ }
+
+ using aidl::android::hardware::graphics::common::PlaneLayoutComponentType;
+ using aidl::android::hardware::graphics::common::PlaneLayoutComponent;
+
+ layout->type = C2PlanarLayout::TYPE_YUV;
+ layout->numPlanes = 0;
+ layout->rootPlanes = 0;
+
+ for (const ui::PlaneLayout &plane : planes) {
+ layout->rootPlanes++;
+ uint32_t lastOffsetInBits = 0;
+ uint32_t rootIx = layout->numPlanes;
+
+ for (const PlaneLayoutComponent &component : plane.components) {
+ if (!gralloc4::isStandardPlaneLayoutComponentType(component.type)) {
+ mapper.unlock(handle);
+ return C2_CANNOT_DO;
+ }
+
+ uint32_t rightShiftBits = component.offsetInBits - lastOffsetInBits;
+ uint32_t allocatedDepthInBits = component.sizeInBits + rightShiftBits;
+ C2PlanarLayout::plane_index_t planeId;
+ C2PlaneInfo::channel_t channel;
+
+ switch (static_cast<PlaneLayoutComponentType>(component.type.value)) {
+ case PlaneLayoutComponentType::Y:
+ planeId = C2PlanarLayout::PLANE_Y;
+ channel = C2PlaneInfo::CHANNEL_Y;
+ break;
+ case PlaneLayoutComponentType::CB:
+ planeId = C2PlanarLayout::PLANE_U;
+ channel = C2PlaneInfo::CHANNEL_CB;
+ break;
+ case PlaneLayoutComponentType::CR:
+ planeId = C2PlanarLayout::PLANE_V;
+ channel = C2PlaneInfo::CHANNEL_CR;
+ break;
+ default:
+ mapper.unlock(handle);
+ return C2_CORRUPTED;
+ }
+
+ addr[planeId] = pointer + plane.offsetInBytes + (component.offsetInBits / 8);
+ layout->planes[planeId] = {
+ channel, // channel
+ static_cast<int32_t>(plane.sampleIncrementInBits / 8), // colInc
+ static_cast<int32_t>(plane.strideInBytes), // rowInc
+ static_cast<uint32_t>(plane.horizontalSubsampling), // mColSampling
+ static_cast<uint32_t>(plane.verticalSubsampling), // mRowSampling
+ allocatedDepthInBits, // allocatedDepth (bits)
+ static_cast<uint32_t>(component.sizeInBits), // bitDepth (bits)
+ rightShiftBits, // rightShift (bits)
+ C2PlaneInfo::NATIVE, // endianness
+ rootIx, // rootIx
+ static_cast<uint32_t>(component.offsetInBits / 8), // offset (bytes)
+ };
+
+ layout->numPlanes++;
+ lastOffsetInBits = component.offsetInBits + component.sizeInBits;
+ }
+ }
+ return C2_OK;
+}
+
} // unnamed namespace
+
native_handle_t *UnwrapNativeCodec2GrallocHandle(const C2Handle *const handle) {
return C2HandleGralloc::UnwrapNativeHandle(handle);
}
@@ -385,6 +470,10 @@
mBuffer, mWidth, mHeight, mFormat, mGrallocUsage,
mStride, generation, igbp_id, igbp_slot);
}
+
+ // 'NATIVE' on Android means LITTLE_ENDIAN
+ constexpr C2PlaneInfo::endianness_t kEndianness = C2PlaneInfo::NATIVE;
+
switch (mFormat) {
case static_cast<uint32_t>(PixelFormat4::RGBA_1010102): {
// TRICKY: this is used for media as YUV444 in the case when it is queued directly to a
@@ -609,17 +698,6 @@
C2PlanarLayout::PLANE_V, // rootIx
0, // offset
};
- // handle interleaved formats
- intptr_t uvOffset = addr[C2PlanarLayout::PLANE_V] - addr[C2PlanarLayout::PLANE_U];
- if (uvOffset > 0 && uvOffset < (intptr_t)ycbcrLayout.chroma_step) {
- layout->rootPlanes = 2;
- layout->planes[C2PlanarLayout::PLANE_V].rootIx = C2PlanarLayout::PLANE_U;
- layout->planes[C2PlanarLayout::PLANE_V].offset = uvOffset;
- } else if (uvOffset < 0 && uvOffset > -(intptr_t)ycbcrLayout.chroma_step) {
- layout->rootPlanes = 2;
- layout->planes[C2PlanarLayout::PLANE_U].rootIx = C2PlanarLayout::PLANE_V;
- layout->planes[C2PlanarLayout::PLANE_U].offset = -uvOffset;
- }
break;
}
@@ -646,7 +724,7 @@
16, // allocatedDepth
10, // bitDepth
6, // rightShift
- C2PlaneInfo::LITTLE_END, // endianness
+ kEndianness, // endianness
C2PlanarLayout::PLANE_Y, // rootIx
0, // offset
};
@@ -659,7 +737,7 @@
16, // allocatedDepth
10, // bitDepth
6, // rightShift
- C2PlaneInfo::LITTLE_END, // endianness
+ kEndianness, // endianness
C2PlanarLayout::PLANE_U, // rootIx
0, // offset
};
@@ -672,7 +750,7 @@
16, // allocatedDepth
10, // bitDepth
6, // rightShift
- C2PlaneInfo::LITTLE_END, // endianness
+ kEndianness, // endianness
C2PlanarLayout::PLANE_U, // rootIx
2, // offset
};
@@ -680,9 +758,15 @@
}
default: {
- // We don't know what it is, but let's try to lock it.
+ // We don't know what it is, let's try to lock it with gralloc4
android_ycbcr ycbcrLayout;
+ c2_status_t status = Gralloc4Mapper_lock(
+ const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, layout, addr);
+ if (status == C2_OK) {
+ break;
+ }
+ // fallback to lockYCbCr
status_t err = GraphicBufferMapper::get().lockYCbCr(
const_cast<native_handle_t*>(mBuffer), grallocUsage, rect, &ycbcrLayout);
if (err == OK && ycbcrLayout.y && ycbcrLayout.cb && ycbcrLayout.cr
@@ -734,17 +818,6 @@
C2PlanarLayout::PLANE_V, // rootIx
0, // offset
};
- // handle interleaved formats
- intptr_t uvOffset = addr[C2PlanarLayout::PLANE_V] - addr[C2PlanarLayout::PLANE_U];
- if (uvOffset > 0 && uvOffset < (intptr_t)ycbcrLayout.chroma_step) {
- layout->rootPlanes = 2;
- layout->planes[C2PlanarLayout::PLANE_V].rootIx = C2PlanarLayout::PLANE_U;
- layout->planes[C2PlanarLayout::PLANE_V].offset = uvOffset;
- } else if (uvOffset < 0 && uvOffset > -(intptr_t)ycbcrLayout.chroma_step) {
- layout->rootPlanes = 2;
- layout->planes[C2PlanarLayout::PLANE_U].rootIx = C2PlanarLayout::PLANE_V;
- layout->planes[C2PlanarLayout::PLANE_U].offset = -uvOffset;
- }
break;
}
@@ -790,6 +863,29 @@
}
mLocked = true;
+ // handle interleaved formats
+ if (layout->type == C2PlanarLayout::TYPE_YUV && layout->rootPlanes == 3) {
+ intptr_t uvOffset = addr[C2PlanarLayout::PLANE_V] - addr[C2PlanarLayout::PLANE_U];
+ intptr_t uvColInc = layout->planes[C2PlanarLayout::PLANE_U].colInc;
+ if (uvOffset > 0 && uvOffset < uvColInc) {
+ layout->rootPlanes = 2;
+ layout->planes[C2PlanarLayout::PLANE_V].rootIx = C2PlanarLayout::PLANE_U;
+ layout->planes[C2PlanarLayout::PLANE_V].offset = uvOffset;
+ } else if (uvOffset < 0 && uvOffset > -uvColInc) {
+ layout->rootPlanes = 2;
+ layout->planes[C2PlanarLayout::PLANE_U].rootIx = C2PlanarLayout::PLANE_V;
+ layout->planes[C2PlanarLayout::PLANE_U].offset = -uvOffset;
+ }
+ }
+
+ ALOGV("C2AllocationGralloc::map: layout: type=%d numPlanes=%d rootPlanes=%d",
+ layout->type, layout->numPlanes, layout->rootPlanes);
+ for (int i = 0; i < layout->numPlanes; ++i) {
+ const C2PlaneInfo &plane = layout->planes[i];
+ ALOGV("C2AllocationGralloc::map: plane[%d]: colInc=%d rowInc=%d rootIx=%u offset=%u",
+ i, plane.colInc, plane.rowInc, plane.rootIx, plane.offset);
+ }
+
return C2_OK;
}
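
The relocated interleaved-format handling generalizes the removed per-case
logic: instead of comparing uvOffset against ycbcrLayout.chroma_step, it uses
the U plane's colInc, which holds the same value once the layout is filled in.
A worked semi-planar example (illustrative):

    // NV12: addr[PLANE_U] = base, addr[PLANE_V] = base + 1, chroma colInc = 2
    //   uvOffset = +1, 0 < 1 < 2   -> PLANE_V re-rooted onto PLANE_U at offset 1
    // NV21: addr[PLANE_V] = base, addr[PLANE_U] = base + 1
    //   uvOffset = -1, -2 < -1 < 0 -> PLANE_U re-rooted onto PLANE_V at offset 1

Running this once after mapping also covers layouts produced by the new
Gralloc4Mapper_lock() path, not just the lockYCbCr() cases.
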
diff --git a/media/codec2/vndk/C2Store.cpp b/media/codec2/vndk/C2Store.cpp
index 1660c38..dfdd84d 100644
--- a/media/codec2/vndk/C2Store.cpp
+++ b/media/codec2/vndk/C2Store.cpp
@@ -301,13 +301,21 @@
std::lock_guard<std::mutex> lock(_mComponentStoreReadLock);
_mComponentStore = store;
}
- std::shared_ptr<C2AllocatorIon> allocator;
+ std::shared_ptr<C2AllocatorIon> ionAllocator;
{
std::lock_guard<std::mutex> lock(gIonAllocatorMutex);
- allocator = gIonAllocator.lock();
+ ionAllocator = gIonAllocator.lock();
}
- if (allocator) {
- UseComponentStoreForIonAllocator(allocator, store);
+ if (ionAllocator) {
+ UseComponentStoreForIonAllocator(ionAllocator, store);
+ }
+ std::shared_ptr<C2DmaBufAllocator> dmaAllocator;
+ {
+ std::lock_guard<std::mutex> lock(gDmaBufAllocatorMutex);
+ dmaAllocator = gDmaBufAllocator.lock();
+ }
+ if (dmaAllocator) {
+ UseComponentStoreForDmaBufAllocator(dmaAllocator, store);
}
}
diff --git a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
index e55bdc0..2115cc3 100644
--- a/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
+++ b/media/codec2/vndk/platform/C2SurfaceSyncObj.cpp
@@ -228,10 +228,10 @@
tv.tv_nsec = timeoutNs % 1000000000;
int ret = syscall(__NR_futex, &mCond, FUTEX_WAIT, waitId, &tv, NULL, 0);
- if (ret == 0 || ret == EAGAIN) {
+ if (ret == 0 || errno == EAGAIN) {
return C2_OK;
}
- if (ret == EINTR || ret == ETIMEDOUT) {
+ if (errno == EINTR || errno == ETIMEDOUT) {
return C2_TIMED_OUT;
}
return C2_BAD_VALUE;
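
The fix reflects how raw syscalls report failure: syscall(2) returns -1 and
leaves the error code in errno, so comparing the return value itself against
EAGAIN/EINTR/ETIMEDOUT could never match. A minimal sketch of the corrected
pattern (illustrative, mirroring the code above):

    int ret = syscall(__NR_futex, &mCond, FUTEX_WAIT, waitId, &tv, NULL, 0);
    if (ret == 0 || errno == EAGAIN) {
        // woken up, or the futex word changed before we slept: success
    } else if (errno == EINTR || errno == ETIMEDOUT) {
        // interrupted or timed out: report C2_TIMED_OUT so the caller retries
    }
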
diff --git a/media/extractors/flac/Android.bp b/media/extractors/flac/Android.bp
index 9a2a76b..f6ce969 100644
--- a/media/extractors/flac/Android.bp
+++ b/media/extractors/flac/Android.bp
@@ -19,7 +19,7 @@
cc_library {
name: "libflacextractor",
- defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+ defaults: ["extractor-defaults"],
srcs: ["FLACExtractor.cpp"],
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index 8836c47..eccbf46 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1132,10 +1132,10 @@
&& size >= 5) {
const uint8_t *ptr = (const uint8_t *)data;
const uint8_t profile = ptr[2] >> 1;
- const uint8_t bl_compatibility_id = (ptr[4]) >> 4;
+ const uint8_t blCompatibilityId = (ptr[4]) >> 4;
bool create_two_tracks = false;
- if (bl_compatibility_id && bl_compatibility_id != 15) {
+ if (blCompatibilityId && blCompatibilityId != 15) {
create_two_tracks = true;
}
@@ -1147,13 +1147,15 @@
track_b->timescale = mLastTrack->timescale;
track_b->sampleTable = mLastTrack->sampleTable;
- track_b->includes_expensive_metadata = mLastTrack->includes_expensive_metadata;
+ track_b->includes_expensive_metadata =
+ mLastTrack->includes_expensive_metadata;
track_b->skipTrack = mLastTrack->skipTrack;
track_b->elst_needs_processing = mLastTrack->elst_needs_processing;
track_b->elst_media_time = mLastTrack->elst_media_time;
track_b->elst_segment_duration = mLastTrack->elst_segment_duration;
track_b->elst_shift_start_ticks = mLastTrack->elst_shift_start_ticks;
- track_b->elst_initial_empty_edit_ticks = mLastTrack->elst_initial_empty_edit_ticks;
+ track_b->elst_initial_empty_edit_ticks =
+ mLastTrack->elst_initial_empty_edit_ticks;
track_b->subsample_encryption = mLastTrack->subsample_encryption;
track_b->mTx3gBuffer = mLastTrack->mTx3gBuffer;
@@ -2591,9 +2593,11 @@
*offset += chunk_size;
break;
}
- case FOURCC("dvcC"):
- case FOURCC("dvvC"): {
+ case FOURCC("dvcC"):
+ case FOURCC("dvvC"):
+ case FOURCC("dvwC"):
+ {
if (chunk_data_size != 24) {
return ERROR_MALFORMED;
}
@@ -2613,13 +2617,14 @@
return ERROR_MALFORMED;
AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_2,
- buffer.get(), chunk_data_size);
+ buffer.get(), chunk_data_size);
AMediaFormat_setString(mLastTrack->meta, AMEDIAFORMAT_KEY_MIME,
MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
*offset += chunk_size;
break;
}
+
case FOURCC("d263"):
{
*offset += chunk_size;
@@ -3480,7 +3485,7 @@
}
unsigned mask = br.getBits(8);
for (unsigned i = 0; i < 8; i++) {
- if (((0x1 << i) && mask) == 0)
+ if (((0x1 << i) & mask) == 0)
continue;
if (br.numBitsLeft() < 8) {
@@ -4458,7 +4463,6 @@
if (!AMediaFormat_getString(track->meta, AMEDIAFORMAT_KEY_MIME, &mime)) {
return NULL;
}
-
sp<ItemTable> itemTable;
if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC)) {
void *data;
@@ -4491,14 +4495,14 @@
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
void *data;
size_t size;
- if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)) {
+ if (!AMediaFormat_getBuffer(track->meta, AMEDIAFORMAT_KEY_CSD_2, &data, &size)
+ || size != 24) {
return NULL;
}
const uint8_t *ptr = (const uint8_t *)data;
-
// dv_major.dv_minor Should be 1.0 or 2.1
- if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
+ if ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1)) {
return NULL;
}
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)
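
The operator fix in the mask loop above matters because (0x1 << i) is always
nonzero: with logical &&, any nonzero mask made the condition true for every
bit position, so no entry was ever skipped. A worked example (illustrative):

    unsigned mask = 0x05;  // entries 0 and 2 present
    // old: ((0x1 << 1) && mask) == 0  ->  (2 && 5) == 0  ->  1 == 0  -> false, entry 1 parsed
    // new: ((0x1 << 1) &  mask) == 0  ->  (2 & 5)  == 0  ->  0 == 0  -> true,  entry 1 skipped
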
diff --git a/media/extractors/mpeg2/Android.bp b/media/extractors/mpeg2/Android.bp
index 7e6247b..4f0796e 100644
--- a/media/extractors/mpeg2/Android.bp
+++ b/media/extractors/mpeg2/Android.bp
@@ -61,6 +61,7 @@
"libhidlbase",
"libhidlmemory",
"libjsoncpp",
+ "libmedia_helper",
"libprocessgroup",
"libstagefright_esds",
"libstagefright_foundation_without_imemory",
diff --git a/media/extractors/wav/Android.bp b/media/extractors/wav/Android.bp
index cc5e1c7..76546b8 100644
--- a/media/extractors/wav/Android.bp
+++ b/media/extractors/wav/Android.bp
@@ -18,7 +18,7 @@
cc_library {
name: "libwavextractor",
- defaults: ["extractor-defaults", "libbinder_ndk_host_user"],
+ defaults: ["extractor-defaults"],
srcs: ["WAVExtractor.cpp"],
diff --git a/media/janitors/media_leads_OWNERS b/media/janitors/media_leads_OWNERS
new file mode 100644
index 0000000..b7dbdee
--- /dev/null
+++ b/media/janitors/media_leads_OWNERS
@@ -0,0 +1,9 @@
+# gerrit owner/approvers corresponding to the TLs within the media team
+# loosely (as of 2022/3) fgoldfain@ and direct reports
+arifdikici@google.com
+elaurent@google.com
+fgoldfain@google.com #{LAST_RESORT_SUGGESTION}
+lajos@google.com
+nchalko@google.com
+olly@google.com
+robertshih@google.com
diff --git a/media/janitors/reliability_mainline_OWNERS b/media/janitors/reliability_mainline_OWNERS
index e4c4fc2..cced19c 100644
--- a/media/janitors/reliability_mainline_OWNERS
+++ b/media/janitors/reliability_mainline_OWNERS
@@ -1,5 +1,5 @@
# Bug component: 1051309
-# go/android-media-relaibility
+# go/android-media-reliability
essick@google.com
nchalko@google.com
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 4b08295..212a787 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -444,6 +444,22 @@
};
typedef int32_t aaudio_content_type_t;
+enum {
+
+ /**
+ * Constant indicating the audio content associated with these attributes will follow the
+ * default platform behavior with regards to which content will be spatialized or not.
+ */
+ AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO = 1,
+
+ /**
+ * Constant indicating the audio content associated with these attributes should never
+ * be spatialized.
+ */
+ AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER = 2,
+};
+typedef int32_t aaudio_spatialization_behavior_t;
+
/**
* Defines the audio source.
* An audio source defines both a default physical source of audio signal, and a recording
@@ -565,6 +581,145 @@
};
typedef int32_t aaudio_session_id_t;
+/**
+ * Defines the audio channel mask.
+ * Channel masks are used to describe the samples and their
+ * arrangement in the audio frame. They are also used in the endpoint
+ * (e.g. a USB audio interface, a DAC connected to headphones) to
+ * specify allowable configurations of a particular device.
+ *
+ * Added in API level 32.
+ */
+enum {
+ /**
+ * Invalid channel mask
+ */
+ AAUDIO_CHANNEL_INVALID = -1,
+
+ /**
+ * Output audio channel mask
+ */
+ AAUDIO_CHANNEL_FRONT_LEFT = 1 << 0,
+ AAUDIO_CHANNEL_FRONT_RIGHT = 1 << 1,
+ AAUDIO_CHANNEL_FRONT_CENTER = 1 << 2,
+ AAUDIO_CHANNEL_LOW_FREQUENCY = 1 << 3,
+ AAUDIO_CHANNEL_BACK_LEFT = 1 << 4,
+ AAUDIO_CHANNEL_BACK_RIGHT = 1 << 5,
+ AAUDIO_CHANNEL_FRONT_LEFT_OF_CENTER = 1 << 6,
+ AAUDIO_CHANNEL_FRONT_RIGHT_OF_CENTER = 1 << 7,
+ AAUDIO_CHANNEL_BACK_CENTER = 1 << 8,
+ AAUDIO_CHANNEL_SIDE_LEFT = 1 << 9,
+ AAUDIO_CHANNEL_SIDE_RIGHT = 1 << 10,
+ AAUDIO_CHANNEL_TOP_CENTER = 1 << 11,
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT = 1 << 12,
+ AAUDIO_CHANNEL_TOP_FRONT_CENTER = 1 << 13,
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT = 1 << 14,
+ AAUDIO_CHANNEL_TOP_BACK_LEFT = 1 << 15,
+ AAUDIO_CHANNEL_TOP_BACK_CENTER = 1 << 16,
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT = 1 << 17,
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT = 1 << 18,
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT = 1 << 19,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_LEFT = 1 << 20,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_CENTER = 1 << 21,
+ AAUDIO_CHANNEL_BOTTOM_FRONT_RIGHT = 1 << 22,
+ AAUDIO_CHANNEL_LOW_FREQUENCY_2 = 1 << 23,
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT = 1 << 24,
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT = 1 << 25,
+
+ AAUDIO_CHANNEL_MONO = AAUDIO_CHANNEL_FRONT_LEFT,
+ AAUDIO_CHANNEL_STEREO = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT,
+ AAUDIO_CHANNEL_2POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_TRI = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ AAUDIO_CHANNEL_TRI_BACK = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_3POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_2POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_2POINT1POINT2 = AAUDIO_CHANNEL_2POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_3POINT0POINT2 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_3POINT1POINT2 = AAUDIO_CHANNEL_3POINT0POINT2 |
+ AAUDIO_CHANNEL_LOW_FREQUENCY,
+ AAUDIO_CHANNEL_QUAD = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_QUAD_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_SURROUND = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_PENTA = AAUDIO_CHANNEL_QUAD |
+ AAUDIO_CHANNEL_FRONT_CENTER,
+ // aka 5POINT1_BACK
+ AAUDIO_CHANNEL_5POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT,
+ AAUDIO_CHANNEL_5POINT1_SIDE = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_6POINT1 = AAUDIO_CHANNEL_FRONT_LEFT |
+ AAUDIO_CHANNEL_FRONT_RIGHT |
+ AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_LOW_FREQUENCY |
+ AAUDIO_CHANNEL_BACK_LEFT |
+ AAUDIO_CHANNEL_BACK_RIGHT |
+ AAUDIO_CHANNEL_BACK_CENTER,
+ AAUDIO_CHANNEL_7POINT1 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_SIDE_LEFT |
+ AAUDIO_CHANNEL_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT2 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_5POINT1POINT4 = AAUDIO_CHANNEL_5POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT2 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+ AAUDIO_CHANNEL_7POINT1POINT4 = AAUDIO_CHANNEL_7POINT1 |
+ AAUDIO_CHANNEL_TOP_FRONT_LEFT |
+ AAUDIO_CHANNEL_TOP_FRONT_RIGHT |
+ AAUDIO_CHANNEL_TOP_BACK_LEFT |
+ AAUDIO_CHANNEL_TOP_BACK_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT4 = AAUDIO_CHANNEL_7POINT1POINT4 |
+ AAUDIO_CHANNEL_FRONT_WIDE_LEFT |
+ AAUDIO_CHANNEL_FRONT_WIDE_RIGHT,
+ AAUDIO_CHANNEL_9POINT1POINT6 = AAUDIO_CHANNEL_9POINT1POINT4 |
+ AAUDIO_CHANNEL_TOP_SIDE_LEFT |
+ AAUDIO_CHANNEL_TOP_SIDE_RIGHT,
+
+ AAUDIO_CHANNEL_FRONT_BACK = AAUDIO_CHANNEL_FRONT_CENTER |
+ AAUDIO_CHANNEL_BACK_CENTER,
+};
+typedef uint32_t aaudio_channel_mask_t;
+
typedef struct AAudioStreamStruct AAudioStream;
typedef struct AAudioStreamBuilderStruct AAudioStreamBuilder;
@@ -699,6 +854,11 @@
* If an exact value is specified then an opened stream will use that value.
* If a stream cannot be opened with the specified value then the open will fail.
*
+ * The channel count set here may differ from the channel count implied by a channel
+ * mask set via {@link AAudioStreamBuilder_setChannelMask}. If both this function and
+ * {@link AAudioStreamBuilder_setChannelMask} are called, the most recent call takes
+ * effect.
+ *
* Available since API level 26.
*
* @param builder reference provided by AAudio_createStreamBuilder()
@@ -714,6 +874,8 @@
*
* @param builder reference provided by AAudio_createStreamBuilder()
* @param samplesPerFrame Number of samples in a frame.
+ *
+ * @deprecated use {@link AAudioStreamBuilder_setChannelCount}
*/
AAUDIO_API void AAudioStreamBuilder_setSamplesPerFrame(AAudioStreamBuilder* builder,
int32_t samplesPerFrame) __INTRODUCED_IN(26);
@@ -836,6 +998,37 @@
aaudio_content_type_t contentType) __INTRODUCED_IN(28);
/**
+ * Sets the behavior affecting whether spatialization will be used.
+ *
+ * The AAudio system will use this information to select whether the stream will go
+ * through a spatializer effect or not when the effect is supported and enabled.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param spatializationBehavior the desired behavior with regard to spatialization, e.g.
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+ aaudio_spatialization_behavior_t spatializationBehavior) __INTRODUCED_IN(32);
+
+/**
+ * Specifies whether the audio data of this output stream has already been processed for
+ * spatialization.
+ *
+ * If the stream content has already been processed for spatialization, setting this to
+ * true prevents issues such as double processing on platforms that would otherwise
+ * spatialize the audio data.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param isSpatialized true if the content is already processed for binaural or transaural spatial
+ * rendering, false otherwise.
+ */
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+ bool isSpatialized) __INTRODUCED_IN(32);
+
+/**
* Set the input (capture) preset for the stream.
*
* The AAudio system will use this information to optimize the
@@ -1136,6 +1329,32 @@
AAUDIO_API aaudio_result_t AAudioStreamBuilder_delete(AAudioStreamBuilder* builder)
__INTRODUCED_IN(26);
+/**
+ * Set audio channel mask for the stream.
+ *
+ * The default, if you do not call this function, is {@link #AAUDIO_UNSPECIFIED}.
+ * If neither the channel mask nor the channel count is set, then stereo will be chosen
+ * when the stream is opened.
+ * After opening a stream with an unspecified value, the application must query for the
+ * actual value, which may vary by device.
+ *
+ * If an exact value is specified then an opened stream will use that value.
+ * If a stream cannot be opened with the specified value then the open will fail.
+ *
+ * The channel count implied by the channel mask set here may differ from the channel
+ * count set via {@link AAudioStreamBuilder_setChannelCount} or
+ * {@link AAudioStreamBuilder_setSamplesPerFrame}. If more than one of these functions
+ * is called, the most recent call takes effect.
+ *
+ * Available since API level 32.
+ *
+ * @param builder reference provided by AAudio_createStreamBuilder()
+ * @param channelMask Audio channel mask desired.
+ */
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask) __INTRODUCED_IN(32);
+
// ============================================================
// Stream Control
// ============================================================
@@ -1616,6 +1835,31 @@
__INTRODUCED_IN(28);
/**
+ * Return the spatialization behavior for the stream.
+ *
+ * If none was explicitly set, it will return the default
+ * {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO} behavior.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return spatialization behavior, for example {@link #AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO}
+ */
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+ AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
+ * Return whether the content of the stream is spatialized.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return true if the content is spatialized
+ */
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream) __INTRODUCED_IN(32);
+
+/**
* Return the input preset for the stream.
*
* Available since API level 28.
@@ -1652,6 +1896,18 @@
AAUDIO_API bool AAudioStream_isPrivacySensitive(AAudioStream* stream)
__INTRODUCED_IN(30);
+/**
+ * Return the channel mask for the stream. This will be the mask set using
+ * {@link #AAudioStreamBuilder_setChannelMask}, or {@link #AAUDIO_UNSPECIFIED} otherwise.
+ *
+ * Available since API level 32.
+ *
+ * @param stream reference provided by AAudioStreamBuilder_openStream()
+ * @return actual channel mask
+ */
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+ __INTRODUCED_IN(32);
+
#ifdef __cplusplus
}
#endif
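
Taken together, the additions compose like the existing builder API. A
hypothetical caller (not part of the patch) requesting a 5.1 output stream with
default spatialization:

    AAudioStreamBuilder *builder = nullptr;
    AAudio_createStreamBuilder(&builder);
    AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);
    AAudioStreamBuilder_setSpatializationBehavior(
            builder, AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO);
    AAudioStreamBuilder_setIsContentSpatialized(builder, false);

    AAudioStream *stream = nullptr;
    aaudio_result_t result = AAudioStreamBuilder_openStream(builder, &stream);
    // After a successful open, query the actual values, which may differ:
    //   AAudioStream_getChannelMask(stream)
    //   AAudioStream_getSpatializationBehavior(stream)
    AAudioStreamBuilder_delete(builder);
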
diff --git a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
index 2d501ef..bec4393 100644
--- a/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
+++ b/media/libaaudio/src/binding/AAudioStreamConfiguration.cpp
@@ -30,7 +30,7 @@
using android::media::audio::common::AudioFormat;
AAudioStreamConfiguration::AAudioStreamConfiguration(const StreamParameters& parcelable) {
- setSamplesPerFrame(parcelable.samplesPerFrame);
+ setChannelMask(parcelable.channelMask);
setSampleRate(parcelable.sampleRate);
setDeviceId(parcelable.deviceId);
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(parcelable.sharingMode));
@@ -43,6 +43,13 @@
setUsage(parcelable.usage);
static_assert(sizeof(aaudio_content_type_t) == sizeof(parcelable.contentType));
setContentType(parcelable.contentType);
+
+ static_assert(sizeof(aaudio_spatialization_behavior_t) ==
+ sizeof(parcelable.spatializationBehavior));
+ setSpatializationBehavior(parcelable.spatializationBehavior);
+ setIsContentSpatialized(parcelable.isContentSpatialized);
+
static_assert(sizeof(aaudio_input_preset_t) == sizeof(parcelable.inputPreset));
setInputPreset(parcelable.inputPreset);
setBufferCapacity(parcelable.bufferCapacity);
@@ -63,7 +70,7 @@
StreamParameters AAudioStreamConfiguration::parcelable() const {
StreamParameters result;
- result.samplesPerFrame = getSamplesPerFrame();
+ result.channelMask = getChannelMask();
result.sampleRate = getSampleRate();
result.deviceId = getDeviceId();
static_assert(sizeof(aaudio_sharing_mode_t) == sizeof(result.sharingMode));
diff --git a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
index b7c4f70..a6541e1 100644
--- a/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
+++ b/media/libaaudio/src/binding/aidl/aaudio/StreamParameters.aidl
@@ -19,7 +19,7 @@
import android.media.audio.common.AudioFormat;
parcelable StreamParameters {
- int samplesPerFrame; // = AAUDIO_UNSPECIFIED;
+ int channelMask; // = AAUDIO_UNSPECIFIED;
int sampleRate; // = AAUDIO_UNSPECIFIED;
int deviceId; // = AAUDIO_UNSPECIFIED;
int /* aaudio_sharing_mode_t */ sharingMode; // = AAUDIO_SHARING_MODE_SHARED;
@@ -27,6 +27,8 @@
int /* aaudio_direction_t */ direction; // = AAUDIO_DIRECTION_OUTPUT;
int /* aaudio_usage_t */ usage; // = AAUDIO_UNSPECIFIED;
int /* aaudio_content_type_t */ contentType; // = AAUDIO_UNSPECIFIED;
+ int /* aaudio_spatialization_behavior_t */ spatializationBehavior; // = AAUDIO_UNSPECIFIED;
+ boolean isContentSpatialized; // = false;
int /* aaudio_input_preset_t */ inputPreset; // = AAUDIO_UNSPECIFIED;
int bufferCapacity; // = AAUDIO_UNSPECIFIED;
int /* aaudio_allowed_capture_policy_t */ allowedCapturePolicy; // = AAUDIO_UNSPECIFIED;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 6d2d464..f933b29 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -123,12 +123,14 @@
request.getConfiguration().setDeviceId(getDeviceId());
request.getConfiguration().setSampleRate(getSampleRate());
- request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
request.getConfiguration().setDirection(getDirection());
request.getConfiguration().setSharingMode(getSharingMode());
+ request.getConfiguration().setChannelMask(getChannelMask());
request.getConfiguration().setUsage(getUsage());
request.getConfiguration().setContentType(getContentType());
+ request.getConfiguration().setSpatializationBehavior(getSpatializationBehavior());
+ request.getConfiguration().setIsContentSpatialized(isContentSpatialized());
request.getConfiguration().setInputPreset(getInputPreset());
request.getConfiguration().setPrivacySensitive(isPrivacySensitive());
@@ -138,7 +140,8 @@
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
if (mServiceStreamHandle < 0
- && request.getConfiguration().getSamplesPerFrame() == 1 // mono?
+ && (request.getConfiguration().getSamplesPerFrame() == 1
+ || request.getConfiguration().getChannelMask() == AAUDIO_CHANNEL_MONO)
&& getDirection() == AAUDIO_DIRECTION_OUTPUT
&& !isInService()) {
// if that failed then try switching from mono to stereo if OUTPUT.
@@ -146,7 +149,7 @@
// that writes to a stereo MMAP stream.
ALOGD("%s() - openStream() returned %d, try switching from MONO to STEREO",
__func__, mServiceStreamHandle);
- request.getConfiguration().setSamplesPerFrame(2); // stereo
+ request.getConfiguration().setChannelMask(AAUDIO_CHANNEL_STEREO);
mServiceStreamHandle = mServiceInterface.openStream(request, configurationOutput);
}
if (mServiceStreamHandle < 0) {
@@ -174,9 +177,10 @@
goto error;
}
// Save results of the open.
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(configurationOutput.getSamplesPerFrame());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(configurationOutput.getChannelMask());
}
+
mDeviceChannelCount = configurationOutput.getSamplesPerFrame();
setSampleRate(configurationOutput.getSampleRate());
@@ -186,6 +190,8 @@
setUsage(configurationOutput.getUsage());
setContentType(configurationOutput.getContentType());
+ setSpatializationBehavior(configurationOutput.getSpatializationBehavior());
+ setIsContentSpatialized(configurationOutput.isContentSpatialized());
setInputPreset(configurationOutput.getInputPreset());
// Save device format so we can do format conversion and volume scaling together.
diff --git a/media/libaaudio/src/core/AAudioAudio.cpp b/media/libaaudio/src/core/AAudioAudio.cpp
index d103aca..f07e66e 100644
--- a/media/libaaudio/src/core/AAudioAudio.cpp
+++ b/media/libaaudio/src/core/AAudioAudio.cpp
@@ -128,7 +128,8 @@
int32_t samplesPerFrame)
{
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
- streamBuilder->setSamplesPerFrame(samplesPerFrame);
+ const aaudio_channel_mask_t channelMask = AAudioConvert_channelCountToMask(samplesPerFrame);
+ streamBuilder->setChannelMask(channelMask);
}
AAUDIO_API void AAudioStreamBuilder_setDirection(AAudioStreamBuilder* builder,
@@ -166,6 +167,18 @@
streamBuilder->setContentType(contentType);
}
+AAUDIO_API void AAudioStreamBuilder_setSpatializationBehavior(AAudioStreamBuilder* builder,
+ aaudio_spatialization_behavior_t spatializationBehavior) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setSpatializationBehavior(spatializationBehavior);
+}
+
+AAUDIO_API void AAudioStreamBuilder_setIsContentSpatialized(AAudioStreamBuilder* builder,
+ bool isSpatialized) {
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setIsContentSpatialized(isSpatialized);
+}
+
AAUDIO_API void AAudioStreamBuilder_setInputPreset(AAudioStreamBuilder* builder,
aaudio_input_preset_t inputPreset) {
AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
@@ -223,6 +236,13 @@
streamBuilder->setFramesPerDataCallback(frames);
}
+AAUDIO_API void AAudioStreamBuilder_setChannelMask(AAudioStreamBuilder* builder,
+ aaudio_channel_mask_t channelMask)
+{
+ AudioStreamBuilder *streamBuilder = convertAAudioBuilderToStreamBuilder(builder);
+ streamBuilder->setChannelMask(channelMask);
+}
+
AAUDIO_API aaudio_result_t AAudioStreamBuilder_openStream(AAudioStreamBuilder* builder,
AAudioStream** streamPtr)
{
@@ -495,6 +515,19 @@
return audioStream->getContentType();
}
+AAUDIO_API aaudio_spatialization_behavior_t AAudioStream_getSpatializationBehavior(
+ AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->getSpatializationBehavior();
+}
+
+AAUDIO_API bool AAudioStream_isContentSpatialized(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ return audioStream->isContentSpatialized();
+}
+
AAUDIO_API aaudio_input_preset_t AAudioStream_getInputPreset(AAudioStream* stream)
{
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
@@ -562,3 +595,11 @@
AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
return audioStream->isPrivacySensitive();
}
+
+AAUDIO_API aaudio_channel_mask_t AAudioStream_getChannelMask(AAudioStream* stream)
+{
+ AudioStream *audioStream = convertAAudioStreamToAudioStream(stream);
+ const aaudio_channel_mask_t channelMask = audioStream->getChannelMask();
+ // Do not return channel index masks as they are not public.
+ return AAudio_isChannelIndexMask(channelMask) ? AAUDIO_UNSPECIFIED : channelMask;
+}
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.cpp b/media/libaaudio/src/core/AAudioStreamParameters.cpp
index acfac24..dc242b8 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.cpp
+++ b/media/libaaudio/src/core/AAudioStreamParameters.cpp
@@ -44,11 +44,14 @@
mBufferCapacity = other.mBufferCapacity;
mUsage = other.mUsage;
mContentType = other.mContentType;
+ mSpatializationBehavior = other.mSpatializationBehavior;
+ mIsContentSpatialized = other.mIsContentSpatialized;
mInputPreset = other.mInputPreset;
mAllowedCapturePolicy = other.mAllowedCapturePolicy;
mIsPrivacySensitive = other.mIsPrivacySensitive;
mOpPackageName = other.mOpPackageName;
mAttributionTag = other.mAttributionTag;
+ mChannelMask = other.mChannelMask;
}
static aaudio_result_t isFormatValid(audio_format_t format) {
@@ -160,6 +163,19 @@
// break;
}
+ switch (mSpatializationBehavior) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+ break; // valid
+ default:
+ ALOGD("spatialization behavior not valid = %d", mSpatializationBehavior);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ // break;
+ }
+
+ // no validation required for mIsContentSpatialized
+
switch (mInputPreset) {
case AAUDIO_UNSPECIFIED:
case AAUDIO_INPUT_PRESET_GENERIC:
@@ -187,7 +203,94 @@
// break;
}
- return AAUDIO_OK;
+ return validateChannelMask();
+}
+
+aaudio_result_t AAudioStreamParameters::validateChannelMask() const {
+ if (mChannelMask == AAUDIO_UNSPECIFIED) {
+ return AAUDIO_OK;
+ }
+
+ if (mChannelMask & AAUDIO_CHANNEL_BIT_INDEX) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_INDEX_MASK_1:
+ case AAUDIO_CHANNEL_INDEX_MASK_2:
+ case AAUDIO_CHANNEL_INDEX_MASK_3:
+ case AAUDIO_CHANNEL_INDEX_MASK_4:
+ case AAUDIO_CHANNEL_INDEX_MASK_5:
+ case AAUDIO_CHANNEL_INDEX_MASK_6:
+ case AAUDIO_CHANNEL_INDEX_MASK_7:
+ case AAUDIO_CHANNEL_INDEX_MASK_8:
+ case AAUDIO_CHANNEL_INDEX_MASK_9:
+ case AAUDIO_CHANNEL_INDEX_MASK_10:
+ case AAUDIO_CHANNEL_INDEX_MASK_11:
+ case AAUDIO_CHANNEL_INDEX_MASK_12:
+ case AAUDIO_CHANNEL_INDEX_MASK_13:
+ case AAUDIO_CHANNEL_INDEX_MASK_14:
+ case AAUDIO_CHANNEL_INDEX_MASK_15:
+ case AAUDIO_CHANNEL_INDEX_MASK_16:
+ case AAUDIO_CHANNEL_INDEX_MASK_17:
+ case AAUDIO_CHANNEL_INDEX_MASK_18:
+ case AAUDIO_CHANNEL_INDEX_MASK_19:
+ case AAUDIO_CHANNEL_INDEX_MASK_20:
+ case AAUDIO_CHANNEL_INDEX_MASK_21:
+ case AAUDIO_CHANNEL_INDEX_MASK_22:
+ case AAUDIO_CHANNEL_INDEX_MASK_23:
+ case AAUDIO_CHANNEL_INDEX_MASK_24:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel index mask %#x", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
+
+ if (getDirection() == AAUDIO_DIRECTION_INPUT) {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x, IN", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ } else {
+ switch (mChannelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ case AAUDIO_CHANNEL_STEREO:
+ case AAUDIO_CHANNEL_2POINT1:
+ case AAUDIO_CHANNEL_TRI:
+ case AAUDIO_CHANNEL_TRI_BACK:
+ case AAUDIO_CHANNEL_3POINT1:
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ case AAUDIO_CHANNEL_QUAD:
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ case AAUDIO_CHANNEL_SURROUND:
+ case AAUDIO_CHANNEL_PENTA:
+ case AAUDIO_CHANNEL_5POINT1:
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ case AAUDIO_CHANNEL_6POINT1:
+ case AAUDIO_CHANNEL_7POINT1:
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT4:
+ case AAUDIO_CHANNEL_9POINT1POINT6:
+ return AAUDIO_OK;
+ default:
+ ALOGD("Invalid channel mask %#x. OUT", mChannelMask);
+ return AAUDIO_ERROR_ILLEGAL_ARGUMENT;
+ }
+ }
}
void AAudioStreamParameters::dump() const {
@@ -195,12 +298,15 @@
ALOGD("mSessionId = %6d", mSessionId);
ALOGD("mSampleRate = %6d", mSampleRate);
ALOGD("mSamplesPerFrame = %6d", mSamplesPerFrame);
+ ALOGD("mChannelMask = %#x", mChannelMask);
ALOGD("mSharingMode = %6d", (int)mSharingMode);
ALOGD("mAudioFormat = %6d", (int)mAudioFormat);
ALOGD("mDirection = %6d", mDirection);
ALOGD("mBufferCapacity = %6d", mBufferCapacity);
ALOGD("mUsage = %6d", mUsage);
ALOGD("mContentType = %6d", mContentType);
+ ALOGD("mSpatializationBehavior = %6d", mSpatializationBehavior);
+ ALOGD("mIsContentSpatialized = %s", mIsContentSpatialized ? "true" : "false");
ALOGD("mInputPreset = %6d", mInputPreset);
ALOGD("mAllowedCapturePolicy = %6d", mAllowedCapturePolicy);
ALOGD("mIsPrivacySensitive = %s", mIsPrivacySensitive ? "true" : "false");
diff --git a/media/libaaudio/src/core/AAudioStreamParameters.h b/media/libaaudio/src/core/AAudioStreamParameters.h
index 5737052..fed036b 100644
--- a/media/libaaudio/src/core/AAudioStreamParameters.h
+++ b/media/libaaudio/src/core/AAudioStreamParameters.h
@@ -49,13 +49,6 @@
return mSamplesPerFrame;
}
- /**
- * This is also known as channelCount.
- */
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
audio_format_t getFormat() const {
return mAudioFormat;
}
@@ -104,6 +97,22 @@
mContentType = contentType;
}
+ aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+ return mSpatializationBehavior;
+ }
+
+ void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+ mSpatializationBehavior = spatializationBehavior;
+ }
+
+ bool isContentSpatialized() const {
+ return mIsContentSpatialized;
+ }
+
+ void setIsContentSpatialized(bool isSpatialized) {
+ mIsContentSpatialized = isSpatialized;
+ }
+
aaudio_input_preset_t getInputPreset() const {
return mInputPreset;
}
@@ -153,6 +162,15 @@
mAttributionTag = attributionTag;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return bytes per frame of getFormat()
*/
@@ -171,6 +189,8 @@
void dump() const;
private:
+    aaudio_result_t validateChannelMask() const;
+
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
@@ -179,6 +199,9 @@
aaudio_direction_t mDirection = AAUDIO_DIRECTION_OUTPUT;
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_spatialization_behavior_t mSpatializationBehavior
+ = AAUDIO_UNSPECIFIED;
+ bool mIsContentSpatialized = false;
aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
int32_t mBufferCapacity = AAUDIO_UNSPECIFIED;
aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_UNSPECIFIED;
@@ -186,6 +209,7 @@
bool mIsPrivacySensitive = false;
std::optional<std::string> mOpPackageName = {};
std::optional<std::string> mAttributionTag = {};
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
};
} /* namespace aaudio */
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 09d9535..06f05b0 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -76,6 +76,7 @@
// Copy parameters from the Builder because the Builder may be deleted after this call.
// TODO AudioStream should be a subclass of AudioStreamParameters
mSamplesPerFrame = builder.getSamplesPerFrame();
+ mChannelMask = builder.getChannelMask();
mSampleRate = builder.getSampleRate();
mDeviceId = builder.getDeviceId();
mFormat = builder.getFormat();
@@ -91,6 +92,12 @@
if (mContentType == AAUDIO_UNSPECIFIED) {
mContentType = AAUDIO_CONTENT_TYPE_MUSIC;
}
+ mSpatializationBehavior = builder.getSpatializationBehavior();
+ // for consistency with other properties, note UNSPECIFIED is the same as AUTO
+ if (mSpatializationBehavior == AAUDIO_UNSPECIFIED) {
+ mSpatializationBehavior = AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO;
+ }
+ mIsContentSpatialized = builder.isContentSpatialized();
mInputPreset = builder.getInputPreset();
if (mInputPreset == AAUDIO_UNSPECIFIED) {
mInputPreset = AAUDIO_INPUT_PRESET_VOICE_RECOGNITION;
@@ -595,6 +602,7 @@
void AudioStream::setDuckAndMuteVolume(float duckAndMuteVolume) {
ALOGD("%s() to %f", __func__, duckAndMuteVolume);
+ std::lock_guard<std::mutex> lock(mStreamLock);
mDuckAndMuteVolume = duckAndMuteVolume;
doSetVolume(); // apply this change
}
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 9835c8c..7896e63 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -253,6 +253,14 @@
return mContentType;
}
+ aaudio_spatialization_behavior_t getSpatializationBehavior() const {
+ return mSpatializationBehavior;
+ }
+
+ bool isContentSpatialized() const {
+ return mIsContentSpatialized;
+ }
+
aaudio_input_preset_t getInputPreset() const {
return mInputPreset;
}
@@ -270,7 +278,8 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setFormat() have been called.
+ * This is only valid after setChannelMask() and setFormat()
+ * have been called.
*/
int32_t getBytesPerFrame() const {
return mSamplesPerFrame * getBytesPerSample();
@@ -284,7 +293,7 @@
}
/**
- * This is only valid after setSamplesPerFrame() and setDeviceFormat() have been called.
+ * This is only valid after setChannelMask() and setDeviceFormat() have been called.
*/
int32_t getBytesPerDeviceFrame() const {
return getSamplesPerFrame() * audio_bytes_per_sample(getDeviceFormat());
@@ -318,6 +327,15 @@
return mFramesPerDataCallback;
}
+ aaudio_channel_mask_t getChannelMask() const {
+ return mChannelMask;
+ }
+
+ void setChannelMask(aaudio_channel_mask_t channelMask) {
+ mChannelMask = channelMask;
+ mSamplesPerFrame = AAudioConvert_channelMaskToCount(channelMask);
+ }
+
/**
* @return true if data callback has been specified
*/
@@ -495,11 +513,6 @@
}
// This should not be called after the open() call.
- void setSamplesPerFrame(int32_t samplesPerFrame) {
- mSamplesPerFrame = samplesPerFrame;
- }
-
- // This should not be called after the open() call.
void setFramesPerBurst(int32_t framesPerBurst) {
mFramesPerBurst = framesPerBurst;
}
@@ -589,6 +602,14 @@
mContentType = contentType;
}
+ void setSpatializationBehavior(aaudio_spatialization_behavior_t spatializationBehavior) {
+ mSpatializationBehavior = spatializationBehavior;
+ }
+
+ void setIsContentSpatialized(bool isContentSpatialized) {
+ mIsContentSpatialized = isContentSpatialized;
+ }
+
/**
* This should not be called after the open() call.
*/
@@ -633,6 +654,7 @@
// These do not change after open().
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
+ aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
int32_t mSampleRate = AAUDIO_UNSPECIFIED;
int32_t mDeviceId = AAUDIO_UNSPECIFIED;
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
@@ -645,6 +667,8 @@
aaudio_usage_t mUsage = AAUDIO_UNSPECIFIED;
aaudio_content_type_t mContentType = AAUDIO_UNSPECIFIED;
+ aaudio_spatialization_behavior_t mSpatializationBehavior = AAUDIO_UNSPECIFIED;
+ bool mIsContentSpatialized = false;
aaudio_input_preset_t mInputPreset = AAUDIO_UNSPECIFIED;
aaudio_allowed_capture_policy_t mAllowedCapturePolicy = AAUDIO_ALLOW_CAPTURE_BY_ALL;
bool mIsPrivacySensitive = false;
diff --git a/media/libaaudio/src/core/AudioStreamBuilder.cpp b/media/libaaudio/src/core/AudioStreamBuilder.cpp
index e015592..5e1e007 100644
--- a/media/libaaudio/src/core/AudioStreamBuilder.cpp
+++ b/media/libaaudio/src/core/AudioStreamBuilder.cpp
@@ -268,8 +268,8 @@
void AudioStreamBuilder::logParameters() const {
// This is very helpful for debugging in the future. Please leave it in.
- ALOGI("rate = %6d, channels = %d, format = %d, sharing = %s, dir = %s",
- getSampleRate(), getSamplesPerFrame(), getFormat(),
+ ALOGI("rate = %6d, channels = %d, channelMask = %#x, format = %d, sharing = %s, dir = %s",
+ getSampleRate(), getSamplesPerFrame(), getChannelMask(), getFormat(),
AAudio_convertSharingModeToShortText(getSharingMode()),
AAudio_convertDirectionToText(getDirection()));
ALOGI("device = %6d, sessionId = %d, perfMode = %d, callback: %s with frames = %d",
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index dc66742..fe8fb19 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -65,11 +65,8 @@
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
// TODO Support UNSPECIFIED in AudioRecord. For now, use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_in_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), true /*isInput*/);
size_t frameCount = (builder.getBufferCapacity() == AAUDIO_UNSPECIFIED) ? 0
: builder.getBufferCapacity();
@@ -115,7 +112,7 @@
constexpr int32_t kMostLikelySampleRateForFast = 48000;
if (getFormat() == AUDIO_FORMAT_PCM_FLOAT
&& perfMode == AAUDIO_PERFORMANCE_MODE_LOW_LATENCY
- && (samplesPerFrame <= 2) // FAST only for mono and stereo
+ && (audio_channel_count_from_in_mask(channelMask) <= 2) // FAST only for mono and stereo
&& (getSampleRate() == kMostLikelySampleRateForFast
|| getSampleRate() == AAUDIO_UNSPECIFIED)) {
setDeviceFormat(AUDIO_FORMAT_PCM_16_BIT);
@@ -228,7 +225,9 @@
.set(AMEDIAMETRICS_PROP_ENCODINGCLIENT, toString(requestedFormat).c_str()).record();
// Get the actual values from the AudioRecord.
- setSamplesPerFrame(mAudioRecord->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioRecord->channelMask(), true /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setSampleRate(mAudioRecord->getSampleRate());
setBufferCapacity(getBufferCapacityFromDevice());
setFramesPerBurst(getFramesPerBurstFromDevice());
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 1d412c0..17736fc 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -66,13 +66,8 @@
const aaudio_session_id_t requestedSessionId = builder.getSessionId();
const audio_session_t sessionId = AAudioConvert_aaudioToAndroidSessionId(requestedSessionId);
- // Try to create an AudioTrack
- // Use stereo if unspecified.
- int32_t samplesPerFrame = (getSamplesPerFrame() == AAUDIO_UNSPECIFIED)
- ? 2 : getSamplesPerFrame();
- audio_channel_mask_t channelMask = samplesPerFrame <= 2 ?
- audio_channel_out_mask_from_count(samplesPerFrame) :
- audio_channel_mask_for_index_assignment_from_count(samplesPerFrame);
+ audio_channel_mask_t channelMask =
+ AAudio_getChannelMaskForOpen(getChannelMask(), getSamplesPerFrame(), false /*isInput*/);
audio_output_flags_t flags;
aaudio_performance_mode_t perfMode = getPerformanceMode();
@@ -140,7 +135,9 @@
const audio_usage_t usage =
AAudioConvert_usageToInternal(builder.getUsage());
const audio_flags_mask_t attributesFlags =
- AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy());
+ AAudioConvert_allowCapturePolicyToAudioFlagsMask(builder.getAllowedCapturePolicy(),
+ builder.getSpatializationBehavior(),
+ builder.isContentSpatialized());
const audio_attributes_t attributes = {
.content_type = contentType,
@@ -199,7 +196,9 @@
doSetVolume();
// Get the actual values from the AudioTrack.
- setSamplesPerFrame(mAudioTrack->channelCount());
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ mAudioTrack->channelMask(), false /*isInput*/,
+ AAudio_isChannelIndexMask(getChannelMask())));
setFormat(mAudioTrack->format());
setDeviceFormat(mAudioTrack->format());
setSampleRate(mAudioTrack->getSampleRate());
diff --git a/media/libaaudio/src/libaaudio.map.txt b/media/libaaudio/src/libaaudio.map.txt
index 1dd44d1..f45b816 100644
--- a/media/libaaudio/src/libaaudio.map.txt
+++ b/media/libaaudio/src/libaaudio.map.txt
@@ -25,6 +25,9 @@
AAudioStreamBuilder_setPrivacySensitive; # introduced=30
AAudioStreamBuilder_setPackageName; # introduced=31
AAudioStreamBuilder_setAttributionTag; # introduced=31
+ AAudioStreamBuilder_setChannelMask; # introduced=32
+ AAudioStreamBuilder_setSpatializationBehavior; # introduced=32
+ AAudioStreamBuilder_setIsContentSpatialized; # introduced=32
AAudioStreamBuilder_openStream;
AAudioStreamBuilder_delete;
AAudioStream_close;
@@ -61,6 +64,9 @@
AAudioStream_isMMapUsed;
AAudioStream_isPrivacySensitive; # introduced=30
AAudioStream_release; # introduced=30
+ AAudioStream_getChannelMask; # introduced=32
+ AAudioStream_getSpatializationBehavior; # introduced=32
+ AAudioStream_isContentSpatialized; # introduced=32
local:
*;
};
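The three new builder symbols pair with the three new stream getters exported above. A hypothetical NDK client exercising them (error handling abbreviated; AAUDIO_CHANNEL_5POINT1 is just an example value):

    #include <aaudio/AAudio.h>

    // Hypothetical NDK usage of the new channel-mask and spatialization API.
    void openSpatialized5point1() {
        AAudioStreamBuilder *builder = nullptr;
        if (AAudio_createStreamBuilder(&builder) != AAUDIO_OK) return;
        AAudioStreamBuilder_setChannelMask(builder, AAUDIO_CHANNEL_5POINT1);
        AAudioStreamBuilder_setSpatializationBehavior(
                builder, AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO);
        AAudioStreamBuilder_setIsContentSpatialized(builder, true);

        AAudioStream *stream = nullptr;
        if (AAudioStreamBuilder_openStream(builder, &stream) == AAUDIO_OK) {
            // The stream reports the mask actually granted by the device.
            (void) AAudioStream_getChannelMask(stream);
            AAudioStream_close(stream);
        }
        AAudioStreamBuilder_delete(builder);
    }
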
diff --git a/media/libaaudio/src/utility/AAudioUtilities.cpp b/media/libaaudio/src/utility/AAudioUtilities.cpp
index d795725..e44ccee 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.cpp
+++ b/media/libaaudio/src/utility/AAudioUtilities.cpp
@@ -235,20 +235,46 @@
}
audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- aaudio_allowed_capture_policy_t policy) {
+ aaudio_allowed_capture_policy_t policy,
+ aaudio_spatialization_behavior_t spatializationBehavior,
+ bool isContentSpatialized) {
+ audio_flags_mask_t flagsMask = AUDIO_FLAG_NONE;
switch (policy) {
case AAUDIO_UNSPECIFIED:
case AAUDIO_ALLOW_CAPTURE_BY_ALL:
- return AUDIO_FLAG_NONE;
+ // flagsMask is not modified
+ break;
case AAUDIO_ALLOW_CAPTURE_BY_SYSTEM:
- return AUDIO_FLAG_NO_MEDIA_PROJECTION;
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NO_MEDIA_PROJECTION);
+ break;
case AAUDIO_ALLOW_CAPTURE_BY_NONE:
- return static_cast<audio_flags_mask_t>(
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask |
AUDIO_FLAG_NO_MEDIA_PROJECTION | AUDIO_FLAG_NO_SYSTEM_CAPTURE);
+ break;
default:
- ALOGE("%s() 0x%08X unrecognized", __func__, policy);
- return AUDIO_FLAG_NONE; //
+ ALOGE("%s() 0x%08X unrecognized capture policy", __func__, policy);
+ // flagsMask is not modified
}
+
+ switch (spatializationBehavior) {
+ case AAUDIO_UNSPECIFIED:
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_AUTO:
+ // flagsMask is not modified
+ break;
+ case AAUDIO_SPATIALIZATION_BEHAVIOR_NEVER:
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_NEVER_SPATIALIZE);
+ break;
+ default:
+ ALOGE("%s() 0x%08X unrecognized spatialization behavior",
+ __func__, spatializationBehavior);
+ // flagsMask is not modified
+ }
+
+ if (isContentSpatialized) {
+ flagsMask = static_cast<audio_flags_mask_t>(flagsMask | AUDIO_FLAG_CONTENT_SPATIALIZED);
+ }
+
+ return flagsMask;
}
audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
@@ -256,6 +282,248 @@
return privacySensitive ? AUDIO_FLAG_CAPTURE_PRIVATE : AUDIO_FLAG_NONE;
}
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_IN_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_IN_STEREO;
+ case AAUDIO_CHANNEL_FRONT_BACK:
+ return AUDIO_CHANNEL_IN_FRONT_BACK;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_IN_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_IN_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_IN_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_IN_3POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_IN_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AAUDIO_CHANNEL_MONO:
+ return AUDIO_CHANNEL_OUT_MONO;
+ case AAUDIO_CHANNEL_STEREO:
+ return AUDIO_CHANNEL_OUT_STEREO;
+ case AAUDIO_CHANNEL_2POINT1:
+ return AUDIO_CHANNEL_OUT_2POINT1;
+ case AAUDIO_CHANNEL_TRI:
+ return AUDIO_CHANNEL_OUT_TRI;
+ case AAUDIO_CHANNEL_TRI_BACK:
+ return AUDIO_CHANNEL_OUT_TRI_BACK;
+ case AAUDIO_CHANNEL_3POINT1:
+ return AUDIO_CHANNEL_OUT_3POINT1;
+ case AAUDIO_CHANNEL_2POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT0POINT2;
+ case AAUDIO_CHANNEL_2POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_2POINT1POINT2;
+ case AAUDIO_CHANNEL_3POINT0POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT0POINT2;
+ case AAUDIO_CHANNEL_3POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_3POINT1POINT2;
+ case AAUDIO_CHANNEL_QUAD:
+ return AUDIO_CHANNEL_OUT_QUAD;
+ case AAUDIO_CHANNEL_QUAD_SIDE:
+ return AUDIO_CHANNEL_OUT_QUAD_SIDE;
+ case AAUDIO_CHANNEL_SURROUND:
+ return AUDIO_CHANNEL_OUT_SURROUND;
+ case AAUDIO_CHANNEL_PENTA:
+ return AUDIO_CHANNEL_OUT_PENTA;
+ case AAUDIO_CHANNEL_5POINT1:
+ return AUDIO_CHANNEL_OUT_5POINT1;
+ case AAUDIO_CHANNEL_5POINT1_SIDE:
+ return AUDIO_CHANNEL_OUT_5POINT1_SIDE;
+ case AAUDIO_CHANNEL_5POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT2;
+ case AAUDIO_CHANNEL_5POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_5POINT1POINT4;
+ case AAUDIO_CHANNEL_6POINT1:
+ return AUDIO_CHANNEL_OUT_6POINT1;
+ case AAUDIO_CHANNEL_7POINT1:
+ return AUDIO_CHANNEL_OUT_7POINT1;
+ case AAUDIO_CHANNEL_7POINT1POINT2:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT2;
+ case AAUDIO_CHANNEL_7POINT1POINT4:
+ return AUDIO_CHANNEL_OUT_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AAUDIO_CHANNEL_9POINT1POINT4:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT4;
+ // case AAUDIO_CHANNEL_9POINT1POINT6:
+ // return AUDIO_CHANNEL_OUT_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput) {
+ if (isInput) {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_IN_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_IN_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_IN_FRONT_BACK:
+ return AAUDIO_CHANNEL_FRONT_BACK;
+ case AUDIO_CHANNEL_IN_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_IN_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_IN_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_IN_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_IN_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ } else {
+ switch (channelMask) {
+ case AUDIO_CHANNEL_OUT_MONO:
+ return AAUDIO_CHANNEL_MONO;
+ case AUDIO_CHANNEL_OUT_STEREO:
+ return AAUDIO_CHANNEL_STEREO;
+ case AUDIO_CHANNEL_OUT_2POINT1:
+ return AAUDIO_CHANNEL_2POINT1;
+ case AUDIO_CHANNEL_OUT_TRI:
+ return AAUDIO_CHANNEL_TRI;
+ case AUDIO_CHANNEL_OUT_TRI_BACK:
+ return AAUDIO_CHANNEL_TRI_BACK;
+ case AUDIO_CHANNEL_OUT_3POINT1:
+ return AAUDIO_CHANNEL_3POINT1;
+ case AUDIO_CHANNEL_OUT_2POINT0POINT2:
+ return AAUDIO_CHANNEL_2POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_2POINT1POINT2:
+ return AAUDIO_CHANNEL_2POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT0POINT2:
+ return AAUDIO_CHANNEL_3POINT0POINT2;
+ case AUDIO_CHANNEL_OUT_3POINT1POINT2:
+ return AAUDIO_CHANNEL_3POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_QUAD:
+ return AAUDIO_CHANNEL_QUAD;
+ case AUDIO_CHANNEL_OUT_QUAD_SIDE:
+ return AAUDIO_CHANNEL_QUAD_SIDE;
+ case AUDIO_CHANNEL_OUT_SURROUND:
+ return AAUDIO_CHANNEL_SURROUND;
+ case AUDIO_CHANNEL_OUT_PENTA:
+ return AAUDIO_CHANNEL_PENTA;
+ case AUDIO_CHANNEL_OUT_5POINT1:
+ return AAUDIO_CHANNEL_5POINT1;
+ case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
+ return AAUDIO_CHANNEL_5POINT1_SIDE;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ return AAUDIO_CHANNEL_5POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ return AAUDIO_CHANNEL_5POINT1POINT4;
+ case AUDIO_CHANNEL_OUT_6POINT1:
+ return AAUDIO_CHANNEL_6POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ return AAUDIO_CHANNEL_7POINT1;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ return AAUDIO_CHANNEL_7POINT1POINT2;
+ case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ return AAUDIO_CHANNEL_7POINT1POINT4;
+ // TODO: add 9point1point4 and 9point1point6 when they are added in audio-hal-enums.h
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT4:
+ // return AAUDIO_CHANNEL_9POINT1POINT4;
+ // case AUDIO_CHANNEL_OUT_9POINT1POINT6:
+ // return AAUDIO_CHANNEL_9POINT1POINT6;
+ default:
+ ALOGE("%s() %#x unrecognized", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ }
+}
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask) {
+ return __builtin_popcount(channelMask & ~AAUDIO_CHANNEL_BIT_INDEX);
+}
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount) {
+ if (channelCount < 0 || channelCount > AUDIO_CHANNEL_COUNT_MAX) {
+ return AAUDIO_CHANNEL_INVALID;
+ }
+
+ if (channelCount == 0) {
+ return AAUDIO_UNSPECIFIED;
+ }
+
+    // Return an index mask with the lowest channelCount bits set.
+ return AAUDIO_CHANNEL_BIT_INDEX | ((1 << channelCount) - 1);
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask) {
+ if (audio_channel_mask_get_representation(channelMask) != AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AAUDIO_CHANNEL_INVALID;
+ }
+ return (channelMask & ~AUDIO_CHANNEL_INDEX_HDR) | AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask) {
+ if (!AAudio_isChannelIndexMask(channelMask)) {
+ ALOGE("%s() %#x not an index mask", __func__, channelMask);
+ return AUDIO_CHANNEL_INVALID;
+ }
+ return audio_channel_mask_for_index_assignment_from_count(
+ AAudioConvert_channelMaskToCount(channelMask));
+}
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired) {
+ if (audio_channel_mask_get_representation(channelMask) == AUDIO_CHANNEL_REPRESENTATION_INDEX) {
+ return AAudioConvert_androidToAAudioChannelIndexMask(channelMask);
+ }
+ if (indexMaskRequired) {
+        // An index mask is required, but channelMask here is a position mask.
+ const int channelCount = isInput ? audio_channel_count_from_in_mask(channelMask)
+ : audio_channel_count_from_out_mask(channelMask);
+ return AAudioConvert_channelCountToMask(channelCount);
+ }
+ return AAudioConvert_androidToAAudioChannelLayoutMask(channelMask, isInput);
+}
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput) {
+ return AAudio_isChannelIndexMask(channelMask)
+ ? AAudioConvert_aaudioToAndroidChannelIndexMask(channelMask)
+ : AAudioConvert_aaudioToAndroidChannelLayoutMask(channelMask, isInput);
+}
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask) {
+ return (channelMask & AAUDIO_CHANNEL_BIT_INDEX) == AAUDIO_CHANNEL_BIT_INDEX;
+}
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput) {
+ if (channelMask != AAUDIO_UNSPECIFIED) {
+ if (AAudio_isChannelIndexMask(channelMask) && samplesPerFrame <= 2) {
+            // When it is an index mask and the count is less than 3, use a position
+            // mask instead of an index mask for opening a stream. This may need to be
+            // revisited when channel index masks are made public.
+ return isInput ? audio_channel_in_mask_from_count(samplesPerFrame)
+ : audio_channel_out_mask_from_count(samplesPerFrame);
+ }
+ return AAudioConvert_aaudioToAndroidChannelMask(channelMask, isInput);
+ }
+
+ // Return stereo when unspecified.
+ return isInput ? AUDIO_CHANNEL_IN_STEREO : AUDIO_CHANNEL_OUT_STEREO;
+}
+
int32_t AAudioConvert_framesToBytes(int32_t numFrames,
int32_t bytesPerFrame,
int32_t *sizeInBytes) {
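AAudio_getChannelMaskForOpen() above encodes the open-time policy in three outcomes: stereo when the mask is unspecified, a position mask for 1- or 2-channel index masks, and a direct AAudio-to-Android conversion otherwise. A condensed sketch of that policy, with the constant values assumed from the internal header:

    #include <cstdint>

    // Assumed values mirroring the internal constants used above.
    constexpr uint32_t kUnspecified = 0;          // AAUDIO_UNSPECIFIED
    constexpr uint32_t kBitIndex    = 0x80000000; // AAUDIO_CHANNEL_BIT_INDEX

    enum class OpenMaskKind { StereoDefault, PositionFromCount, Converted };

    // Mirrors the policy of AAudio_getChannelMaskForOpen().
    OpenMaskKind pickOpenMask(uint32_t channelMask, int32_t samplesPerFrame) {
        if (channelMask == kUnspecified) {
            return OpenMaskKind::StereoDefault;     // stereo when unspecified
        }
        if ((channelMask & kBitIndex) != 0 && samplesPerFrame <= 2) {
            return OpenMaskKind::PositionFromCount; // small index masks open as position masks
        }
        return OpenMaskKind::Converted;             // AAudio -> Android conversion
    }
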
diff --git a/media/libaaudio/src/utility/AAudioUtilities.h b/media/libaaudio/src/utility/AAudioUtilities.h
index 82eb77d..f24df46 100644
--- a/media/libaaudio/src/utility/AAudioUtilities.h
+++ b/media/libaaudio/src/utility/AAudioUtilities.h
@@ -91,11 +91,40 @@
* @return internal audio flags mask
*/
audio_flags_mask_t AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- aaudio_allowed_capture_policy_t policy);
+ aaudio_allowed_capture_policy_t policy,
+ aaudio_spatialization_behavior_t spatializationBehavior,
+ bool isContentSpatialized);
audio_flags_mask_t AAudioConvert_privacySensitiveToAudioFlagsMask(
bool privacySensitive);
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelLayoutMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelLayoutMask(
+ audio_channel_mask_t channelMask, bool isInput);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelIndexMask(
+ audio_channel_mask_t channelMask);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelIndexMask(
+ aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_androidToAAudioChannelMask(
+ audio_channel_mask_t channelMask, bool isInput, bool indexMaskRequired);
+
+audio_channel_mask_t AAudioConvert_aaudioToAndroidChannelMask(
+ aaudio_channel_mask_t channelMask, bool isInput);
+
+bool AAudio_isChannelIndexMask(aaudio_channel_mask_t channelMask);
+
+int32_t AAudioConvert_channelMaskToCount(aaudio_channel_mask_t channelMask);
+
+aaudio_channel_mask_t AAudioConvert_channelCountToMask(int32_t channelCount);
+
+audio_channel_mask_t AAudio_getChannelMaskForOpen(
+ aaudio_channel_mask_t channelMask, int32_t samplesPerFrame, bool isInput);
+
// Note that this code may be replaced by Settings or by some other system configuration tool.
/**
@@ -318,4 +347,36 @@
std::atomic<int> mRequested{0};
std::atomic<int> mAcknowledged{0};
};
+
+enum {
+ /**
+ * Audio channel index mask, only used internally.
+ */
+ AAUDIO_CHANNEL_BIT_INDEX = 0x80000000,
+    AAUDIO_CHANNEL_INDEX_MASK_1 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 1) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_2 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 2) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_3 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 3) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_4 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 4) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_5 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 5) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_6 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 6) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_7 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 7) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_8 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 8) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_9 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 9) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_10 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 10) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_11 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 11) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_12 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 12) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_13 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 13) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_14 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 14) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_15 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 15) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_16 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 16) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_17 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 17) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_18 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 18) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_19 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 19) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_20 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 20) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_21 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 21) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_22 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 22) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_23 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 23) - 1),
+    AAUDIO_CHANNEL_INDEX_MASK_24 = AAUDIO_CHANNEL_BIT_INDEX | ((1 << 24) - 1),
+};
+
#endif //UTILITY_AAUDIO_UTILITIES_H
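Every index-mask constant above follows one formula: the marker bit OR'd with the lowest n bits, which is also what AAudioConvert_channelCountToMask() computes in the .cpp. A quick sanity sketch of the arithmetic and its popcount inverse, matching AAudioConvert_channelMaskToCount():

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kBitIndex = 0x80000000;  // AAUDIO_CHANNEL_BIT_INDEX

    constexpr uint32_t indexMaskFromCount(int n) {
        return kBitIndex | ((1u << n) - 1);     // lowest n bits set, plus the marker
    }

    int main() {
        // Round trip: mask -> count via popcount with the marker bit stripped.
        for (int n = 1; n <= 24; ++n) {
            assert(__builtin_popcount(indexMaskFromCount(n) & ~kBitIndex) == n);
        }
        return 0;
    }
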
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 321e7f9..a9ac3d9 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -520,6 +520,8 @@
return AUDIO_INPUT_FLAG_HW_AV_SYNC;
case media::AudioInputFlags::DIRECT:
return AUDIO_INPUT_FLAG_DIRECT;
+ case media::AudioInputFlags::ULTRASOUND:
+ return AUDIO_INPUT_FLAG_ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -545,6 +547,8 @@
return media::AudioInputFlags::HW_AV_SYNC;
case AUDIO_INPUT_FLAG_DIRECT:
return media::AudioInputFlags::DIRECT;
+ case AUDIO_INPUT_FLAG_ULTRASOUND:
+ return media::AudioInputFlags::ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -584,6 +588,10 @@
return AUDIO_OUTPUT_FLAG_INCALL_MUSIC;
case media::AudioOutputFlags::GAPLESS_OFFLOAD:
return AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD;
+ case media::AudioOutputFlags::SPATIALIZER:
+ return AUDIO_OUTPUT_FLAG_SPATIALIZER;
+ case media::AudioOutputFlags::ULTRASOUND:
+ return AUDIO_OUTPUT_FLAG_ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -625,6 +633,10 @@
return media::AudioOutputFlags::INCALL_MUSIC;
case AUDIO_OUTPUT_FLAG_GAPLESS_OFFLOAD:
return media::AudioOutputFlags::GAPLESS_OFFLOAD;
+ case AUDIO_OUTPUT_FLAG_SPATIALIZER:
+ return media::AudioOutputFlags::SPATIALIZER;
+ case AUDIO_OUTPUT_FLAG_ULTRASOUND:
+ return media::AudioOutputFlags::ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -850,6 +862,8 @@
return AUDIO_SOURCE_FM_TUNER;
case media::AudioSourceType::HOTWORD:
return AUDIO_SOURCE_HOTWORD;
+ case media::AudioSourceType::ULTRASOUND:
+ return AUDIO_SOURCE_ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -887,6 +901,8 @@
return media::AudioSourceType::FM_TUNER;
case AUDIO_SOURCE_HOTWORD:
return media::AudioSourceType::HOTWORD;
+ case AUDIO_SOURCE_ULTRASOUND:
+ return media::AudioSourceType::ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -1207,6 +1223,8 @@
return AUDIO_CONTENT_TYPE_MOVIE;
case media::AudioContentType::SONIFICATION:
return AUDIO_CONTENT_TYPE_SONIFICATION;
+ case media::AudioContentType::ULTRASOUND:
+ return AUDIO_CONTENT_TYPE_ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -1224,6 +1242,8 @@
return media::AudioContentType::MOVIE;
case AUDIO_CONTENT_TYPE_SONIFICATION:
return media::AudioContentType::SONIFICATION;
+ case AUDIO_CONTENT_TYPE_ULTRASOUND:
+ return media::AudioContentType::ULTRASOUND;
}
return unexpected(BAD_VALUE);
}
@@ -1361,6 +1381,10 @@
return AUDIO_FLAG_NO_SYSTEM_CAPTURE;
case media::AudioFlag::CAPTURE_PRIVATE:
return AUDIO_FLAG_CAPTURE_PRIVATE;
+ case media::AudioFlag::CONTENT_SPATIALIZED:
+ return AUDIO_FLAG_CONTENT_SPATIALIZED;
+ case media::AudioFlag::NEVER_SPATIALIZE:
+ return AUDIO_FLAG_NEVER_SPATIALIZE;
}
return unexpected(BAD_VALUE);
}
@@ -1398,6 +1422,10 @@
return media::AudioFlag::NO_SYSTEM_CAPTURE;
case AUDIO_FLAG_CAPTURE_PRIVATE:
return media::AudioFlag::CAPTURE_PRIVATE;
+ case AUDIO_FLAG_CONTENT_SPATIALIZED:
+ return media::AudioFlag::CONTENT_SPATIALIZED;
+ case AUDIO_FLAG_NEVER_SPATIALIZE:
+ return media::AudioFlag::NEVER_SPATIALIZE;
}
return unexpected(BAD_VALUE);
}
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index b0f155a..a00cb79 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -25,11 +25,13 @@
static_libs: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"av-types-aidl-cpp",
],
export_static_lib_headers: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"av-types-aidl-cpp",
],
target: {
@@ -39,7 +41,7 @@
},
}
-cc_library_shared {
+cc_library {
name: "libaudiopolicy",
srcs: [
"AudioAttributes.cpp",
@@ -112,6 +114,7 @@
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"audiopolicy-types-aidl-cpp",
"av-types-aidl-cpp",
"capture_state_listener-aidl-cpp",
@@ -138,6 +141,7 @@
export_shared_lib_headers: [
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
+ "spatializer-aidl-cpp",
"framework-permission-aidl-cpp",
"libbinder",
],
@@ -390,6 +394,9 @@
"aidl/android/media/AudioVolumeGroup.aidl",
"aidl/android/media/DeviceRole.aidl",
"aidl/android/media/SoundTriggerSession.aidl",
+ "aidl/android/media/SpatializationLevel.aidl",
+ "aidl/android/media/SpatializationMode.aidl",
+ "aidl/android/media/SpatializerHeadTrackingMode.aidl",
],
imports: [
"audio_common-aidl",
@@ -460,6 +467,7 @@
srcs: [
"aidl/android/media/GetInputForAttrResponse.aidl",
"aidl/android/media/GetOutputForAttrResponse.aidl",
+ "aidl/android/media/GetSpatializerResponse.aidl",
"aidl/android/media/Int.aidl",
"aidl/android/media/RecordClientInfo.aidl",
"aidl/android/media/IAudioPolicyService.aidl",
@@ -471,6 +479,34 @@
"audiopolicy-types-aidl",
"capture_state_listener-aidl",
"framework-permission-aidl",
+ "spatializer-aidl",
+ ],
+
+ double_loadable: true,
+ backend: {
+ cpp: {
+ min_sdk_version: "29",
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media",
+ ],
+ },
+ },
+}
+
+aidl_interface {
+ name: "spatializer-aidl",
+ unstable: true,
+ local_include_dir: "aidl",
+ host_supported: true,
+ vendor_available: true,
+ srcs: [
+ "aidl/android/media/INativeSpatializerCallback.aidl",
+ "aidl/android/media/ISpatializer.aidl",
+ "aidl/android/media/ISpatializerHeadTrackingCallback.aidl",
+ ],
+ imports: [
+ "audiopolicy-types-aidl",
],
double_loadable: true,
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 6ad5483..9091599 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -70,7 +70,8 @@
audio_session_t sessionId,
audio_io_handle_t io,
const AudioDeviceTypeAddr& device,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
sp<media::IEffect> iEffect;
sp<IMemory> cblk;
@@ -124,6 +125,7 @@
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_AudioDeviceTypeAddress(device));
request.attributionSource = mClientAttributionSource;
request.probe = probe;
+ request.notifyFramesProcessed = notifyFramesProcessed;
media::CreateEffectResponse response;
@@ -194,7 +196,8 @@
audio_session_t sessionId,
audio_io_handle_t io,
const AudioDeviceTypeAddr& device,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
effect_uuid_t type;
effect_uuid_t *pType = nullptr;
@@ -211,7 +214,8 @@
pUuid = &uuid;
}
- return set(pType, pUuid, priority, cbf, user, sessionId, io, device, probe);
+ return set(pType, pUuid, priority, cbf, user, sessionId, io,
+ device, probe, notifyFramesProcessed);
}
@@ -522,6 +526,13 @@
}
}
+void AudioEffect::framesProcessed(int32_t frames)
+{
+ if (mCbf != NULL) {
+ mCbf(EVENT_FRAMES_PROCESSED, mUserData, &frames);
+ }
+}
+
// -------------------------------------------------------------------------
status_t AudioEffect::queryNumberEffects(uint32_t *numEffects)
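The new notifyFramesProcessed argument opts a client into the EVENT_FRAMES_PROCESSED callback dispatched by framesProcessed() above. A hypothetical client-side handler following the existing effect_callback_t convention; the EVENT_FRAMES_PROCESSED constant is assumed to be added to AudioEffect.h alongside framesProcessed():

    #define LOG_TAG "EffectCallbackSketch"
    #include <media/AudioEffect.h>
    #include <utils/Log.h>

    // Hypothetical handler; framesProcessed() above forwards the frame count
    // as the callback's info pointer when notifyFramesProcessed was requested.
    static void effectCallback(int32_t event, void* user, void* info) {
        (void)user;
        if (event == android::AudioEffect::EVENT_FRAMES_PROCESSED) {
            const int32_t frames = *static_cast<int32_t*>(info);
            ALOGV("processed %d frames", frames);
        }
    }
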
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 4ff05e4..22f0295 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -974,6 +974,7 @@
.set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
// the following are NOT immutable
.set(AMEDIAMETRICS_PROP_STATE, stateToString(mActive))
+ .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
.set(AMEDIAMETRICS_PROP_SELECTEDMICDIRECTION, (int32_t)mSelectedMicDirection)
.set(AMEDIAMETRICS_PROP_SELECTEDMICFIELDDIRECTION, (double)mSelectedMicFieldDimension)
.record();
diff --git a/media/libaudioclient/AudioSystem.cpp b/media/libaudioclient/AudioSystem.cpp
index 4bfafe4..139d931 100644
--- a/media/libaudioclient/AudioSystem.cpp
+++ b/media/libaudioclient/AudioSystem.cpp
@@ -480,6 +480,12 @@
return af->systemReady();
}
+status_t AudioSystem::audioPolicyReady() {
+ const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
+ if (af == 0) return NO_INIT;
+ return af->audioPolicyReady();
+}
+
status_t AudioSystem::getFrameCountHAL(audio_io_handle_t ioHandle,
size_t* frameCount) {
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
@@ -2236,6 +2242,47 @@
return OK;
}
+status_t AudioSystem::getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ sp<media::ISpatializer>* spatializer) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (spatializer == nullptr) {
+ return BAD_VALUE;
+ }
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ media::GetSpatializerResponse response;
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ aps->getSpatializer(callback, &response)));
+
+ *spatializer = response.spatializer;
+ return OK;
+}
+
+status_t AudioSystem::canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices,
+ bool *canBeSpatialized) {
+ const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
+ if (aps == 0) {
+ return PERMISSION_DENIED;
+ }
+ audio_attributes_t attributes = attr != nullptr ? *attr : AUDIO_ATTRIBUTES_INITIALIZER;
+ audio_config_t configuration = config != nullptr ? *config : AUDIO_CONFIG_INITIALIZER;
+
+ std::optional<media::AudioAttributesInternal> attrAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_attributes_t_AudioAttributesInternal(attributes));
+ std::optional<media::AudioConfig> configAidl = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_config_t_AudioConfig(configuration));
+ std::vector<media::AudioDevice> devicesAidl = VALUE_OR_RETURN_STATUS(
+ convertContainer<std::vector<media::AudioDevice>>(devices,
+ legacy2aidl_AudioDeviceTypeAddress));
+ RETURN_STATUS_IF_ERROR(statusTFromBinderStatus(
+ aps->canBeSpatialized(attrAidl, configAidl, devicesAidl, canBeSpatialized)));
+ return OK;
+}
+
class CaptureStateListenerImpl : public media::BnCaptureStateListener,
public IBinder::DeathRecipient {
public:
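A sketch of how a native client could combine the two new AudioSystem entry points above; the callback, attributes, config, and device list are assumed to be set up elsewhere by the caller:

    #include <media/AudioSystem.h>

    using namespace android;

    // Sketch only; error handling abbreviated.
    void querySpatializerSupport(const sp<media::INativeSpatializerCallback>& callback,
                                 const audio_attributes_t& attributes,
                                 const audio_config_t& config,
                                 const AudioDeviceTypeAddrVector& devices) {
        sp<media::ISpatializer> spatializer;
        if (AudioSystem::getSpatializer(callback, &spatializer) == OK
                && spatializer != nullptr) {
            bool canBeSpatialized = false;
            if (AudioSystem::canBeSpatialized(&attributes, &config, devices,
                                              &canBeSpatialized) == OK
                    && canBeSpatialized) {
                // The attribute/config pair is eligible for the spatializer output.
            }
        }
    }
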
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index f0a105a..ad00bdb 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -25,6 +25,7 @@
#include <android/media/IAudioPolicyService.h>
#include <android-base/macros.h>
+#include <android-base/stringprintf.h>
#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
@@ -44,6 +45,7 @@
static const int kMaxLoopCountNotifications = 32;
using ::android::aidl_utils::statusTFromBinderStatus;
+using ::android::base::StringPrintf;
namespace android {
// ---------------------------------------------------------------------------
@@ -197,6 +199,7 @@
#define MM_PREFIX "android.media.audiotrack." // avoid cut-n-paste errors.
+ // Do not change this without changing the MediaMetricsService side.
// Java API 28 entries, do not change.
mMetricsItem->setCString(MM_PREFIX "streamtype", toString(track->streamType()).c_str());
mMetricsItem->setCString(MM_PREFIX "type",
@@ -212,6 +215,7 @@
mMetricsItem->setInt32(MM_PREFIX "frameCount", (int32_t)track->mFrameCount);
mMetricsItem->setCString(MM_PREFIX "attributes", toString(track->mAttributes).c_str());
mMetricsItem->setCString(MM_PREFIX "logSessionId", track->mLogSessionId.c_str());
+ mMetricsItem->setInt32(MM_PREFIX "underrunFrames", (int32_t)track->getUnderrunFrames());
}
// hand the user a snapshot of the metrics.
@@ -395,6 +399,7 @@
pid_t myPid;
uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(attributionSource.uid));
pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(attributionSource.pid));
+ std::string errorMessage;
// Note mPortId is not valid until the track is created, so omit mPortId in ALOG for set.
ALOGV("%s(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
@@ -404,86 +409,24 @@
sessionId, transferType, attributionSource.uid, attributionSource.pid);
mThreadCanCallJava = threadCanCallJava;
+
+ // These variables are pulled in an error report, so we initialize them early.
mSelectedDeviceId = selectedDeviceId;
mSessionId = sessionId;
+ mChannelMask = channelMask;
+ mReqFrameCount = mFrameCount = frameCount;
+ mSampleRate = sampleRate;
+ mOriginalSampleRate = sampleRate;
+ mAttributes = pAttributes != nullptr ? *pAttributes : AUDIO_ATTRIBUTES_INITIALIZER;
+ mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
- switch (transferType) {
- case TRANSFER_DEFAULT:
- if (sharedBuffer != 0) {
- transferType = TRANSFER_SHARED;
- } else if (cbf == NULL || threadCanCallJava) {
- transferType = TRANSFER_SYNC;
- } else {
- transferType = TRANSFER_CALLBACK;
- }
- break;
- case TRANSFER_CALLBACK:
- case TRANSFER_SYNC_NOTIF_CALLBACK:
- if (cbf == NULL || sharedBuffer != 0) {
- ALOGE("%s(): Transfer type %s but cbf == NULL || sharedBuffer != 0",
- convertTransferToText(transferType), __func__);
- status = BAD_VALUE;
- goto exit;
- }
- break;
- case TRANSFER_OBTAIN:
- case TRANSFER_SYNC:
- if (sharedBuffer != 0) {
- ALOGE("%s(): Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
- status = BAD_VALUE;
- goto exit;
- }
- break;
- case TRANSFER_SHARED:
- if (sharedBuffer == 0) {
- ALOGE("%s(): Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
- status = BAD_VALUE;
- goto exit;
- }
- break;
- default:
- ALOGE("%s(): Invalid transfer type %d",
- __func__, transferType);
- status = BAD_VALUE;
- goto exit;
- }
- mSharedBuffer = sharedBuffer;
- mTransfer = transferType;
- mDoNotReconnect = doNotReconnect;
-
- ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
- __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
-
- ALOGV("%s(): streamType %d frameCount %zu flags %04x",
- __func__, streamType, frameCount, flags);
-
- // invariant that mAudioTrack != 0 is true only after set() returns successfully
- if (mAudioTrack != 0) {
- ALOGE("%s(): Track already in use", __func__);
- status = INVALID_OPERATION;
- goto exit;
- }
-
- // handle default values first.
- if (streamType == AUDIO_STREAM_DEFAULT) {
- streamType = AUDIO_STREAM_MUSIC;
- }
- if (pAttributes == NULL) {
- if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
- ALOGE("%s(): Invalid stream type %d", __func__, streamType);
- status = BAD_VALUE;
- goto exit;
- }
- mOriginalStreamType = streamType;
-
- } else {
+ // update format and flags before storing them in mFormat, mOrigFlags and mFlags
+ if (pAttributes != NULL) {
// stream type shouldn't be looked at, this track has audio attributes
- memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
ALOGV("%s(): Building AudioTrack with attributes:"
" usage=%d content=%d flags=0x%x tags=[%s]",
__func__,
mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
- mOriginalStreamType = AUDIO_STREAM_DEFAULT;
audio_flags_to_audio_output_flags(mAttributes.flags, &flags);
}
@@ -494,23 +437,6 @@
flags = static_cast<audio_output_flags_t>(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
}
- // validate parameters
- if (!audio_is_valid_format(format)) {
- ALOGE("%s(): Invalid format %#x", __func__, format);
- status = BAD_VALUE;
- goto exit;
- }
- mFormat = format;
-
- if (!audio_is_output_channel(channelMask)) {
- ALOGE("%s(): Invalid channel mask %#x", __func__, channelMask);
- status = BAD_VALUE;
- goto exit;
- }
- mChannelMask = channelMask;
- channelCount = audio_channel_count_from_out_mask(channelMask);
- mChannelCount = channelCount;
-
// force direct flag if format is not linear PCM
// or offload was requested
if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
@@ -529,7 +455,96 @@
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
- if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
+ mFormat = format;
+ mOrigFlags = mFlags = flags;
+
+ switch (transferType) {
+ case TRANSFER_DEFAULT:
+ if (sharedBuffer != 0) {
+ transferType = TRANSFER_SHARED;
+ } else if (cbf == NULL || threadCanCallJava) {
+ transferType = TRANSFER_SYNC;
+ } else {
+ transferType = TRANSFER_CALLBACK;
+ }
+ break;
+ case TRANSFER_CALLBACK:
+ case TRANSFER_SYNC_NOTIF_CALLBACK:
+ if (cbf == NULL || sharedBuffer != 0) {
+            errorMessage = StringPrintf(
+                    "%s: Transfer type %s but cbf == NULL || sharedBuffer != 0",
+                    __func__, convertTransferToText(transferType));
+ status = BAD_VALUE;
+ goto error;
+ }
+ break;
+ case TRANSFER_OBTAIN:
+ case TRANSFER_SYNC:
+ if (sharedBuffer != 0) {
+ errorMessage = StringPrintf(
+ "%s: Transfer type TRANSFER_OBTAIN but sharedBuffer != 0", __func__);
+ status = BAD_VALUE;
+ goto error;
+ }
+ break;
+ case TRANSFER_SHARED:
+ if (sharedBuffer == 0) {
+ errorMessage = StringPrintf(
+ "%s: Transfer type TRANSFER_SHARED but sharedBuffer == 0", __func__);
+ status = BAD_VALUE;
+ goto error;
+ }
+ break;
+ default:
+ errorMessage = StringPrintf("%s: Invalid transfer type %d", __func__, transferType);
+ status = BAD_VALUE;
+ goto error;
+ }
+ mSharedBuffer = sharedBuffer;
+ mTransfer = transferType;
+ mDoNotReconnect = doNotReconnect;
+
+ ALOGV_IF(sharedBuffer != 0, "%s(): sharedBuffer: %p, size: %zu",
+ __func__, sharedBuffer->unsecurePointer(), sharedBuffer->size());
+
+ // invariant that mAudioTrack != 0 is true only after set() returns successfully
+ if (mAudioTrack != 0) {
+ errorMessage = StringPrintf("%s: Track already in use", __func__);
+ status = INVALID_OPERATION;
+ goto error;
+ }
+
+ // handle default values first.
+ if (streamType == AUDIO_STREAM_DEFAULT) {
+ streamType = AUDIO_STREAM_MUSIC;
+ }
+ if (pAttributes == NULL) {
+ if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
+ errorMessage = StringPrintf("%s: Invalid stream type %d", __func__, streamType);
+ status = BAD_VALUE;
+ goto error;
+ }
+ mOriginalStreamType = streamType;
+ } else {
+ mOriginalStreamType = AUDIO_STREAM_DEFAULT;
+ }
+
+ // validate parameters
+ if (!audio_is_valid_format(format)) {
+ errorMessage = StringPrintf("%s: Invalid format %#x", __func__, format);
+ status = BAD_VALUE;
+ goto error;
+ }
+
+ if (!audio_is_output_channel(channelMask)) {
+ errorMessage = StringPrintf("%s: Invalid channel mask %#x", __func__, channelMask);
+ status = BAD_VALUE;
+ goto error;
+ }
+ channelCount = audio_channel_count_from_out_mask(channelMask);
+ mChannelCount = channelCount;
+
+ if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
if (audio_has_proportional_frames(format)) {
mFrameSize = channelCount * audio_bytes_per_sample(format);
} else {
@@ -543,13 +558,12 @@
}
// sampling rate must be specified for direct outputs
- if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+ if (sampleRate == 0 && (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
+ errorMessage = StringPrintf(
+ "%s: sample rate must be specified for direct outputs", __func__);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
- mSampleRate = sampleRate;
- mOriginalSampleRate = sampleRate;
- mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
// 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);
@@ -569,22 +583,22 @@
mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
mSendLevel = 0.0f;
// mFrameCount is initialized in createTrack_l
- mReqFrameCount = frameCount;
if (notificationFrames >= 0) {
mNotificationFramesReq = notificationFrames;
mNotificationsPerBufferReq = 0;
} else {
- if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
- ALOGE("%s(): notificationFrames=%d not permitted for non-fast track",
+ if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
+ errorMessage = StringPrintf(
+ "%s: notificationFrames=%d not permitted for non-fast track",
__func__, notificationFrames);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
if (frameCount > 0) {
ALOGE("%s(): notificationFrames=%d not permitted with non-zero frameCount=%zu",
__func__, notificationFrames, frameCount);
status = BAD_VALUE;
- goto exit;
+ goto error;
}
mNotificationFramesReq = 0;
const uint32_t minNotificationsPerBuffer = 1;
@@ -609,7 +623,6 @@
mClientAttributionSource.pid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(callingPid));
}
mAuxEffectId = 0;
- mOrigFlags = mFlags = flags;
mCbf = cbf;
if (cbf != NULL) {
@@ -629,6 +642,7 @@
mAudioTrackThread->requestExitAndWait();
mAudioTrackThread.clear();
}
+ // We do not goto error to prevent double-logging errors.
goto exit;
}
@@ -663,6 +677,12 @@
mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
mVolumeHandler = new media::VolumeHandler();
+error:
+ if (status != NO_ERROR) {
+ ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+ reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
+ }
+ // fall through
exit:
mStatus = status;
return status;
@@ -1688,12 +1708,13 @@
{
status_t status;
bool callbackAdded = false;
+ std::string errorMessage;
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
- ALOGE("%s(%d): Could not get audioflinger",
+ errorMessage = StringPrintf("%s(%d): Could not get audioflinger",
__func__, mPortId);
- status = NO_INIT;
+ status = DEAD_OBJECT;
goto exit;
}
@@ -1770,10 +1791,11 @@
}
if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
- ALOGE("%s(%d): AudioFlinger could not create track, status: %d output %d",
+ errorMessage = StringPrintf(
+ "%s(%d): AudioFlinger could not create track, status: %d output %d",
__func__, mPortId, status, output.outputId);
if (status == NO_ERROR) {
- status = NO_INIT;
+ status = INVALID_OPERATION; // device not ready
}
goto exit;
}
@@ -1804,8 +1826,8 @@
output.audioTrack->getCblk(&sfr);
sp<IMemory> iMem = VALUE_OR_FATAL(aidl2legacy_NullableSharedFileRegion_IMemory(sfr));
if (iMem == 0) {
- ALOGE("%s(%d): Could not get control block", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf("%s(%d): Could not get control block", __func__, mPortId);
+ status = FAILED_TRANSACTION;
goto exit;
}
// TODO: Using unsecurePointer() has some associated security pitfalls
@@ -1814,8 +1836,9 @@
// issue (e.g. by copying).
void *iMemPointer = iMem->unsecurePointer();
if (iMemPointer == NULL) {
- ALOGE("%s(%d): Could not get control block pointer", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf(
+ "%s(%d): Could not get control block pointer", __func__, mPortId);
+ status = FAILED_TRANSACTION;
goto exit;
}
// invariant that mAudioTrack != 0 is true only after set() returns successfully
@@ -1873,8 +1896,10 @@
// issue (e.g. by copying).
buffers = mSharedBuffer->unsecurePointer();
if (buffers == NULL) {
- ALOGE("%s(%d): Could not get buffer pointer", __func__, mPortId);
- status = NO_INIT;
+ errorMessage = StringPrintf(
+ "%s(%d): Could not get buffer pointer", __func__, mPortId);
+ ALOGE("%s", errorMessage.c_str());
+ status = FAILED_TRANSACTION;
goto exit;
}
}
@@ -1952,6 +1977,7 @@
.set(AMEDIAMETRICS_PROP_VOLUME_LEFT, (double)mVolume[AUDIO_INTERLEAVE_LEFT])
.set(AMEDIAMETRICS_PROP_VOLUME_RIGHT, (double)mVolume[AUDIO_INTERLEAVE_RIGHT])
.set(AMEDIAMETRICS_PROP_STATE, stateToString(mState))
+ .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)NO_ERROR)
.set(AMEDIAMETRICS_PROP_AUXEFFECTID, (int32_t)mAuxEffectId)
.set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
.set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
@@ -1972,17 +1998,47 @@
}
exit:
- if (status != NO_ERROR && callbackAdded) {
- // note: mOutput is always valid is callbackAdded is true
- AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ if (status != NO_ERROR) {
+ if (callbackAdded) {
+            // note: mOutput is always valid if callbackAdded is true
+ AudioSystem::removeAudioDeviceCallback(this, mOutput, mPortId);
+ }
+ ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
+ reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, errorMessage.c_str());
}
-
mStatus = status;
// sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
return status;
}
+void AudioTrack::reportError(status_t status, const char *event, const char *message) const
+{
+ if (status == NO_ERROR) return;
+ // We report error on the native side because some callers do not come
+ // from Java.
+ // Ensure these variables are initialized in set().
+ mediametrics::LogItem(AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR)
+ .set(AMEDIAMETRICS_PROP_EVENT, event)
+ .set(AMEDIAMETRICS_PROP_STATUS, (int32_t)status)
+ .set(AMEDIAMETRICS_PROP_STATUSMESSAGE, message)
+ .set(AMEDIAMETRICS_PROP_ORIGINALFLAGS, toString(mOrigFlags).c_str())
+ .set(AMEDIAMETRICS_PROP_SESSIONID, (int32_t)mSessionId)
+ .set(AMEDIAMETRICS_PROP_CONTENTTYPE, toString(mAttributes.content_type).c_str())
+ .set(AMEDIAMETRICS_PROP_USAGE, toString(mAttributes.usage).c_str())
+ .set(AMEDIAMETRICS_PROP_SELECTEDDEVICEID, (int32_t)mSelectedDeviceId)
+ .set(AMEDIAMETRICS_PROP_ENCODING, toString(mFormat).c_str())
+ .set(AMEDIAMETRICS_PROP_CHANNELMASK, (int32_t)mChannelMask)
+ // the following are NOT immutable
+ // frame count is initially the requested frame count, but may be adjusted
+ // by AudioFlinger after creation.
+ .set(AMEDIAMETRICS_PROP_FRAMECOUNT, (int32_t)mFrameCount)
+ .set(AMEDIAMETRICS_PROP_SAMPLERATE, (int32_t)mSampleRate)
+ .set(AMEDIAMETRICS_PROP_PLAYBACK_SPEED, (double)mPlaybackRate.mSpeed)
+ .set(AMEDIAMETRICS_PROP_PLAYBACK_PITCH, (double)mPlaybackRate.mPitch)
+ .record();
+}
+
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
{
if (audioBuffer == NULL) {
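The set() refactor above funnels every validation failure through a single error: label so each failure is logged and reported to mediametrics exactly once, while successful paths reach exit: without reporting. A condensed sketch of that control flow, using Android's status_t and logging conventions:

    #define LOG_TAG "SetPatternSketch"
    #include <string>
    #include <utils/Errors.h>
    #include <utils/Log.h>

    using android::status_t;

    // Condensed sketch of the error/exit split used in set() above.
    status_t setLike(bool paramOk) {
        status_t status = android::NO_ERROR;
        std::string errorMessage;
        if (!paramOk) {
            errorMessage = "invalid parameter";
            status = android::BAD_VALUE;
            goto error;
        }
        goto exit;  // success paths skip the error report
    error:
        ALOGE_IF(!errorMessage.empty(), "%s", errorMessage.c_str());
        // reportError(status, AMEDIAMETRICS_PROP_EVENT_VALUE_CREATE, ...) would go here.
        // fall through
    exit:
        return status;
    }
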
diff --git a/media/libaudioclient/IAudioFlinger.cpp b/media/libaudioclient/IAudioFlinger.cpp
index cae81f0..504e4f8 100644
--- a/media/libaudioclient/IAudioFlinger.cpp
+++ b/media/libaudioclient/IAudioFlinger.cpp
@@ -715,6 +715,10 @@
return statusTFromBinderStatus(mDelegate->systemReady());
}
+status_t AudioFlingerClientAdapter::audioPolicyReady() {
+ return statusTFromBinderStatus(mDelegate->audioPolicyReady());
+}
+
size_t AudioFlingerClientAdapter::frameCountHAL(audio_io_handle_t ioHandle) const {
auto result = [&]() -> ConversionResult<size_t> {
int32_t ioHandleAidl = VALUE_OR_RETURN(legacy2aidl_audio_io_handle_t_int32_t(ioHandle));
@@ -761,6 +765,12 @@
return statusTFromBinderStatus(mDelegate->updateSecondaryOutputs(trackSecondaryOutputInfos));
}
+status_t AudioFlingerClientAdapter::setDeviceConnectedState(
+ const struct audio_port_v7 *port, bool connected) {
+ media::AudioPort aidlPort = VALUE_OR_RETURN_STATUS(
+ legacy2aidl_audio_port_v7_AudioPort(*port));
+ return statusTFromBinderStatus(mDelegate->setDeviceConnectedState(aidlPort, connected));
+}
////////////////////////////////////////////////////////////////////////////////////////////////////
// AudioFlingerServerAdapter
@@ -1189,6 +1199,11 @@
return Status::fromStatusT(mDelegate->systemReady());
}
+Status AudioFlingerServerAdapter::audioPolicyReady() {
+ mDelegate->audioPolicyReady();
+ return Status::ok();
+}
+
Status AudioFlingerServerAdapter::frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) {
audio_io_handle_t ioHandleLegacy = VALUE_OR_RETURN_BINDER(
aidl2legacy_int32_t_audio_io_handle_t(ioHandle));
@@ -1227,4 +1242,10 @@
return Status::fromStatusT(mDelegate->updateSecondaryOutputs(trackSecondaryOutputs));
}
+Status AudioFlingerServerAdapter::setDeviceConnectedState(
+ const media::AudioPort& port, bool connected) {
+ audio_port_v7 portLegacy = VALUE_OR_RETURN_BINDER(aidl2legacy_AudioPort_audio_port_v7(port));
+ return Status::fromStatusT(mDelegate->setDeviceConnectedState(&portLegacy, connected));
+}
+
} // namespace android
diff --git a/media/libaudioclient/PolicyAidlConversion.cpp b/media/libaudioclient/PolicyAidlConversion.cpp
index 25fdb49..dcfde8b 100644
--- a/media/libaudioclient/PolicyAidlConversion.cpp
+++ b/media/libaudioclient/PolicyAidlConversion.cpp
@@ -26,16 +26,6 @@
using base::unexpected;
-ConversionResult<volume_group_t>
-aidl2legacy_int32_t_volume_group_t(int32_t aidl) {
- return convertReinterpret<volume_group_t>(aidl);
-}
-
-ConversionResult<int32_t>
-legacy2aidl_volume_group_t_int32_t(volume_group_t legacy) {
- return convertReinterpret<int32_t>(legacy);
-}
-
ConversionResult<uint32_t>
aidl2legacy_AudioMixType_uint32_t(media::AudioMixType aidl) {
switch (aidl) {
diff --git a/media/libaudioclient/aidl/android/media/AudioContentType.aidl b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
index f734fba..a7d3277 100644
--- a/media/libaudioclient/aidl/android/media/AudioContentType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioContentType.aidl
@@ -22,4 +22,5 @@
MUSIC = 2,
MOVIE = 3,
SONIFICATION = 4,
+ ULTRASOUND = 1997,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioFlag.aidl b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
index 58b493b..91361fb 100644
--- a/media/libaudioclient/aidl/android/media/AudioFlag.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioFlag.aidl
@@ -34,4 +34,6 @@
MUTE_HAPTIC = 11,
NO_SYSTEM_CAPTURE = 12,
CAPTURE_PRIVATE = 13,
+ CONTENT_SPATIALIZED = 14,
+ NEVER_SPATIALIZE = 15,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
index bfc0eb0..d79769c 100644
--- a/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioInputFlags.aidl
@@ -28,4 +28,5 @@
VOIP_TX = 5,
HW_AV_SYNC = 6,
DIRECT = 7,
+ ULTRASOUND = 8,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
index cebd8f0..f49b24c 100644
--- a/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioOutputFlags.aidl
@@ -36,4 +36,6 @@
VOIP_RX = 13,
INCALL_MUSIC = 14,
GAPLESS_OFFLOAD = 15,
+ SPATIALIZER = 16,
+ ULTRASOUND = 17,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
index 8673b92..2006e6c 100644
--- a/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioSourceType.aidl
@@ -40,4 +40,5 @@
* Used only internally by the framework.
*/
HOTWORD = 1999,
+ ULTRASOUND = 2000,
}
diff --git a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
index f88fc3c..8538d8a 100644
--- a/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioVibratorInfo.aidl
@@ -24,4 +24,5 @@
int id;
float resonantFrequency;
float qFactor;
+ float maxAmplitude;
}
diff --git a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
index 2d274f4..35a56eb 100644
--- a/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/CreateEffectRequest.aidl
@@ -37,4 +37,6 @@
AudioDevice device;
AttributionSourceState attributionSource;
boolean probe;
+ /** true if a callback must be sent each time audio frames are processed */
+ boolean notifyFramesProcessed;
}
diff --git a/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
new file mode 100644
index 0000000..25115ac
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/GetSpatializerResponse.aidl
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.ISpatializer;
+
+/**
+ * Used as a return value for IAudioPolicyService.getSpatializer() method
+ * {@hide}
+ */
+ parcelable GetSpatializerResponse {
+ /* The ISpatializer interface if successful, null if not */
+ @nullable ISpatializer spatializer;
+}
diff --git a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
index d2cae6d..5cdde5d 100644
--- a/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioFlingerService.aidl
@@ -197,6 +197,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
oneway void systemReady();
+ /* Indicate audio policy service is ready */
+ oneway void audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
long frameCountHAL(int /* audio_io_handle_t */ ioHandle);
@@ -213,4 +216,6 @@
// This usually happens when there is a dynamic policy registered.
void updateSecondaryOutputs(
in TrackSecondaryOutputInfo[] trackSecondaryOutputInfos);
+
+ void setDeviceConnectedState(in AudioPort devicePort, boolean connected);
}
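
On the native client side these two methods surface through IAudioFlinger (see the header changes further down). A hedged sketch of how the audio policy side might use them, assuming af is a valid sp<IAudioFlinger> and port describes a DEVICE-type audio_port_v7:

    // Sketch: announce that audio policy is up, then report a device connection.
    void onAudioPolicyInitialized(const sp<IAudioFlinger>& af, const audio_port_v7& port) {
        af->audioPolicyReady();  // the underlying binder call is oneway
        const status_t status = af->setDeviceConnectedState(&port, true /*connected*/);
        if (status != NO_ERROR) {
            ALOGW("setDeviceConnectedState failed: %d", status);
        }
    }
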
diff --git a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
index fc37575..6140a64 100644
--- a/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
+++ b/media/libaudioclient/aidl/android/media/IAudioPolicyService.aidl
@@ -47,8 +47,10 @@
import android.media.EffectDescriptor;
import android.media.GetInputForAttrResponse;
import android.media.GetOutputForAttrResponse;
+import android.media.GetSpatializerResponse;
import android.media.IAudioPolicyServiceClient;
import android.media.ICaptureStateListener;
+import android.media.INativeSpatializerCallback;
import android.media.Int;
import android.media.SoundTriggerSession;
@@ -348,4 +350,29 @@
DeviceRole role);
boolean registerSoundTriggerCaptureStateListener(ICaptureStateListener listener);
+
+ /** If a spatializer stage effect is present on the platform, this will return an
+ * ISpatializer interface (see GetSpatializerResponse.aidl) to control this
+ * feature.
+ * If no spatializer stage is present, a null interface is returned.
+ * The INativeSpatializerCallback passed must not be null.
+ * Only one ISpatializer interface can exist at a given time. The native audio policy
+ * service will reject the request if an interface was already acquired and the
+ * previous owner has neither died nor called ISpatializer.release().
+ */
+ GetSpatializerResponse getSpatializer(INativeSpatializerCallback callback);
+
+ /** Queries whether some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no arguments at all reports whether
+ * spatialization is supported in general.
+ */
+ boolean canBeSpatialized(in @nullable AudioAttributesInternal attr,
+ in @nullable AudioConfig config,
+ in AudioDevice[] devices);
}
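
A sketch of the acquisition contract described above, as seen from a client holding the IAudioPolicyService binder (MySpatializerCallback is a hypothetical INativeSpatializerCallback implementation; error handling is elided):

    // Sketch: acquire the single ISpatializer instance, use it, then release it
    // so that another client may acquire it later.
    sp<media::INativeSpatializerCallback> callback = new MySpatializerCallback();
    media::GetSpatializerResponse response;
    binder::Status status = aps->getSpatializer(callback, &response);
    if (status.isOk() && response.spatializer != nullptr) {
        // ... drive the spatializer through response.spatializer ...
        response.spatializer->release();
    }
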
diff --git a/media/libaudioclient/aidl/android/media/IEffectClient.aidl b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
index 3b6bcf1..37b442d 100644
--- a/media/libaudioclient/aidl/android/media/IEffectClient.aidl
+++ b/media/libaudioclient/aidl/android/media/IEffectClient.aidl
@@ -43,4 +43,10 @@
* TODO(ytai): replace opaque byte arrays with strongly typed parameters.
*/
oneway void commandExecuted(int cmdCode, in byte[] cmdData, in byte[] replyData);
+
+ /**
+ * Called whenever audio frames have been processed by the effect engine.
+ * @param frames number of frames processed.
+ */
+ oneway void framesProcessed(int frames);
}
diff --git a/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
new file mode 100644
index 0000000..88b8108
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/INativeSpatializerCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The INativeSpatializerCallback interface is a callback associated with the
+ * ISpatializer interface. The callback is used by the spatializer
+ * implementation in the native audio server to communicate state changes to the
+ * client controlling the spatializer with the ISpatializer interface.
+ * {@hide}
+ */
+oneway interface INativeSpatializerCallback {
+ /** Called when the spatialization level applied by the spatializer changes
+ * (e.g. when the spatializer is enabled or disabled)
+ */
+ void onLevelChanged(SpatializationLevel level);
+
+ /** Called when the output stream the Spatializer is attached to changes.
+ * Indicates the IO Handle of the new output.
+ */
+ void onOutputChanged(int output);
+}
diff --git a/media/libaudioclient/aidl/android/media/ISpatializer.aidl b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
new file mode 100644
index 0000000..b871238
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializer.aidl
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.ISpatializerHeadTrackingCallback;
+import android.media.SpatializationLevel;
+import android.media.SpatializationMode;
+import android.media.SpatializerHeadTrackingMode;
+
+
+/**
+ * The ISpatializer interface is used to control the native audio service implementation
+ * of the spatializer stage with head tracking when present on a platform.
+ * It is intended for exclusive use by the Java AudioService running in system_server.
+ * It provides APIs to discover feature availability and options, as well as to control
+ * and report the active state and modes of the spatializer and head tracking effect.
+ * {@hide}
+ */
+interface ISpatializer {
+ /** Releases a ISpatializer interface previously acquired. */
+ void release();
+
+ /** Reports the list of supported spatialization levels (see SpatializationLevel.aidl).
+ * The list should never be empty if an ISpatializer interface was successfully
+ * retrieved with IAudioPolicyService.getSpatializer().
+ */
+ SpatializationLevel[] getSupportedLevels();
+
+ /** Selects the desired spatialization level (see SpatializationLevel.aidl). Selecting a level
+ * different from SpatializationLevel.NONE will create the specialized multichannel output
+ * mixer, create and enable the spatializer effect, and let the audio policy attach eligible
+ * AudioTracks to this output stream.
+ */
+ void setLevel(SpatializationLevel level);
+
+ /** Gets the selected spatialization level (see SpatializationLevel.aidl) */
+ SpatializationLevel getLevel();
+
+ /** Reports whether the spatializer engine supports head tracking.
+ * This is a precondition independent of whether a head tracking sensor is
+ * registered.
+ */
+ boolean isHeadTrackingSupported();
+
+ /** Reports the list of supported head tracking modes (see SpatializerHeadTrackingMode.aidl).
+ * The list can be empty if the spatializer implementation does not support head tracking or if
+ * no head tracking sensor is registered (see setHeadSensor() and setScreenSensor()).
+ */
+ SpatializerHeadTrackingMode[] getSupportedHeadTrackingModes();
+
+ /** Selects the desired head tracking mode (see SpatializerHeadTrackingMode.aidl) */
+ void setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode);
+
+ /** Gets the actual head tracking mode. Can be different from the desired mode if conditions to
+ * enable the desired mode are not met (e.g. if the head tracking device was removed).
+ */
+ SpatializerHeadTrackingMode getActualHeadTrackingMode();
+
+ /** Resets the head tracking algorithm to consider the current head pose as neutral. */
+ void recenterHeadTracker();
+
+ /** Sets the screen-to-stage transform used by the head tracking algorithm.
+ * The screen-to-stage transform is conveyed as a vector of 6 elements,
+ * where the first three are a translation vector and
+ * the last three are a rotation vector.
+ */
+ void setGlobalTransform(in float[] screenToStage);
+
+ /**
+ * Set the sensor that is to be used for head-tracking.
+ * -1 can be used to disable head-tracking.
+ */
+ void setHeadSensor(int sensorHandle);
+
+ /**
+ * Set the sensor that is to be used for screen-tracking.
+ * -1 can be used to disable screen-tracking.
+ */
+ void setScreenSensor(int sensorHandle);
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+ * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+ * as viewed while facing the screen, are positive.
+ */
+ void setDisplayOrientation(float physicalToLogicalAngle);
+
+ /**
+ * Sets the hinge angle for foldable devices.
+ */
+ void setHingeAngle(float hingeAngle);
+
+ /** Reports the list of supported spatialization modes (see SpatializationMode.aidl).
+ * The list should never be empty if an ISpatializer interface was successfully
+ * retrieved with IAudioPolicyService.getSpatializer().
+ */
+ SpatializationMode[] getSupportedModes();
+
+ /**
+ * Registers a callback to monitor head tracking functions.
+ * Only one callback can be registered on a Spatializer.
+ * The last callback registered wins, and passing a null callback unregisters
+ * the last registered callback.
+ */
+ void registerHeadTrackingCallback(@nullable ISpatializerHeadTrackingCallback callback);
+
+ /**
+ * Sets a parameter on the spatializer engine. Used by the effect implementor for
+ * vendor-specific configuration.
+ */
+ void setParameter(int key, in byte[] value);
+
+ /**
+ * Gets a parameter from the spatializer engine. Used by the effect implementor for
+ * vendor-specific configuration.
+ */
+ void getParameter(int key, inout byte[] value);
+
+ /**
+ * Gets the IO handle of the output stream the spatializer is connected to.
+ */
+ int getOutput();
+}
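
Putting the methods above together, a typical control sequence might look as follows (a sketch, assuming spatializer was obtained via IAudioPolicyService.getSpatializer() and headSensorHandle is a valid sensor handle):

    // Sketch: enable the first non-NONE supported level and request
    // world-relative head tracking.
    std::vector<media::SpatializationLevel> levels;
    spatializer->getSupportedLevels(&levels);
    for (const auto level : levels) {
        if (level != media::SpatializationLevel::NONE) {
            spatializer->setLevel(level);
            break;
        }
    }
    spatializer->setHeadSensor(headSensorHandle);  // -1 would disable head tracking
    spatializer->setDesiredHeadTrackingMode(
            media::SpatializerHeadTrackingMode::RELATIVE_WORLD);
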
diff --git a/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
new file mode 100644
index 0000000..23d5e13
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/ISpatializerHeadTrackingCallback.aidl
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.SpatializationLevel;
+import android.media.SpatializerHeadTrackingMode;
+
+/**
+ * The ISpatializerHeadTrackingCallback interface is a callback associated with the
+ * Spatializer head tracking function. It can be registered via the ISpatializer
+ * interface to monitor head tracking related states.
+ * {@hide}
+ */
+oneway interface ISpatializerHeadTrackingCallback {
+ /** Called when the head tracking mode has changed.
+ */
+ void onHeadTrackingModeChanged(SpatializerHeadTrackingMode mode);
+
+ /** Called when the head-to-stage pose has been updated.
+ * The head-to-stage pose is conveyed as a vector of 6 elements,
+ * where the first three are a translation vector and
+ * the last three are a rotation vector.
+ */
+ void onHeadToSoundStagePoseUpdated(in float[] headToStage);
+}
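
The 6-element pose convention documented above can be unpacked in a callback implementation as follows (a sketch; BnSpatializerHeadTrackingCallback is the generated AIDL stub the implementation would derive from):

    // Sketch: split the 6-float head-to-stage pose into its translation and
    // rotation-vector components.
    binder::Status onHeadToSoundStagePoseUpdated(
            const std::vector<float>& headToStage) override {
        if (headToStage.size() == 6) {
            const float tx = headToStage[0], ty = headToStage[1], tz = headToStage[2];
            const float rx = headToStage[3], ry = headToStage[4], rz = headToStage[5];
            // ... forward translation (tx, ty, tz) and rotation vector
            //     (rx, ry, rz) to the consumer ...
        }
        return binder::Status::ok();
    }
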
diff --git a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
index 06b12e9..1541948 100644
--- a/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
+++ b/media/libaudioclient/aidl/android/media/OpenOutputRequest.aidl
@@ -17,6 +17,7 @@
package android.media;
import android.media.AudioConfig;
+import android.media.AudioConfigBase;
import android.media.AudioPort;
/**
@@ -25,7 +26,8 @@
parcelable OpenOutputRequest {
/** Interpreted as audio_module_handle_t. */
int module;
- AudioConfig config;
+ AudioConfig halConfig;
+ AudioConfigBase mixerConfig;
/** Type must be DEVICE. */
AudioPort device;
/** Bitmask, indexed by AudioOutputFlag. */
diff --git a/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
new file mode 100644
index 0000000..961c5a1
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationLevel.aidl
@@ -0,0 +1,34 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization level supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationLevel {
+ /** Spatialization is disabled. */
+ NONE = 0,
+ /** The spatializer accepts audio with positional multichannel masks (e.g. 5.1). */
+ SPATIALIZER_MULTICHANNEL = 1,
+ /** The spatializer accepts audio made of a channel bed of positional multichannels (e.g. 5.1)
+ * and audio objects positioned independently via metadata.
+ */
+ SPATIALIZER_MCHAN_BED_PLUS_OBJECTS = 2,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializationMode.aidl b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
new file mode 100644
index 0000000..5d8fd93
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializationMode.aidl
@@ -0,0 +1,30 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+/**
+ * The spatialization mode supported by the spatializer stage effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializationMode {
+ /** The spatializer supports binaural mode (over headphones type devices). */
+ SPATIALIZER_BINAURAL = 0,
+ /** The spatializer supports transaural mode (over speaker type devices). */
+ SPATIALIZER_TRANSAURAL = 1,
+}
diff --git a/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
new file mode 100644
index 0000000..58e0f61
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/SpatializerHeadTrackingMode.aidl
@@ -0,0 +1,35 @@
+/*
+ * Copyright 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+
+/**
+ * The head tracking mode supported by the spatializer effect implementation.
+ * Used by methods of the ISpatializer interface.
+ * {@hide}
+ */
+@Backing(type="byte")
+enum SpatializerHeadTrackingMode {
+ /** Head tracking is active in a mode not listed below (forward compatibility) */
+ OTHER = 0,
+ /** Head tracking is disabled */
+ DISABLED = 1,
+ /** Head tracking is performed relative to the real world environment */
+ RELATIVE_WORLD = 2,
+ /** Head tracking is performed relative to the device's screen */
+ RELATIVE_SCREEN = 3,
+}
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index d03c6fa..bd9e158 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -383,6 +383,9 @@
const std::vector<uint8_t> &replyData __unused) override {
return binder::Status::ok();
}
+ binder::Status framesProcessed(int32_t frames __unused) override {
+ return binder::Status::ok();
+ }
};
status_t AudioFlingerFuzzer::invokeAudioEffect() {
@@ -424,6 +427,7 @@
request.attributionSource.packageName = opPackageName;
request.attributionSource.pid = VALUE_OR_RETURN_STATUS(legacy2aidl_pid_t_int32_t(getpid()));
request.probe = false;
+ request.notifyFramesProcessed = false;
media::CreateEffectResponse response{};
status_t status = af->createEffect(request, &response);
@@ -648,11 +652,15 @@
sp<DeviceDescriptorBase> device = new DeviceDescriptorBase(getValue(&mFdp, kDevices));
audio_output_flags_t flags = getValue(&mFdp, kOutputFlags);
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+
media::OpenOutputRequest request{};
media::OpenOutputResponse response{};
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ request.mixerConfig =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_base_t_AudioConfigBase(mixerConfig));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
diff --git a/media/libaudioclient/include/media/AudioCommonTypes.h b/media/libaudioclient/include/media/AudioCommonTypes.h
index 5dfe5fc..5f0c590 100644
--- a/media/libaudioclient/include/media/AudioCommonTypes.h
+++ b/media/libaudioclient/include/media/AudioCommonTypes.h
@@ -41,6 +41,42 @@
return !(lhs==rhs);
}
+constexpr bool operator==(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return lhs.version == rhs.version && lhs.size == rhs.size &&
+ lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.stream_type == rhs.stream_type &&
+ lhs.bit_rate == rhs.bit_rate && lhs.duration_us == rhs.duration_us &&
+ lhs.has_video == rhs.has_video && lhs.is_streaming == rhs.is_streaming &&
+ lhs.bit_width == rhs.bit_width && lhs.offload_buffer_size == rhs.offload_buffer_size &&
+ lhs.usage == rhs.usage && lhs.encapsulation_mode == rhs.encapsulation_mode &&
+ lhs.content_id == rhs.content_id && lhs.sync_id == rhs.sync_id;
+}
+constexpr bool operator!=(const audio_offload_info_t &lhs, const audio_offload_info_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format && lhs.offload_info == rhs.offload_info;
+}
+constexpr bool operator!=(const audio_config_t &lhs, const audio_config_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
+constexpr bool operator==(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return lhs.sample_rate == rhs.sample_rate && lhs.channel_mask == rhs.channel_mask &&
+ lhs.format == rhs.format;
+}
+constexpr bool operator!=(const audio_config_base_t &lhs, const audio_config_base_t &rhs)
+{
+ return !(lhs==rhs);
+}
+
enum volume_group_t : uint32_t;
static const volume_group_t VOLUME_GROUP_NONE = static_cast<volume_group_t>(-1);
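
These operators make configuration changes cheap to detect. A small sketch of the intended use (field values are illustrative):

    // Sketch: only reconfigure when the base config actually changed.
    audio_config_base_t oldConfig = AUDIO_CONFIG_BASE_INITIALIZER;
    audio_config_base_t newConfig = AUDIO_CONFIG_BASE_INITIALIZER;
    newConfig.sample_rate = 48000;
    if (newConfig != oldConfig) {
        // sample_rate, channel_mask or format differs: reconfigure the mixer
    }
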
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 3c19ec1..dd4d2da 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -283,7 +283,8 @@
EVENT_CONTROL_STATUS_CHANGED = 0,
EVENT_ENABLE_STATUS_CHANGED = 1,
EVENT_PARAMETER_CHANGED = 2,
- EVENT_ERROR = 3
+ EVENT_ERROR = 3,
+ EVENT_FRAMES_PROCESSED = 4,
};
/* Callback function notifying client application of a change in effect engine state or
@@ -389,7 +390,8 @@
audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
const AudioDeviceTypeAddr& device = {},
- bool probe = false);
+ bool probe = false,
+ bool notifyFramesProcessed = false);
/*
* Same as above but with type and uuid specified by character strings.
*/
@@ -401,7 +403,8 @@
audio_session_t sessionId = AUDIO_SESSION_OUTPUT_MIX,
audio_io_handle_t io = AUDIO_IO_HANDLE_NONE,
const AudioDeviceTypeAddr& device = {},
- bool probe = false);
+ bool probe = false,
+ bool notifyFramesProcessed = false);
/* Result of constructing the AudioEffect. This must be checked
* before using any AudioEffect API.
@@ -552,6 +555,7 @@
virtual void commandExecuted(int32_t cmdCode,
const std::vector<uint8_t>& cmdData,
const std::vector<uint8_t>& replyData);
+ virtual void framesProcessed(int32_t frames);
private:
@@ -587,6 +591,14 @@
}
return binder::Status::ok();
}
+ binder::Status framesProcessed(int32_t frames) override {
+ sp<AudioEffect> effect = mEffect.promote();
+ if (effect != 0) {
+ effect->framesProcessed(frames);
+ }
+ return binder::Status::ok();
+ }
+
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& /*who*/) {
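
With EVENT_FRAMES_PROCESSED added to the event list and the framesProcessed() hook above, a client callback can observe processing progress. A sketch in the legacy effect callback style; note that the type of the info payload for this event is an assumption here:

    // Sketch: handle the new event in a legacy AudioEffect callback.
    void effectCallback(int32_t event, void* user __unused, void* info) {
        switch (event) {
            case AudioEffect::EVENT_FRAMES_PROCESSED:
                // Assumption: info points to the int32_t frame count.
                ALOGV("frames processed: %d", *static_cast<int32_t*>(info));
                break;
            default:
                break;
        }
    }
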
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index 326919a..f17ee3a 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -264,6 +264,7 @@
size_t frameCount() const { return mFrameCount; }
size_t frameSize() const { return mFrameSize; }
audio_source_t inputSource() const { return mAttributes.source; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
diff --git a/media/libaudioclient/include/media/AudioSystem.h b/media/libaudioclient/include/media/AudioSystem.h
index a2cfb2b..4d85f7a 100644
--- a/media/libaudioclient/include/media/AudioSystem.h
+++ b/media/libaudioclient/include/media/AudioSystem.h
@@ -22,6 +22,8 @@
#include <android/media/AudioVibratorInfo.h>
#include <android/media/BnAudioFlingerClient.h>
#include <android/media/BnAudioPolicyServiceClient.h>
+#include <android/media/INativeSpatializerCallback.h>
+#include <android/media/ISpatializer.h>
#include <android/content/AttributionSourceState.h>
#include <media/AidlConversionUtil.h>
#include <media/AudioDeviceTypeAddr.h>
@@ -225,6 +227,9 @@
// Indicate JAVA services are ready (scheduling, power management ...)
static status_t systemReady();
+ // Indicate audio policy service is ready
+ static status_t audioPolicyReady();
+
// Returns the number of frames per audio HAL buffer.
// Corresponds to audio_stream->get_buffer_size()/audio_stream_in_frame_size() for input.
// See also getFrameCount().
@@ -485,6 +490,49 @@
static status_t getDeviceForStrategy(product_strategy_t strategy,
AudioDeviceTypeAddr &device);
+
+ /**
+ * If a spatializer stage effect is present on the platform, this will return an
+ * ISpatializer interface to control this feature.
+ * If no spatializer stage is present, a null interface is returned.
+ * The INativeSpatializerCallback passed must not be null.
+ * Only one ISpatializer interface can exist at a given time. The native audio policy
+ * service will reject the request if an interface was already acquired and the
+ * previous owner has neither died nor called ISpatializer.release().
+ * @param callback in: the callback to receive state updates if the ISpatializer
+ * interface is acquired.
+ * @param spatializer out: the ISpatializer interface made available to control the
+ * platform spatializer
+ * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, PERMISSION_DENIED, BAD_VALUE
+ * in case of error.
+ */
+ static status_t getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ sp<media::ISpatializer>* spatializer);
+
+ /**
+ * Queries whether some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate ...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no arguments at all reports whether
+ * spatialization is supported in general.
+ * @param attr audio attributes describing the playback use case
+ * @param config audio configuration describing the audio format, channels, sampling rate...
+ * @param devices the sink audio device selected for playback
+ * @param canBeSpatialized out: true if spatialization is enabled for this context,
+ * false otherwise
+ * @return NO_ERROR in case of success, DEAD_OBJECT, NO_INIT, BAD_VALUE
+ * in case of error.
+ */
+ static status_t canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices,
+ bool *canBeSpatialized);
+
+
// A listener for capture state changes.
class CaptureStateListener : public RefBase {
public:
@@ -497,11 +545,11 @@
virtual ~CaptureStateListener() = default;
};
- // Regiseters a listener for sound trigger capture state changes.
+ // Registers a listener for sound trigger capture state changes.
// There may only be one such listener registered at any point.
- // The listener onStateChanged() method will be invoked sychronously from
+ // The listener onStateChanged() method will be invoked synchronously from
// this call with the initial value.
- // The listener onServiceDied() method will be invoked sychronously from
+ // The listener onServiceDied() method will be invoked synchronously from
// this call if initial attempt to register failed.
// If the audio policy service cannot be reached, this method will return
// PERMISSION_DENIED and will not invoke the callback, otherwise, it will
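
A sketch of the "no arguments" query mentioned above, which reports whether spatialization is available at all on the device:

    // Sketch: null attributes/config and an empty device list query
    // general spatialization support.
    bool spatialized = false;
    const status_t status = AudioSystem::canBeSpatialized(
            nullptr /*attr*/, nullptr /*config*/, {} /*devices*/, &spatialized);
    if (status == NO_ERROR && spatialized) {
        // some form of spatialization is available
    }
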
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 6b592cb..fa21265 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -402,6 +402,7 @@
uint32_t channelCount() const { return mChannelCount; }
size_t frameCount() const { return mFrameCount; }
+ audio_channel_mask_t channelMask() const { return mChannelMask; }
/*
* Return the period of the notification callback in frames.
@@ -1389,6 +1390,9 @@
std::string mMetricsId; // GUARDED_BY(mLock), could change in createTrack_l().
std::string mCallerName; // for example "aaudio"
+ // report error to mediametrics.
+ void reportError(status_t status, const char *event, const char *message) const;
+
private:
class AudioTrackCallback : public media::BnAudioTrackCallback {
public:
diff --git a/media/libaudioclient/include/media/IAudioFlinger.h b/media/libaudioclient/include/media/IAudioFlinger.h
index 0e059f7..6d4ab8e 100644
--- a/media/libaudioclient/include/media/IAudioFlinger.h
+++ b/media/libaudioclient/include/media/IAudioFlinger.h
@@ -329,6 +329,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady() = 0;
+ // Indicate audio policy service is ready
+ virtual status_t audioPolicyReady() = 0;
+
// Returns the number of frames per audio HAL buffer.
virtual size_t frameCountHAL(audio_io_handle_t ioHandle) const = 0;
@@ -344,6 +347,8 @@
virtual status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+ virtual status_t setDeviceConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
};
/**
@@ -432,12 +437,15 @@
status_t setAudioPortConfig(const struct audio_port_config* config) override;
audio_hw_sync_t getAudioHwSyncForSession(audio_session_t sessionId) override;
status_t systemReady() override;
+ status_t audioPolicyReady() override;
+
size_t frameCountHAL(audio_io_handle_t ioHandle) const override;
status_t getMicrophones(std::vector<media::MicrophoneInfo>* microphones) override;
status_t setAudioHalPids(const std::vector<pid_t>& pids) override;
status_t setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
+ status_t setDeviceConnectedState(const struct audio_port_v7 *port, bool connected) override;
private:
const sp<media::IAudioFlingerService> mDelegate;
@@ -514,6 +522,7 @@
SET_AUDIO_PORT_CONFIG = media::BnAudioFlingerService::TRANSACTION_setAudioPortConfig,
GET_AUDIO_HW_SYNC_FOR_SESSION = media::BnAudioFlingerService::TRANSACTION_getAudioHwSyncForSession,
SYSTEM_READY = media::BnAudioFlingerService::TRANSACTION_systemReady,
+ AUDIO_POLICY_READY = media::BnAudioFlingerService::TRANSACTION_audioPolicyReady,
FRAME_COUNT_HAL = media::BnAudioFlingerService::TRANSACTION_frameCountHAL,
GET_MICROPHONES = media::BnAudioFlingerService::TRANSACTION_getMicrophones,
SET_MASTER_BALANCE = media::BnAudioFlingerService::TRANSACTION_setMasterBalance,
@@ -522,6 +531,7 @@
SET_AUDIO_HAL_PIDS = media::BnAudioFlingerService::TRANSACTION_setAudioHalPids,
SET_VIBRATOR_INFOS = media::BnAudioFlingerService::TRANSACTION_setVibratorInfos,
UPDATE_SECONDARY_OUTPUTS = media::BnAudioFlingerService::TRANSACTION_updateSecondaryOutputs,
+ SET_DEVICE_CONNECTED_STATE = media::BnAudioFlingerService::TRANSACTION_setDeviceConnectedState,
};
/**
@@ -624,12 +634,14 @@
Status setAudioPortConfig(const media::AudioPortConfig& config) override;
Status getAudioHwSyncForSession(int32_t sessionId, int32_t* _aidl_return) override;
Status systemReady() override;
+ Status audioPolicyReady() override;
Status frameCountHAL(int32_t ioHandle, int64_t* _aidl_return) override;
Status getMicrophones(std::vector<media::MicrophoneInfoData>* _aidl_return) override;
Status setAudioHalPids(const std::vector<int32_t>& pids) override;
Status setVibratorInfos(const std::vector<media::AudioVibratorInfo>& vibratorInfos) override;
Status updateSecondaryOutputs(
const std::vector<media::TrackSecondaryOutputInfo>& trackSecondaryOutputInfos) override;
+ Status setDeviceConnectedState(const media::AudioPort& port, bool connected) override;
private:
const sp<AudioFlingerServerAdapter::Delegate> mDelegate;
diff --git a/media/libaudioclient/include/media/PolicyAidlConversion.h b/media/libaudioclient/include/media/PolicyAidlConversion.h
index 873f27a..2cfa438 100644
--- a/media/libaudioclient/include/media/PolicyAidlConversion.h
+++ b/media/libaudioclient/include/media/PolicyAidlConversion.h
@@ -39,11 +39,6 @@
namespace android {
-ConversionResult<volume_group_t>
-aidl2legacy_int32_t_volume_group_t(int32_t aidl);
-ConversionResult<int32_t>
-legacy2aidl_volume_group_t_int32_t(volume_group_t legacy);
-
ConversionResult<product_strategy_t>
aidl2legacy_int32_t_product_strategy_t(int32_t aidl);
ConversionResult<int32_t>
diff --git a/media/libaudiohal/Android.bp b/media/libaudiohal/Android.bp
index bd24c84..b32c735 100644
--- a/media/libaudiohal/Android.bp
+++ b/media/libaudiohal/Android.bp
@@ -27,6 +27,7 @@
"libaudiohal@5.0",
"libaudiohal@6.0",
"libaudiohal@7.0",
+ "libaudiohal@7.1",
],
shared_libs: [
diff --git a/media/libaudiohal/FactoryHalHidl.cpp b/media/libaudiohal/FactoryHalHidl.cpp
index c19d2c2..804edcc 100644
--- a/media/libaudiohal/FactoryHalHidl.cpp
+++ b/media/libaudiohal/FactoryHalHidl.cpp
@@ -31,6 +31,7 @@
/** Supported HAL versions, in order of preference.
*/
const char* sAudioHALVersions[] = {
+ "7.1",
"7.0",
"6.0",
"5.0",
diff --git a/media/libaudiohal/impl/Android.bp b/media/libaudiohal/impl/Android.bp
index a2c6e8a..e9c8723 100644
--- a/media/libaudiohal/impl/Android.bp
+++ b/media/libaudiohal/impl/Android.bp
@@ -7,22 +7,33 @@
default_applicable_licenses: ["frameworks_av_license"],
}
-cc_defaults {
- name: "libaudiohal_default",
-
+filegroup {
+ name: "audio_core_hal_client_sources",
srcs: [
"DeviceHalLocal.cpp",
"DevicesFactoryHalHybrid.cpp",
"DevicesFactoryHalLocal.cpp",
- "StreamHalLocal.cpp",
-
- "ConversionHelperHidl.cpp",
"DeviceHalHidl.cpp",
"DevicesFactoryHalHidl.cpp",
+ "StreamHalLocal.cpp",
+ "StreamHalHidl.cpp",
+ ],
+}
+
+filegroup {
+ name: "audio_effect_hal_client_sources",
+ srcs: [
"EffectBufferHalHidl.cpp",
"EffectHalHidl.cpp",
"EffectsFactoryHalHidl.cpp",
- "StreamHalHidl.cpp",
+ ],
+}
+
+cc_defaults {
+ name: "libaudiohal_default",
+
+ srcs: [
+ "ConversionHelperHidl.cpp",
],
cflags: [
@@ -65,6 +76,10 @@
cc_library_shared {
name: "libaudiohal@4.0",
defaults: ["libaudiohal_default"],
+ srcs: [
+ ":audio_core_hal_client_sources",
+ ":audio_effect_hal_client_sources",
+ ],
shared_libs: [
"android.hardware.audio.common@4.0",
"android.hardware.audio.common@4.0-util",
@@ -83,6 +98,10 @@
cc_library_shared {
name: "libaudiohal@5.0",
defaults: ["libaudiohal_default"],
+ srcs: [
+ ":audio_core_hal_client_sources",
+ ":audio_effect_hal_client_sources",
+ ],
shared_libs: [
"android.hardware.audio.common@5.0",
"android.hardware.audio.common@5.0-util",
@@ -101,6 +120,10 @@
cc_library_shared {
name: "libaudiohal@6.0",
defaults: ["libaudiohal_default"],
+ srcs: [
+ ":audio_core_hal_client_sources",
+ ":audio_effect_hal_client_sources",
+ ],
shared_libs: [
"android.hardware.audio.common@6.0",
"android.hardware.audio.common@6.0-util",
@@ -119,6 +142,10 @@
cc_library_shared {
name: "libaudiohal@7.0",
defaults: ["libaudiohal_default"],
+ srcs: [
+ ":audio_core_hal_client_sources",
+ ":audio_effect_hal_client_sources",
+ ],
shared_libs: [
"android.hardware.audio.common@7.0",
"android.hardware.audio.common@7.0-util",
@@ -133,3 +160,25 @@
"-include common/all-versions/VersionMacro.h",
]
}
+
+cc_library_shared {
+ name: "libaudiohal@7.1",
+ defaults: ["libaudiohal_default"],
+ srcs: [
+ ":audio_core_hal_client_sources",
+ ],
+ shared_libs: [
+ "android.hardware.audio.common@7.0",
+ "android.hardware.audio.common@7.1-util",
+ "android.hardware.audio@7.0",
+ "android.hardware.audio@7.1",
+ "android.hardware.audio@7.1-util",
+ ],
+ cflags: [
+ "-DMAJOR_VERSION=7",
+ "-DMINOR_VERSION=1",
+ "-DCOMMON_TYPES_MINOR_VERSION=0",
+ "-DCORE_TYPES_MINOR_VERSION=0",
+ "-include common/all-versions/VersionMacro.h",
+ ]
+}
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.cpp b/media/libaudiohal/impl/ConversionHelperHidl.cpp
index 32eaa31..1d34814 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.cpp
+++ b/media/libaudiohal/impl/ConversionHelperHidl.cpp
@@ -24,10 +24,9 @@
#include "ConversionHelperHidl.h"
namespace android {
-namespace CPP_VERSION {
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
// static
status_t ConversionHelperHidl::keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys) {
@@ -105,6 +104,15 @@
}
// static
+void ConversionHelperHidl::argsFromHal(
+ const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs) {
+ hidlArgs->resize(args.size());
+ for (size_t i = 0; i < args.size(); ++i) {
+ (*hidlArgs)[i] = String8(args[i]).c_str();
+ }
+}
+
+// static
status_t ConversionHelperHidl::analyzeResult(const Result& result) {
switch (result) {
case Result::OK: return OK;
@@ -120,5 +128,4 @@
ALOGE("%s %p %s: %s (from rpc)", mClassName, this, funcName, description);
}
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/ConversionHelperHidl.h b/media/libaudiohal/impl/ConversionHelperHidl.h
index 59122c7..9368551 100644
--- a/media/libaudiohal/impl/ConversionHelperHidl.h
+++ b/media/libaudiohal/impl/ConversionHelperHidl.h
@@ -17,26 +17,28 @@
#ifndef ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
#define ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
-#include PATH(android/hardware/audio/FILE_VERSION/types.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/types.h)
#include <hidl/HidlSupport.h>
#include <system/audio.h>
#include <utils/String8.h>
+#include <utils/String16.h>
+#include <utils/Vector.h>
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using CoreResult = ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::ParameterValue;
+using CoreResult = ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
using ::android::hardware::Return;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
namespace android {
-namespace CPP_VERSION {
class ConversionHelperHidl {
protected:
static status_t keysFromHal(const String8& keys, hidl_vec<hidl_string> *hidlKeys);
static status_t parametersFromHal(const String8& kvPairs, hidl_vec<ParameterValue> *hidlParams);
static void parametersToHal(const hidl_vec<ParameterValue>& parameters, String8 *values);
+ static void argsFromHal(const Vector<String16>& args, hidl_vec<hidl_string> *hidlArgs);
ConversionHelperHidl(const char* className);
@@ -82,7 +84,6 @@
void emitError(const char* funcName, const char* description);
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_CONVERSION_HELPER_HIDL_H
diff --git a/media/libaudiohal/impl/DeviceHalHidl.cpp b/media/libaudiohal/impl/DeviceHalHidl.cpp
index 02d66ae..ca2286e 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.cpp
+++ b/media/libaudiohal/impl/DeviceHalHidl.cpp
@@ -30,27 +30,40 @@
#include <util/CoreUtils.h>
#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
#include "ParameterUtils.h"
#include "StreamHalHidl.h"
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::implementation::HidlUtils;
using ::android::hardware::audio::common::utils::EnumBitfield;
-using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::implementation::CoreUtils;
using ::android::hardware::hidl_string;
using ::android::hardware::hidl_vec;
namespace android {
-namespace CPP_VERSION {
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
-using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
+DeviceHalHidl::DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device)
+ : ConversionHelperHidl("Device"), mDevice(device) {
+}
-DeviceHalHidl::DeviceHalHidl(const sp<IDevice>& device)
- : ConversionHelperHidl("Device"), mDevice(device),
- mPrimaryDevice(IPrimaryDevice::castFrom(device)) {
+DeviceHalHidl::DeviceHalHidl(
+ const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& device)
+ : ConversionHelperHidl("Device"),
+#if MAJOR_VERSION <= 6 || (MAJOR_VERSION == 7 && MINOR_VERSION == 0)
+ mDevice(device),
+#endif
+ mPrimaryDevice(device) {
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ auto getDeviceRet = mPrimaryDevice->getDevice();
+ if (getDeviceRet.isOk()) {
+ mDevice = getDeviceRet;
+ } else {
+ ALOGE("Call to IPrimaryDevice.getDevice has failed: %s",
+ getDeviceRet.description().c_str());
+ }
+#endif
}
DeviceHalHidl::~DeviceHalHidl() {
@@ -209,12 +222,17 @@
return status;
}
Result retval = Result::NOT_INITIALIZED;
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ Return<void> ret = mDevice->openOutputStream_7_1(
+#else
Return<void> ret = mDevice->openOutputStream(
+#endif
handle, hidlDevice, hidlConfig, hidlFlags,
#if MAJOR_VERSION >= 4
{} /* metadata */,
#endif
- [&](Result r, const sp<IStreamOut>& result, const AudioConfig& suggestedConfig) {
+ [&](Result r, const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& result,
+ const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
*outStream = new StreamOutHalHidl(result);
@@ -284,7 +302,9 @@
#endif
Return<void> ret = mDevice->openInputStream(
handle, hidlDevice, hidlConfig, hidlFlags, sinkMetadata,
- [&](Result r, const sp<IStreamIn>& result, const AudioConfig& suggestedConfig) {
+ [&](Result r,
+ const sp<::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn>& result,
+ const AudioConfig& suggestedConfig) {
retval = r;
if (retval == Result::OK) {
*inStream = new StreamInHalHidl(result);
@@ -432,8 +452,7 @@
audio_port_handle_t device, sp<EffectHalInterface> effect) {
if (mDevice == 0) return NO_INIT;
return processReturn("addDeviceEffect", mDevice->addDeviceEffect(
- static_cast<AudioPortHandle>(device),
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
+ static_cast<AudioPortHandle>(device), effect->effectId()));
}
#else
status_t DeviceHalHidl::addDeviceEffect(
@@ -447,8 +466,7 @@
audio_port_handle_t device, sp<EffectHalInterface> effect) {
if (mDevice == 0) return NO_INIT;
return processReturn("removeDeviceEffect", mDevice->removeDeviceEffect(
- static_cast<AudioPortHandle>(device),
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
+ static_cast<AudioPortHandle>(device), effect->effectId()));
}
#else
status_t DeviceHalHidl::removeDeviceEffect(
@@ -457,11 +475,39 @@
}
#endif
-status_t DeviceHalHidl::dump(int fd) {
+status_t DeviceHalHidl::setConnectedState(const struct audio_port_v7 *port, bool connected) {
+ if (mDevice == 0) return NO_INIT;
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ if (supportsSetConnectedState7_1) {
+ AudioPort hidlPort;
+ if (status_t result = HidlUtils::audioPortFromHal(*port, &hidlPort); result != NO_ERROR) {
+ return result;
+ }
+ Return<Result> ret = mDevice->setConnectedState_7_1(hidlPort, connected);
+ if (!ret.isOk() || ret != Result::NOT_SUPPORTED) {
+ // Covers the OK case as well: processReturn maps Result::OK to NO_ERROR.
+ return processReturn("setConnectedState_7_1", ret);
+ }
+ // The 7.1 method is not supported; remember that and use the legacy path below.
+ supportsSetConnectedState7_1 = false;
+ }
+#endif
+ DeviceAddress hidlAddress;
+ if (status_t result = CoreUtils::deviceAddressFromHal(
+ port->ext.device.type, port->ext.device.address, &hidlAddress);
+ result != NO_ERROR) {
+ return result;
+ }
+ return processReturn("setConnectedState", mDevice->setConnectedState(hidlAddress, connected));
+}
+
+status_t DeviceHalHidl::dump(int fd, const Vector<String16>& args) {
if (mDevice == 0) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mDevice->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mDevice->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
// TODO(b/111997867, b/177271958) Workaround - remove when fixed.
@@ -478,5 +524,4 @@
return processReturn("dump", ret);
}
-} // namespace CPP_VERSION
} // namespace android
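
The setConnectedState() implementation above uses a capability-probing pattern worth noting: try the 7.1 method once, and if the HAL answers NOT_SUPPORTED, latch supportsSetConnectedState7_1 to false so that later calls go straight to the legacy DeviceAddress path. In generic sketch form (tryNewApi, legacyApi, and kNotSupported are hypothetical stand-ins):

    // Sketch of the latch-on-first-NOT_SUPPORTED fallback used above.
    status_t callWithFallback() {
        if (mSupportsNewApi) {                    // starts out true
            const status_t status = tryNewApi();  // hypothetical 7.1-style call
            if (status != kNotSupported) {        // hypothetical sentinel
                return status;                    // success or a genuine error
            }
            mSupportsNewApi = false;              // never probe the new API again
        }
        return legacyApi();                       // hypothetical legacy call
    }
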
diff --git a/media/libaudiohal/impl/DeviceHalHidl.h b/media/libaudiohal/impl/DeviceHalHidl.h
index 2c847cf..fb0be5a 100644
--- a/media/libaudiohal/impl/DeviceHalHidl.h
+++ b/media/libaudiohal/impl/DeviceHalHidl.h
@@ -20,15 +20,11 @@
#include PATH(android/hardware/audio/FILE_VERSION/IDevice.h)
#include PATH(android/hardware/audio/FILE_VERSION/IPrimaryDevice.h)
#include <media/audiohal/DeviceHalInterface.h>
+#include <media/audiohal/EffectHalInterface.h>
#include "ConversionHelperHidl.h"
-using ::android::hardware::audio::CPP_VERSION::IDevice;
-using ::android::hardware::audio::CPP_VERSION::IPrimaryDevice;
-using ::android::hardware::Return;
-
namespace android {
-namespace CPP_VERSION {
class DeviceHalHidl : public DeviceHalInterface, public ConversionHelperHidl
{
@@ -119,15 +115,21 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t setConnectedState(const struct audio_port_v7 *port, bool connected) override;
+
+ status_t dump(int fd, const Vector<String16>& args) override;
private:
friend class DevicesFactoryHalHidl;
- sp<IDevice> mDevice;
- sp<IPrimaryDevice> mPrimaryDevice; // Null if it's not a primary device.
+ sp<::android::hardware::audio::CPP_VERSION::IDevice> mDevice;
+ // Null if it's not a primary device.
+ sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice> mPrimaryDevice;
+ bool supportsSetConnectedState7_1 = true;
// Can not be constructed directly by clients.
- explicit DeviceHalHidl(const sp<IDevice>& device);
+ explicit DeviceHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IDevice>& device);
+ explicit DeviceHalHidl(
+ const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& device);
// The destructor automatically closes the device.
virtual ~DeviceHalHidl();
@@ -135,7 +137,6 @@
template <typename HalPort> status_t getAudioPortImpl(HalPort *port);
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/DeviceHalLocal.cpp b/media/libaudiohal/impl/DeviceHalLocal.cpp
index af7dc1a..e473e41 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.cpp
+++ b/media/libaudiohal/impl/DeviceHalLocal.cpp
@@ -17,13 +17,13 @@
#define LOG_TAG "DeviceHalLocal"
//#define LOG_NDEBUG 0
+#include <media/AudioParameter.h>
#include <utils/Log.h>
#include "DeviceHalLocal.h"
#include "StreamHalLocal.h"
namespace android {
-namespace CPP_VERSION {
DeviceHalLocal::DeviceHalLocal(audio_hw_device_t *dev)
: mDev(dev) {
@@ -233,7 +233,15 @@
return INVALID_OPERATION;
}
-status_t DeviceHalLocal::dump(int fd) {
+status_t DeviceHalLocal::setConnectedState(const struct audio_port_v7 *port, bool connected) {
+ AudioParameter param(String8(port->ext.device.address));
+ const String8 key(connected ?
+ AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
+ param.addInt(key, port->ext.device.type);
+ return setParameters(param.toString());
+}
+
+status_t DeviceHalLocal::dump(int fd, const Vector<String16>& /* args */) {
return mDev->dump(mDev, fd);
}
@@ -245,5 +253,4 @@
mDev->close_input_stream(mDev, stream_in);
}
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/DeviceHalLocal.h b/media/libaudiohal/impl/DeviceHalLocal.h
index 46b510b..79db930 100644
--- a/media/libaudiohal/impl/DeviceHalLocal.h
+++ b/media/libaudiohal/impl/DeviceHalLocal.h
@@ -21,7 +21,6 @@
#include <media/audiohal/DeviceHalInterface.h>
namespace android {
-namespace CPP_VERSION {
class DeviceHalLocal : public DeviceHalInterface
{
@@ -112,7 +111,9 @@
status_t addDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
status_t removeDeviceEffect(audio_port_handle_t device, sp<EffectHalInterface> effect) override;
- virtual status_t dump(int fd);
+ status_t setConnectedState(const struct audio_port_v7 *port, bool connected) override;
+
+ status_t dump(int fd, const Vector<String16>& args) override;
void closeOutputStream(struct audio_stream_out *stream_out);
void closeInputStream(struct audio_stream_in *stream_in);
@@ -131,7 +132,6 @@
virtual ~DeviceHalLocal();
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
index 1c0eacb..f475729 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.cpp
@@ -31,14 +31,13 @@
#include "DevicesFactoryHalHidl.h"
using ::android::hardware::audio::CPP_VERSION::IDevice;
-using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
using ::android::hardware::Return;
using ::android::hardware::Void;
using ::android::hidl::manager::V1_0::IServiceManager;
using ::android::hidl::manager::V1_0::IServiceNotification;
namespace android {
-namespace CPP_VERSION {
class ServiceNotificationListener : public IServiceNotification {
public:
@@ -115,14 +114,37 @@
if (status != OK) return status;
Result retval = Result::NOT_INITIALIZED;
for (const auto& factory : factories) {
- Return<void> ret = factory->openDevice(
- hidlId,
- [&](Result r, const sp<IDevice>& result) {
- retval = r;
- if (retval == Result::OK) {
- *device = new DeviceHalHidl(result);
- }
- });
+ Return<void> ret;
+ if (strcmp(name, AUDIO_HARDWARE_MODULE_ID_PRIMARY) == 0) {
+ // In V7.1 it's not possible to cast IDevice back to IPrimaryDevice,
+ // thus openPrimaryDevice must be used.
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ ret = factory->openPrimaryDevice_7_1(
+#else
+ ret = factory->openPrimaryDevice(
+#endif
+ [&](Result r,
+ const sp<::android::hardware::audio::CPP_VERSION::IPrimaryDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ } else {
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+ ret = factory->openDevice_7_1(
+#else
+ ret = factory->openDevice(
+#endif
+ hidlId,
+ [&](Result r,
+ const sp<::android::hardware::audio::CPP_VERSION::IDevice>& result) {
+ retval = r;
+ if (retval == Result::OK) {
+ *device = new DeviceHalHidl(result);
+ }
+ });
+ }
if (!ret.isOk()) return FAILED_TRANSACTION;
switch (retval) {
// Device was found and was initialized successfully.
@@ -178,7 +200,8 @@
return NO_ERROR;
}
-void DevicesFactoryHalHidl::addDeviceFactory(sp<IDevicesFactory> factory, bool needToNotify) {
+void DevicesFactoryHalHidl::addDeviceFactory(
+ sp<::android::hardware::audio::CPP_VERSION::IDevicesFactory> factory, bool needToNotify) {
// It is assumed that the DevicesFactoryHalInterface instance is owned
// by AudioFlinger and thus have the same lifespan.
factory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
@@ -198,10 +221,10 @@
}
}
-std::vector<sp<IDevicesFactory>> DevicesFactoryHalHidl::copyDeviceFactories() {
+std::vector<sp<::android::hardware::audio::CPP_VERSION::IDevicesFactory>>
+ DevicesFactoryHalHidl::copyDeviceFactories() {
std::lock_guard<std::mutex> lock(mLock);
return mDeviceFactories;
}
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHidl.h b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
index 6f84efe..fd8dbc4 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHidl.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHidl.h
@@ -30,7 +30,6 @@
using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
namespace android {
-namespace CPP_VERSION {
class DevicesFactoryHalHidl : public DevicesFactoryHalInterface
{
@@ -59,7 +58,6 @@
virtual ~DevicesFactoryHalHidl() = default;
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
index cde8d85..d684c27 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.cpp
@@ -22,7 +22,6 @@
#include "DevicesFactoryHalLocal.h"
namespace android {
-namespace CPP_VERSION {
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid(sp<IDevicesFactory> hidlFactory)
: mLocalFactory(new DevicesFactoryHalLocal()),
@@ -51,11 +50,9 @@
return INVALID_OPERATION;
}
-} // namespace CPP_VERSION
-
extern "C" __attribute__((visibility("default"))) void* createIDevicesFactory() {
auto service = hardware::audio::CPP_VERSION::IDevicesFactory::getService();
- return service ? new CPP_VERSION::DevicesFactoryHalHybrid(service) : nullptr;
+ return service ? new DevicesFactoryHalHybrid(service) : nullptr;
}
} // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
index 568a1fb..6b2b845 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalHybrid.h
@@ -25,7 +25,6 @@
using ::android::hardware::audio::CPP_VERSION::IDevicesFactory;
namespace android {
-namespace CPP_VERSION {
class DevicesFactoryHalHybrid : public DevicesFactoryHalInterface
{
@@ -45,7 +44,6 @@
sp<DevicesFactoryHalInterface> mHidlFactory;
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_HYBRID_H
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
index af67ff5..13a9acd 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.cpp
@@ -26,7 +26,6 @@
#include "DevicesFactoryHalLocal.h"
namespace android {
-namespace CPP_VERSION {
static status_t load_audio_interface(const char *if_name, audio_hw_device_t **dev)
{
@@ -67,5 +66,4 @@
return rc;
}
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/DevicesFactoryHalLocal.h b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
index 32bf362..eacf109 100644
--- a/media/libaudiohal/impl/DevicesFactoryHalLocal.h
+++ b/media/libaudiohal/impl/DevicesFactoryHalLocal.h
@@ -24,7 +24,6 @@
#include "DeviceHalLocal.h"
namespace android {
-namespace CPP_VERSION {
class DevicesFactoryHalLocal : public DevicesFactoryHalInterface
{
@@ -50,7 +49,6 @@
virtual ~DevicesFactoryHalLocal() {}
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_DEVICES_FACTORY_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.cpp b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
index 5367972..65297af 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.cpp
@@ -31,7 +31,6 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
// static
uint64_t EffectBufferHalHidl::makeUniqueId() {
@@ -144,5 +143,4 @@
}
} // namespace effect
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/EffectBufferHalHidl.h b/media/libaudiohal/impl/EffectBufferHalHidl.h
index 4826813..a9df68b 100644
--- a/media/libaudiohal/impl/EffectBufferHalHidl.h
+++ b/media/libaudiohal/impl/EffectBufferHalHidl.h
@@ -28,7 +28,6 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -74,7 +73,6 @@
status_t init();
};
-} // namespace CPP_VERSION
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.cpp b/media/libaudiohal/impl/EffectHalHidl.cpp
index 51ad146..1bb1e5f 100644
--- a/media/libaudiohal/impl/EffectHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectHalHidl.cpp
@@ -36,7 +36,6 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -310,6 +309,5 @@
return result;
}
-} // namespace CPP_VERSION
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectHalHidl.h b/media/libaudiohal/impl/EffectHalHidl.h
index 8e46638..07745db 100644
--- a/media/libaudiohal/impl/EffectHalHidl.h
+++ b/media/libaudiohal/impl/EffectHalHidl.h
@@ -28,7 +28,6 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -63,7 +62,7 @@
virtual status_t dump(int fd);
- uint64_t effectId() const { return mEffectId; }
+ virtual uint64_t effectId() const { return mEffectId; }
private:
friend class EffectsFactoryHalHidl;
@@ -96,7 +95,6 @@
status_t setProcessBuffers();
};
-} // namespace CPP_VERSION
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
index f042b92..90954b2 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.cpp
@@ -33,7 +33,6 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
using namespace ::android::hardware::audio::common::CPP_VERSION;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
@@ -73,7 +72,9 @@
uint32_t index, effect_descriptor_t *pDescriptor) {
// TODO: We need somehow to track the changes on the server side
// or figure out how to convert everybody to query all the descriptors at once.
- // TODO: check for nullptr
+ if (pDescriptor == nullptr) {
+ return BAD_VALUE;
+ }
if (mLastDescriptors.size() == 0) {
status_t queryResult = queryAllDescriptors();
if (queryResult != OK) return queryResult;
@@ -85,7 +86,9 @@
status_t EffectsFactoryHalHidl::getDescriptor(
const effect_uuid_t *pEffectUuid, effect_descriptor_t *pDescriptor) {
- // TODO: check for nullptr
+ if (pDescriptor == nullptr || pEffectUuid == nullptr) {
+ return BAD_VALUE;
+ }
if (mEffectsFactory == 0) return NO_INIT;
Uuid hidlUuid;
UuidUtils::uuidFromHal(*pEffectUuid, &hidlUuid);
@@ -105,6 +108,33 @@
return processReturn(__FUNCTION__, ret);
}
+status_t EffectsFactoryHalHidl::getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) {
+ if (pEffectType == nullptr || descriptors == nullptr) {
+ return BAD_VALUE;
+ }
+
+ uint32_t numEffects = 0;
+ status_t status = queryNumberEffects(&numEffects);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal queryNumberEffects", __func__, status);
+ return status;
+ }
+
+ for (uint32_t i = 0; i < numEffects; i++) {
+ effect_descriptor_t descriptor;
+ status = getDescriptor(i, &descriptor);
+ if (status != NO_ERROR) {
+ ALOGW("%s error %d from FactoryHal getDescriptor", __func__, status);
+ continue;
+ }
+ if (memcmp(&descriptor.type, pEffectType, sizeof(effect_uuid_t)) == 0) {
+ descriptors->push_back(descriptor);
+ }
+ }
+ return descriptors->empty() ? NAME_NOT_FOUND : NO_ERROR;
+}
+
status_t EffectsFactoryHalHidl::createEffect(
const effect_uuid_t *pEffectUuid, int32_t sessionId, int32_t ioId,
int32_t deviceId __unused, sp<EffectHalInterface> *effect) {
@@ -173,12 +203,11 @@
return EffectBufferHalHidl::mirror(external, size, buffer);
}
-} // namespace CPP_VERSION
} // namespace effect
extern "C" __attribute__((visibility("default"))) void* createIEffectsFactory() {
auto service = hardware::audio::effect::CPP_VERSION::IEffectsFactory::getService();
- return service ? new effect::CPP_VERSION::EffectsFactoryHalHidl(service) : nullptr;
+ return service ? new effect::EffectsFactoryHalHidl(service) : nullptr;
}
} // namespace android
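
Note: the new getDescriptors() enumerates every effect once and keeps the descriptors whose type UUID matches, returning NAME_NOT_FOUND when nothing matches and NO_ERROR otherwise. A hedged caller sketch; the create() factory and the FX_IID_AEC type UUID are assumed from the existing audiohal and effect headers:

sp<EffectsFactoryHalInterface> factory = EffectsFactoryHalInterface::create();
std::vector<effect_descriptor_t> descriptors;
const status_t status = factory->getDescriptors(FX_IID_AEC, &descriptors);
if (status == NAME_NOT_FOUND) {
    // No effect of this type is installed; distinct from a transport error.
} else if (status == NO_ERROR) {
    for (const auto& d : descriptors) {
        ALOGV("found %s by %s", d.name, d.implementor);
    }
}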
diff --git a/media/libaudiohal/impl/EffectsFactoryHalHidl.h b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
index 5fa85e7..7491133 100644
--- a/media/libaudiohal/impl/EffectsFactoryHalHidl.h
+++ b/media/libaudiohal/impl/EffectsFactoryHalHidl.h
@@ -24,10 +24,9 @@
namespace android {
namespace effect {
-namespace CPP_VERSION {
using ::android::hardware::hidl_vec;
-using ::android::CPP_VERSION::ConversionHelperHidl;
+using ::android::ConversionHelperHidl;
using namespace ::android::hardware::audio::effect::CPP_VERSION;
class EffectsFactoryHalHidl : public EffectsFactoryHalInterface, public ConversionHelperHidl
@@ -45,6 +44,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor);
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors);
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
@@ -67,7 +69,6 @@
status_t queryAllDescriptors();
};
-} // namespace CPP_VERSION
} // namespace effect
} // namespace android
diff --git a/media/libaudiohal/impl/ParameterUtils.h b/media/libaudiohal/impl/ParameterUtils.h
index 9cab72e..b5dcb9d 100644
--- a/media/libaudiohal/impl/ParameterUtils.h
+++ b/media/libaudiohal/impl/ParameterUtils.h
@@ -16,17 +16,16 @@
#pragma once
-#include PATH(android/hardware/audio/FILE_VERSION/types.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/types.h)
#include <hidl/HidlSupport.h>
-using ::android::hardware::audio::CPP_VERSION::ParameterValue;
-using ::android::hardware::audio::CPP_VERSION::Result;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::ParameterValue;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::Result;
using ::android::hardware::Return;
using ::android::hardware::hidl_vec;
using ::android::hardware::hidl_string;
namespace android {
-namespace CPP_VERSION {
namespace utils {
#if MAJOR_VERSION == 2
@@ -56,5 +55,4 @@
#endif
} // namespace utils
-} // namespace CPP_VERSION
} // namespace android
diff --git a/media/libaudiohal/impl/StreamHalHidl.cpp b/media/libaudiohal/impl/StreamHalHidl.cpp
index 129b1c1..6916ca1 100644
--- a/media/libaudiohal/impl/StreamHalHidl.cpp
+++ b/media/libaudiohal/impl/StreamHalHidl.cpp
@@ -23,29 +23,26 @@
#include <mediautils/SchedulingPolicyService.h>
#include <utils/Log.h>
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutCallback.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStreamOutCallback.h)
#include <HidlUtils.h>
#include <util/CoreUtils.h>
#include "DeviceHalHidl.h"
-#include "EffectHalHidl.h"
#include "ParameterUtils.h"
#include "StreamHalHidl.h"
-using ::android::hardware::audio::common::CPP_VERSION::implementation::HidlUtils;
-using ::android::hardware::audio::CPP_VERSION::implementation::CoreUtils;
+using ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::implementation::HidlUtils;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::implementation::CoreUtils;
using ::android::hardware::MQDescriptorSync;
using ::android::hardware::Return;
using ::android::hardware::Void;
namespace android {
-namespace CPP_VERSION {
-using EffectHalHidl = ::android::effect::CPP_VERSION::EffectHalHidl;
-using ReadCommand = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadCommand;
+using ReadCommand = ::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn::ReadCommand;
-using namespace ::android::hardware::audio::common::CPP_VERSION;
-using namespace ::android::hardware::audio::CPP_VERSION;
+using namespace ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION;
+using namespace ::android::hardware::audio::CORE_TYPES_CPP_VERSION;
StreamHalHidl::StreamHalHidl(IStream *stream)
: ConversionHelperHidl("Stream"),
@@ -137,14 +134,12 @@
status_t StreamHalHidl::addEffect(sp<EffectHalInterface> effect) {
if (!mStream) return NO_INIT;
- return processReturn("addEffect", mStream->addEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
+ return processReturn("addEffect", mStream->addEffect(effect->effectId()));
}
status_t StreamHalHidl::removeEffect(sp<EffectHalInterface> effect) {
if (!mStream) return NO_INIT;
- return processReturn("removeEffect", mStream->removeEffect(
- static_cast<EffectHalHidl*>(effect.get())->effectId()));
+ return processReturn("removeEffect", mStream->removeEffect(effect->effectId()));
}
status_t StreamHalHidl::standby() {
@@ -152,11 +147,13 @@
return processReturn("standby", mStream->standby());
}
-status_t StreamHalHidl::dump(int fd) {
+status_t StreamHalHidl::dump(int fd, const Vector<String16>& args) {
if (!mStream) return NO_INIT;
native_handle_t* hidlHandle = native_handle_create(1, 0);
hidlHandle->data[0] = fd;
- Return<void> ret = mStream->debug(hidlHandle, {} /* options */);
+ hidl_vec<hidl_string> hidlArgs;
+ argsFromHal(args, &hidlArgs);
+ Return<void> ret = mStream->debug(hidlHandle, hidlArgs);
native_handle_delete(hidlHandle);
// TODO(b/111997867, b/177271958) Workaround - remove when fixed.
@@ -326,7 +323,8 @@
} // namespace
-StreamOutHalHidl::StreamOutHalHidl(const sp<IStreamOut>& stream)
+StreamOutHalHidl::StreamOutHalHidl(
+ const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& stream)
: StreamHalHidl(stream.get()), mStream(stream), mWriterClient(0), mEfGroup(nullptr) {
}
@@ -642,7 +640,11 @@
#elif MAJOR_VERSION >= 4
status_t StreamOutHalHidl::updateSourceMetadata(
const StreamOutHalInterface::SourceMetadata& sourceMetadata) {
- CPP_VERSION::SourceMetadata hidlMetadata;
+#if MAJOR_VERSION == 4
+ ::android::hardware::audio::CORE_TYPES_CPP_VERSION::SourceMetadata hidlMetadata;
+#else
+ ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::SourceMetadata hidlMetadata;
+#endif
if (status_t status = CoreUtils::sourceMetadataFromHalV7(
sourceMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
status != OK) {
@@ -684,6 +686,7 @@
// Codec format callback is supported starting from audio HAL V6.0
return INVALID_OPERATION;
}
+
#else
status_t StreamOutHalHidl::getDualMonoMode(audio_dual_mono_mode_t* mode) {
@@ -753,7 +756,7 @@
static_cast<TimestretchFallbackMode>(playbackRate.mFallbackMode)}));
}
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutEventCallback.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStreamOutEventCallback.h)
namespace {
@@ -789,6 +792,84 @@
}
#endif
+#if MAJOR_VERSION == 7 && MINOR_VERSION == 1
+using hardware::audio::V7_1::LatencyMode;
+
+status_t StreamOutHalHidl::setLatencyMode(audio_latency_mode_t mode) {
+ if (mStream == 0) return NO_INIT;
+ return processReturn(
+ "setLatencyMode", mStream->setLatencyMode(static_cast<LatencyMode>(mode)));
+};
+
+status_t StreamOutHalHidl::getRecommendedLatencyModes(std::vector<audio_latency_mode_t> *modes) {
+ if (!mStream) return NO_INIT;
+ Result retval;
+ Return<void> ret = mStream->getRecommendedLatencyModes(
+ [&](Result r, hidl_vec<LatencyMode> hidlModes) {
+ retval = r;
+ for (size_t i = 0; i < hidlModes.size(); i++) {
+ modes->push_back(static_cast<audio_latency_mode_t>(hidlModes[i]));
+ }
+ });
+ return processReturn("getRecommendedLatencyModes", ret, retval);
+};
+
+#include PATH(android/hardware/audio/FILE_VERSION/IStreamOutLatencyModeCallback.h)
+
+using hardware::audio::V7_1::IStreamOutLatencyModeCallback;
+
+namespace {
+struct StreamOutLatencyModeCallback : public IStreamOutLatencyModeCallback {
+ StreamOutLatencyModeCallback(const wp<StreamOutHalHidl>& stream) : mStream(stream) {}
+
+ // IStreamOutLatencyModeCallback implementation
+ Return<void> onRecommendedLatencyModeChanged(const hidl_vec<LatencyMode>& hidlModes) override {
+ sp<StreamOutHalHidl> stream = mStream.promote();
+ if (stream != nullptr) {
+ std::vector<audio_latency_mode_t> modes;
+ for (size_t i = 0; i < hidlModes.size(); i++) {
+ modes.push_back(static_cast<audio_latency_mode_t>(hidlModes[i]));
+ }
+ stream->onRecommendedLatencyModeChanged(modes);
+ }
+ return Void();
+ }
+
+ private:
+ wp<StreamOutHalHidl> mStream;
+};
+} // namespace
+
+status_t StreamOutHalHidl::setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback) {
+
+ if (mStream == nullptr) return NO_INIT;
+ mLatencyModeCallback = callback;
+ status_t status = processReturn(
+ "setLatencyModeCallback",
+ mStream->setLatencyModeCallback(
+ callback.get() == nullptr ? nullptr : new StreamOutLatencyModeCallback(this)));
+ return status;
+};
+
+#else
+
+status_t StreamOutHalHidl::setLatencyMode(audio_latency_mode_t mode __unused) {
+ return INVALID_OPERATION;
+};
+
+status_t StreamOutHalHidl::getRecommendedLatencyModes(
+ std::vector<audio_latency_mode_t> *modes __unused) {
+ return INVALID_OPERATION;
+};
+
+status_t StreamOutHalHidl::setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback __unused) {
+ return INVALID_OPERATION;
+};
+
+#endif
+
void StreamOutHalHidl::onWriteReady() {
sp<StreamOutHalInterfaceCallback> callback = mCallback.load().promote();
if (callback == 0) return;
@@ -817,8 +898,16 @@
callback->onCodecFormatChanged(metadataBs);
}
+void StreamOutHalHidl::onRecommendedLatencyModeChanged(
+ const std::vector<audio_latency_mode_t>& modes) {
+ sp<StreamOutHalInterfaceLatencyModeCallback> callback = mLatencyModeCallback.load().promote();
+ if (callback == nullptr) return;
+ callback->onRecommendedLatencyModeChanged(modes);
+}
-StreamInHalHidl::StreamInHalHidl(const sp<IStreamIn>& stream)
+
+StreamInHalHidl::StreamInHalHidl(
+ const sp<::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn>& stream)
: StreamHalHidl(stream.get()), mStream(stream), mReaderClient(0), mEfGroup(nullptr) {
}
@@ -1031,7 +1120,11 @@
status_t StreamInHalHidl::updateSinkMetadata(const
StreamInHalInterface::SinkMetadata& sinkMetadata) {
- CPP_VERSION::SinkMetadata hidlMetadata;
+#if MAJOR_VERSION == 4
+ ::android::hardware::audio::CORE_TYPES_CPP_VERSION::SinkMetadata hidlMetadata;
+#else
+ ::android::hardware::audio::common::COMMON_TYPES_CPP_VERSION::SinkMetadata hidlMetadata;
+#endif
if (status_t status = CoreUtils::sinkMetadataFromHalV7(
sinkMetadata.tracks, true /*ignoreNonVendorTags*/, &hidlMetadata);
status != OK) {
@@ -1066,5 +1159,4 @@
}
#endif
-} // namespace CPP_VERSION
} // namespace android
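
Note: the hunks above repeatedly use the two-layer error idiom that getRecommendedLatencyModes() demonstrates. The Return<> object reports transport failures (e.g. a dead HAL process), while the Result delivered through the synchronous lambda reports the HAL's own status; processReturn() folds both into a single status_t. A condensed sketch of the idiom, assuming processReturn() behaves as elsewhere in this file:

Result retval = Result::NOT_INITIALIZED;                 // HAL-level status
Return<void> ret = mStream->getRecommendedLatencyModes(  // transport-level status
        [&](Result r, const hidl_vec<LatencyMode>& hidlModes) {
            retval = r;
            // convert hidlModes to std::vector<audio_latency_mode_t> here
        });
// !ret.isOk(): the transaction itself failed; retval != Result::OK: the HAL
// executed the call but rejected it. processReturn() reports either as an error.
return processReturn("getRecommendedLatencyModes", ret, retval);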
diff --git a/media/libaudiohal/impl/StreamHalHidl.h b/media/libaudiohal/impl/StreamHalHidl.h
index 970903b..44bf60a 100644
--- a/media/libaudiohal/impl/StreamHalHidl.h
+++ b/media/libaudiohal/impl/StreamHalHidl.h
@@ -19,30 +19,29 @@
#include <atomic>
-#include PATH(android/hardware/audio/FILE_VERSION/IStream.h)
-#include PATH(android/hardware/audio/FILE_VERSION/IStreamIn.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStream.h)
+#include PATH(android/hardware/audio/CORE_TYPES_FILE_VERSION/IStreamIn.h)
#include PATH(android/hardware/audio/FILE_VERSION/IStreamOut.h)
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>
+#include <media/audiohal/EffectHalInterface.h>
#include <media/audiohal/StreamHalInterface.h>
#include <mediautils/Synchronization.h>
#include "ConversionHelperHidl.h"
#include "StreamPowerLog.h"
-using ::android::hardware::audio::CPP_VERSION::IStream;
-using ::android::hardware::audio::CPP_VERSION::IStreamIn;
-using ::android::hardware::audio::CPP_VERSION::IStreamOut;
+using ::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStream;
using ::android::hardware::EventFlag;
using ::android::hardware::MessageQueue;
using ::android::hardware::Return;
-using ReadParameters = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadParameters;
-using ReadStatus = ::android::hardware::audio::CPP_VERSION::IStreamIn::ReadStatus;
+using ReadParameters =
+ ::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn::ReadParameters;
+using ReadStatus = ::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn::ReadStatus;
using WriteCommand = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteCommand;
using WriteStatus = ::android::hardware::audio::CPP_VERSION::IStreamOut::WriteStatus;
namespace android {
-namespace CPP_VERSION {
class DeviceHalHidl;
@@ -71,7 +70,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start();
@@ -191,6 +190,13 @@
// Methods used by StreamCodecFormatCallback (HIDL).
void onCodecFormatChanged(const std::basic_string<uint8_t>& metadataBs);
+ status_t setLatencyMode(audio_latency_mode_t mode) override;
+ status_t getRecommendedLatencyModes(std::vector<audio_latency_mode_t> *modes) override;
+ status_t setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback) override;
+
+ void onRecommendedLatencyModeChanged(const std::vector<audio_latency_mode_t>& modes);
+
private:
friend class DeviceHalHidl;
typedef MessageQueue<WriteCommand, hardware::kSynchronizedReadWrite> CommandMQ;
@@ -199,7 +205,9 @@
mediautils::atomic_wp<StreamOutHalInterfaceCallback> mCallback;
mediautils::atomic_wp<StreamOutHalInterfaceEventCallback> mEventCallback;
- const sp<IStreamOut> mStream;
+ mediautils::atomic_wp<StreamOutHalInterfaceLatencyModeCallback> mLatencyModeCallback;
+
+ const sp<::android::hardware::audio::CPP_VERSION::IStreamOut> mStream;
std::unique_ptr<CommandMQ> mCommandMQ;
std::unique_ptr<DataMQ> mDataMQ;
std::unique_ptr<StatusMQ> mStatusMQ;
@@ -207,7 +215,7 @@
EventFlag* mEfGroup;
// Can not be constructed directly by clients.
- StreamOutHalHidl(const sp<IStreamOut>& stream);
+ StreamOutHalHidl(const sp<::android::hardware::audio::CPP_VERSION::IStreamOut>& stream);
virtual ~StreamOutHalHidl();
@@ -255,7 +263,7 @@
typedef MessageQueue<uint8_t, hardware::kSynchronizedReadWrite> DataMQ;
typedef MessageQueue<ReadStatus, hardware::kSynchronizedReadWrite> StatusMQ;
- const sp<IStreamIn> mStream;
+ const sp<::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn> mStream;
std::unique_ptr<CommandMQ> mCommandMQ;
std::unique_ptr<DataMQ> mDataMQ;
std::unique_ptr<StatusMQ> mStatusMQ;
@@ -263,7 +271,8 @@
EventFlag* mEfGroup;
// Can not be constructed directly by clients.
- StreamInHalHidl(const sp<IStreamIn>& stream);
+ StreamInHalHidl(
+ const sp<::android::hardware::audio::CORE_TYPES_CPP_VERSION::IStreamIn>& stream);
virtual ~StreamInHalHidl();
@@ -273,7 +282,6 @@
status_t prepareForReading(size_t bufferSize);
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_HIDL_H
diff --git a/media/libaudiohal/impl/StreamHalLocal.cpp b/media/libaudiohal/impl/StreamHalLocal.cpp
index 34bd5df..477f510 100644
--- a/media/libaudiohal/impl/StreamHalLocal.cpp
+++ b/media/libaudiohal/impl/StreamHalLocal.cpp
@@ -27,7 +27,6 @@
#include "StreamHalLocal.h"
namespace android {
-namespace CPP_VERSION {
StreamHalLocal::StreamHalLocal(audio_stream_t *stream, sp<DeviceHalLocal> device)
: mDevice(device),
@@ -87,7 +86,8 @@
return mStream->standby(mStream);
}
-status_t StreamHalLocal::dump(int fd) {
+status_t StreamHalLocal::dump(int fd, const Vector<String16>& args) {
+ (void) args;
status_t status = mStream->dump(mStream, fd);
mStreamPowerLog.dump(fd);
return status;
@@ -517,7 +517,4 @@
}
#endif
-} // namespace CPP_VERSION
} // namespace android
-
-
diff --git a/media/libaudiohal/impl/StreamHalLocal.h b/media/libaudiohal/impl/StreamHalLocal.h
index b260495..770137f 100644
--- a/media/libaudiohal/impl/StreamHalLocal.h
+++ b/media/libaudiohal/impl/StreamHalLocal.h
@@ -21,7 +21,6 @@
#include "StreamPowerLog.h"
namespace android {
-namespace CPP_VERSION {
class DeviceHalLocal;
@@ -50,7 +49,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby();
- virtual status_t dump(int fd);
+ virtual status_t dump(int fd, const Vector<String16>& args) override;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
@@ -169,6 +168,18 @@
status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) override;
+ status_t setLatencyMode(audio_latency_mode_t mode __unused) override {
+ return INVALID_OPERATION;
+ }
+ status_t getRecommendedLatencyModes(
+ std::vector<audio_latency_mode_t> *modes __unused) override {
+ return INVALID_OPERATION;
+ }
+ status_t setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback __unused) override {
+ return INVALID_OPERATION;
+ }
+
private:
audio_stream_out_t *mStream;
wp<StreamOutHalInterfaceCallback> mCallback;
@@ -246,7 +257,6 @@
void doUpdateSinkMetadataV7(const SinkMetadata& sinkMetadata);
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_HAL_LOCAL_H
diff --git a/media/libaudiohal/impl/StreamPowerLog.h b/media/libaudiohal/impl/StreamPowerLog.h
index f6a554b..c08ee47 100644
--- a/media/libaudiohal/impl/StreamPowerLog.h
+++ b/media/libaudiohal/impl/StreamPowerLog.h
@@ -24,7 +24,6 @@
#include <system/audio.h>
namespace android {
-namespace CPP_VERSION {
class StreamPowerLog {
public:
@@ -99,7 +98,6 @@
size_t mFrameSize;
};
-} // namespace CPP_VERSION
} // namespace android
#endif // ANDROID_HARDWARE_STREAM_POWER_LOG_H
diff --git a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
index 29ef011..f0a0b29 100644
--- a/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/DeviceHalInterface.h
@@ -120,7 +120,10 @@
virtual status_t removeDeviceEffect(
audio_port_handle_t device, sp<EffectHalInterface> effect) = 0;
- virtual status_t dump(int fd) = 0;
+ // Update the connection status of an external device.
+ virtual status_t setConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
+
+ virtual status_t dump(int fd, const Vector<String16>& args) = 0;
protected:
// Subclasses can not be constructed directly by clients.
diff --git a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
index 03165bd..2969c92 100644
--- a/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectHalInterface.h
@@ -57,6 +57,9 @@
virtual status_t dump(int fd) = 0;
+ // Unique effect ID to use with the core HAL.
+ virtual uint64_t effectId() const = 0;
+
protected:
// Subclasses can not be constructed directly by clients.
EffectHalInterface() {}
diff --git a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
index 9fb56ae..3e505bd 100644
--- a/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/EffectsFactoryHalInterface.h
@@ -37,6 +37,9 @@
virtual status_t getDescriptor(const effect_uuid_t *pEffectUuid,
effect_descriptor_t *pDescriptor) = 0;
+ virtual status_t getDescriptors(const effect_uuid_t *pEffectType,
+ std::vector<effect_descriptor_t> *descriptors) = 0;
+
// Creates an effect engine of the specified type.
// To release the effect engine, it is necessary to release references
// to the returned effect object.
diff --git a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
index 2be12fb..e12fe77 100644
--- a/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
+++ b/media/libaudiohal/include/media/audiohal/StreamHalInterface.h
@@ -25,6 +25,7 @@
#include <utils/Errors.h>
#include <utils/RefBase.h>
#include <utils/String8.h>
+#include <utils/Vector.h>
namespace android {
@@ -69,7 +70,7 @@
// Put the audio hardware input/output into standby mode.
virtual status_t standby() = 0;
- virtual status_t dump(int fd) = 0;
+ virtual status_t dump(int fd, const Vector<String16>& args = {}) = 0;
// Start a stream operating in mmap mode.
virtual status_t start() = 0;
@@ -116,6 +117,18 @@
virtual ~StreamOutHalInterfaceEventCallback() {}
};
+class StreamOutHalInterfaceLatencyModeCallback : public virtual RefBase {
+public:
+ /**
+ * Called with the new list of supported latency modes when a change occurs.
+ */
+ virtual void onRecommendedLatencyModeChanged(std::vector<audio_latency_mode_t> modes) = 0;
+
+protected:
+ StreamOutHalInterfaceLatencyModeCallback() {}
+ virtual ~StreamOutHalInterfaceLatencyModeCallback() {}
+};
+
class StreamOutHalInterface : public virtual StreamHalInterface {
public:
// Return the audio hardware driver estimated latency in milliseconds.
@@ -193,6 +206,42 @@
virtual status_t setEventCallback(const sp<StreamOutHalInterfaceEventCallback>& callback) = 0;
+ /**
+ * Indicates the requested latency mode for this output stream.
+ *
+     * The requested mode can be one of the modes returned by the
+     * getRecommendedLatencyModes() API.
+ *
+ * @param mode the requested latency mode.
+ * @return operation completion status.
+ */
+ virtual status_t setLatencyMode(audio_latency_mode_t mode) = 0;
+
+ /**
+ * Indicates which latency modes are currently supported on this output stream.
+     * If the transport protocol (e.g. Bluetooth A2DP) used by this output stream to reach
+ * the output device supports variable latency modes, the HAL indicates which
+ * modes are currently supported.
+ * The framework can then call setLatencyMode() with one of the supported modes to select
+ * the desired operation mode.
+ *
+     * @param modes currently supported latency modes.
+ * @return operation completion status.
+ */
+ virtual status_t getRecommendedLatencyModes(std::vector<audio_latency_mode_t> *modes) = 0;
+
+ /**
+ * Set the callback interface for notifying changes in supported latency modes.
+ *
+ * Calling this method with a null pointer will result in releasing
+ * the callback.
+ *
+ * @param callback the registered callback or null to unregister.
+ * @return operation completion status.
+ */
+ virtual status_t setLatencyModeCallback(
+ const sp<StreamOutHalInterfaceLatencyModeCallback>& callback) = 0;
+
protected:
virtual ~StreamOutHalInterface() {}
};
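
Note: together, these three pure virtuals form a small negotiation loop: the HAL advertises the modes it can currently honor, and the framework listens for changes and selects one with setLatencyMode(). A hypothetical framework-side client; the class and the streamOut variable are illustrative only:

class LatencyModeObserver : public StreamOutHalInterfaceLatencyModeCallback {
  public:
    explicit LatencyModeObserver(sp<StreamOutHalInterface> stream)
        : mStream(std::move(stream)) {}

    void onRecommendedLatencyModeChanged(std::vector<audio_latency_mode_t> modes) override {
        // Pick the first recommended mode; a real policy could be smarter.
        if (!modes.empty()) mStream->setLatencyMode(modes[0]);
    }

  private:
    const sp<StreamOutHalInterface> mStream;
};

sp<LatencyModeObserver> observer = new LatencyModeObserver(streamOut);
streamOut->setLatencyModeCallback(observer);  // pass nullptr later to unregister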
diff --git a/media/libaudioprocessing/AudioMixer.cpp b/media/libaudioprocessing/AudioMixer.cpp
index d85e2e9..e6fdb1d 100644
--- a/media/libaudioprocessing/AudioMixer.cpp
+++ b/media/libaudioprocessing/AudioMixer.cpp
@@ -108,15 +108,11 @@
if (track->mHapticChannelCount > 0) {
track->mAdjustInChannelCount = track->channelCount + track->mHapticChannelCount;
- track->mAdjustOutChannelCount = track->channelCount + track->mMixerHapticChannelCount;
- track->mAdjustNonDestructiveInChannelCount = track->mAdjustOutChannelCount;
- track->mAdjustNonDestructiveOutChannelCount = track->channelCount;
+ track->mAdjustOutChannelCount = track->channelCount;
track->mKeepContractedChannels = track->mHapticPlaybackEnabled;
} else {
track->mAdjustInChannelCount = 0;
track->mAdjustOutChannelCount = 0;
- track->mAdjustNonDestructiveInChannelCount = 0;
- track->mAdjustNonDestructiveOutChannelCount = 0;
track->mKeepContractedChannels = false;
}
@@ -131,8 +127,7 @@
// do it after downmix since track format may change!
track->prepareForReformat();
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
- track->prepareForAdjustChannels();
+ track->prepareForAdjustChannels(mFrameCount);
// Resampler channels may have changed.
track->recreateResampler(mSampleRate);
@@ -193,6 +188,24 @@
// mDownmixerBufferProvider reset below.
}
+ // See if we should use our built-in non-effect downmixer.
+ if (mMixerInFormat == AUDIO_FORMAT_PCM_FLOAT
+ && mMixerChannelMask == AUDIO_CHANNEL_OUT_STEREO
+ && audio_channel_mask_get_representation(channelMask)
+ == AUDIO_CHANNEL_REPRESENTATION_POSITION) {
+ mDownmixerBufferProvider.reset(new ChannelMixBufferProvider(channelMask,
+ mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
+ if (static_cast<ChannelMixBufferProvider *>(mDownmixerBufferProvider.get())
+ ->isValid()) {
+ mDownmixRequiresFormat = mMixerInFormat;
+ reconfigureBufferProviders();
+ ALOGD("%s: Fallback using ChannelMix", __func__);
+ return NO_ERROR;
+ } else {
+ ALOGD("%s: ChannelMix not supported for channel mask %#x", __func__, channelMask);
+ }
+ }
+
// Effect downmixer does not accept the channel conversion. Let's use our remixer.
mDownmixerBufferProvider.reset(new RemixBufferProvider(channelMask,
mMixerChannelMask, mMixerInFormat, kCopyBufferFrameCount));
@@ -265,48 +278,20 @@
}
}
-status_t AudioMixer::Track::prepareForAdjustChannels()
+status_t AudioMixer::Track::prepareForAdjustChannels(size_t frames)
{
ALOGV("AudioMixer::prepareForAdjustChannels(%p) with inChannelCount: %u, outChannelCount: %u",
this, mAdjustInChannelCount, mAdjustOutChannelCount);
unprepareForAdjustChannels();
if (mAdjustInChannelCount != mAdjustOutChannelCount) {
- mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
- mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, kCopyBufferFrameCount));
- reconfigureBufferProviders();
- }
- return NO_ERROR;
-}
-
-void AudioMixer::Track::unprepareForAdjustChannelsNonDestructive()
-{
- ALOGV("AUDIOMIXER::unprepareForAdjustChannelsNonDestructive");
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
- reconfigureBufferProviders();
- }
-}
-
-status_t AudioMixer::Track::prepareForAdjustChannelsNonDestructive(size_t frames)
-{
- ALOGV("AudioMixer::prepareForAdjustChannelsNonDestructive(%p) with inChannelCount: %u, "
- "outChannelCount: %u, keepContractedChannels: %d",
- this, mAdjustNonDestructiveInChannelCount, mAdjustNonDestructiveOutChannelCount,
- mKeepContractedChannels);
- unprepareForAdjustChannelsNonDestructive();
- if (mAdjustNonDestructiveInChannelCount != mAdjustNonDestructiveOutChannelCount) {
uint8_t* buffer = mKeepContractedChannels
? (uint8_t*)mainBuffer + frames * audio_bytes_per_frame(
mMixerChannelCount, mMixerFormat)
- : NULL;
- mContractChannelsNonDestructiveBufferProvider.reset(
- new AdjustChannelsBufferProvider(
- mFormat,
- mAdjustNonDestructiveInChannelCount,
- mAdjustNonDestructiveOutChannelCount,
- frames,
- mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
- buffer));
+ : nullptr;
+ mAdjustChannelsBufferProvider.reset(new AdjustChannelsBufferProvider(
+ mFormat, mAdjustInChannelCount, mAdjustOutChannelCount, frames,
+ mKeepContractedChannels ? mMixerFormat : AUDIO_FORMAT_INVALID,
+ buffer, mMixerHapticChannelCount));
reconfigureBufferProviders();
}
return NO_ERROR;
@@ -314,9 +299,9 @@
void AudioMixer::Track::clearContractedBuffer()
{
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
+ if (mAdjustChannelsBufferProvider.get() != nullptr) {
static_cast<AdjustChannelsBufferProvider*>(
- mContractChannelsNonDestructiveBufferProvider.get())->clearContractedFrames();
+ mAdjustChannelsBufferProvider.get())->clearContractedFrames();
}
}
@@ -328,10 +313,6 @@
mAdjustChannelsBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mAdjustChannelsBufferProvider.get();
}
- if (mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- mContractChannelsNonDestructiveBufferProvider->setBufferProvider(bufferProvider);
- bufferProvider = mContractChannelsNonDestructiveBufferProvider.get();
- }
if (mReformatBufferProvider.get() != nullptr) {
mReformatBufferProvider->setBufferProvider(bufferProvider);
bufferProvider = mReformatBufferProvider.get();
@@ -377,7 +358,7 @@
track->mainBuffer = valueBuf;
ALOGV("setParameter(TRACK, MAIN_BUFFER, %p)", valueBuf);
if (track->mKeepContractedChannels) {
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels(mFrameCount);
}
invalidate();
}
@@ -405,7 +386,7 @@
track->mMixerFormat = format;
ALOGV("setParameter(TRACK, MIXER_FORMAT, %#x)", format);
if (track->mKeepContractedChannels) {
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
+ track->prepareForAdjustChannels(mFrameCount);
}
}
} break;
@@ -424,8 +405,7 @@
if (track->mHapticPlaybackEnabled != hapticPlaybackEnabled) {
track->mHapticPlaybackEnabled = hapticPlaybackEnabled;
track->mKeepContractedChannels = hapticPlaybackEnabled;
- track->prepareForAdjustChannelsNonDestructive(mFrameCount);
- track->prepareForAdjustChannels();
+ track->prepareForAdjustChannels(mFrameCount);
}
} break;
case HAPTIC_INTENSITY: {
@@ -434,6 +414,12 @@
track->mHapticIntensity = hapticIntensity;
}
} break;
+ case HAPTIC_MAX_AMPLITUDE: {
+ const float hapticMaxAmplitude = *reinterpret_cast<float*>(value);
+ if (track->mHapticMaxAmplitude != hapticMaxAmplitude) {
+ track->mHapticMaxAmplitude = hapticMaxAmplitude;
+ }
+ } break;
default:
LOG_ALWAYS_FATAL("setParameter track: bad param %d", param);
}
@@ -512,8 +498,6 @@
track->mDownmixerBufferProvider->reset();
} else if (track->mReformatBufferProvider.get() != nullptr) {
track->mReformatBufferProvider->reset();
- } else if (track->mContractChannelsNonDestructiveBufferProvider.get() != nullptr) {
- track->mContractChannelsNonDestructiveBufferProvider->reset();
} else if (track->mAdjustChannelsBufferProvider.get() != nullptr) {
track->mAdjustChannelsBufferProvider->reset();
}
@@ -553,12 +537,11 @@
// haptic
t->mHapticPlaybackEnabled = false;
t->mHapticIntensity = os::HapticScale::NONE;
+ t->mHapticMaxAmplitude = NAN;
t->mMixerHapticChannelMask = AUDIO_CHANNEL_NONE;
t->mMixerHapticChannelCount = 0;
t->mAdjustInChannelCount = t->channelCount + t->mHapticChannelCount;
- t->mAdjustOutChannelCount = t->channelCount + t->mMixerHapticChannelCount;
- t->mAdjustNonDestructiveInChannelCount = t->mAdjustOutChannelCount;
- t->mAdjustNonDestructiveOutChannelCount = t->channelCount;
+ t->mAdjustOutChannelCount = t->channelCount;
t->mKeepContractedChannels = false;
// Check the downmixing (or upmixing) requirements.
status_t status = t->prepareForDownmix();
@@ -569,8 +552,7 @@
// prepareForDownmix() may change mDownmixRequiresFormat
ALOGVV("mMixerFormat:%#x mMixerInFormat:%#x\n", t->mMixerFormat, t->mMixerInFormat);
t->prepareForReformat();
- t->prepareForAdjustChannelsNonDestructive(mFrameCount);
- t->prepareForAdjustChannels();
+ t->prepareForAdjustChannels(mFrameCount);
return OK;
}
@@ -602,7 +584,8 @@
switch (t->mMixerFormat) {
// Mixer format should be AUDIO_FORMAT_PCM_FLOAT.
case AUDIO_FORMAT_PCM_FLOAT: {
- os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity);
+ os::scaleHapticData((float*) buffer, sampleCount, t->mHapticIntensity,
+ t->mHapticMaxAmplitude);
} break;
default:
LOG_ALWAYS_FATAL("bad mMixerFormat: %#x", t->mMixerFormat);
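
Note: HAPTIC_MAX_AMPLITUDE feeds the extra clamp argument now passed to os::scaleHapticData() above. Setting it follows the same void*-value convention as the other track parameters; a brief sketch, where the mixer and trackName are assumed to exist:

float maxAmplitude = 0.8f;  // linear clamp for haptic samples; NAN leaves them unclamped
mixer->setParameter(trackName, AudioMixer::TRACK,
                    AudioMixer::HAPTIC_MAX_AMPLITUDE, &maxAmplitude);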
diff --git a/media/libaudioprocessing/AudioMixerOps.h b/media/libaudioprocessing/AudioMixerOps.h
index cd47dc6..ab6a8b6 100644
--- a/media/libaudioprocessing/AudioMixerOps.h
+++ b/media/libaudioprocessing/AudioMixerOps.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_MIXER_OPS_H
#define ANDROID_AUDIO_MIXER_OPS_H
+#include <audio_utils/channels.h>
+#include <audio_utils/primitives.h>
#include <system/audio.h>
namespace android {
@@ -229,15 +231,26 @@
* complexity of working on interleaved streams is now getting
* too high, and likely limits compiler optimization.
*/
-template <int MIXTYPE, int NCHAN,
+
+// Compile-time helper: returns whether the mask contains a center-geometry channel.
+constexpr inline bool usesCenterChannel(audio_channel_mask_t mask) {
+ using namespace audio_utils::channels;
+ for (size_t i = 0; i < std::size(kSideFromChannelIdx); ++i) {
+ if ((mask & (1 << i)) != 0 && kSideFromChannelIdx[i] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/*
+ * Applies stereo volume to the audio data based on each channel's left/right affinity
+ * (templated channel MASK parameter).
+ */
+template <int MIXTYPE, audio_channel_mask_t MASK,
typename TO, typename TI, typename TV,
typename F>
-void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
- static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
- static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
- || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
- || MIXTYPE == MIXTYPE_STEREOEXPAND
- || MIXTYPE == MIXTYPE_MONOEXPAND);
+void stereoVolumeHelperWithChannelMask(TO*& out, const TI*& in, const TV *vol, F f) {
auto proc = [](auto& a, const auto& b) {
if constexpr (MIXTYPE == MIXTYPE_MULTI_STEREOVOL
|| MIXTYPE == MIXTYPE_STEREOEXPAND
@@ -250,59 +263,113 @@
auto inp = [&in]() -> const TI& {
if constexpr (MIXTYPE == MIXTYPE_STEREOEXPAND
|| MIXTYPE == MIXTYPE_MONOEXPAND) {
- return *in;
+ return *in; // note STEREOEXPAND assumes replicated L/R channels (see doc below).
} else {
return *in++;
}
};
- // HALs should only expose the canonical channel masks.
- proc(*out++, f(inp(), vol[0])); // front left
- if constexpr (NCHAN == 1) return;
- proc(*out++, f(inp(), vol[1])); // front right
- if constexpr (NCHAN == 2) return;
- if constexpr (NCHAN == 4) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- // TODO: Precompute center volume if not ramping.
std::decay_t<TV> center;
- if constexpr (std::is_floating_point_v<TV>) {
- center = (vol[0] + vol[1]) * 0.5; // do not use divide
- } else {
- center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
- }
- proc(*out++, f(inp(), center)); // center (or 2.1 LFE)
- if constexpr (NCHAN == 3) return;
- if constexpr (NCHAN == 5) {
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- return;
- }
-
- proc(*out++, f(inp(), center)); // lfe
- proc(*out++, f(inp(), vol[0])); // back left
- proc(*out++, f(inp(), vol[1])); // back right
- if constexpr (NCHAN == 6) return;
- if constexpr (NCHAN == 7) {
- proc(*out++, f(inp(), center)); // back center
- return;
- }
- // NCHAN == 8
- proc(*out++, f(inp(), vol[0])); // side left
- proc(*out++, f(inp(), vol[1])); // side right
- if constexpr (NCHAN > FCC_8) {
- // Mutes to zero extended surround channels.
- // 7.1.4 has the correct behavior.
- // 22.2 has the behavior that FLC and FRC will be mixed instead
- // of SL and SR and LFE will be center, not left.
- for (int i = 8; i < NCHAN; ++i) {
- // TODO: Consider using android::audio_utils::channels::kSideFromChannelIdx
- proc(*out++, f(inp(), 0.f));
+ constexpr bool USES_CENTER_CHANNEL = usesCenterChannel(MASK);
+ if constexpr (USES_CENTER_CHANNEL) {
+ if constexpr (std::is_floating_point_v<TV>) {
+ center = (vol[0] + vol[1]) * 0.5; // do not use divide
+ } else {
+ center = (vol[0] >> 1) + (vol[1] >> 1); // rounds to 0.
}
}
+
+ using namespace audio_utils::channels;
+
+ // if LFE and LFE2 are both present, they take left and right volume respectively.
+ constexpr unsigned LFE_LFE2 = \
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ constexpr bool has_LFE_LFE2 = (MASK & LFE_LFE2) == LFE_LFE2;
+
+#pragma push_macro("DO_CHANNEL_POSITION")
+#undef DO_CHANNEL_POSITION
+#define DO_CHANNEL_POSITION(BIT_INDEX) \
+ if constexpr ((MASK & (1 << BIT_INDEX)) != 0) { \
+ constexpr auto side = kSideFromChannelIdx[BIT_INDEX]; \
+ if constexpr (side == AUDIO_GEOMETRY_SIDE_LEFT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) { \
+ proc(*out++, f(inp(), vol[0])); \
+ } else if constexpr (side == AUDIO_GEOMETRY_SIDE_RIGHT || \
+ has_LFE_LFE2 && (1 << BIT_INDEX) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) { \
+ proc(*out++, f(inp(), vol[1])); \
+ } else /* constexpr */ { \
+ proc(*out++, f(inp(), center)); \
+ } \
+ }
+
+ DO_CHANNEL_POSITION(0);
+ DO_CHANNEL_POSITION(1);
+ DO_CHANNEL_POSITION(2);
+ DO_CHANNEL_POSITION(3);
+ DO_CHANNEL_POSITION(4);
+ DO_CHANNEL_POSITION(5);
+ DO_CHANNEL_POSITION(6);
+ DO_CHANNEL_POSITION(7);
+
+ DO_CHANNEL_POSITION(8);
+ DO_CHANNEL_POSITION(9);
+ DO_CHANNEL_POSITION(10);
+ DO_CHANNEL_POSITION(11);
+ DO_CHANNEL_POSITION(12);
+ DO_CHANNEL_POSITION(13);
+ DO_CHANNEL_POSITION(14);
+ DO_CHANNEL_POSITION(15);
+
+ DO_CHANNEL_POSITION(16);
+ DO_CHANNEL_POSITION(17);
+ DO_CHANNEL_POSITION(18);
+ DO_CHANNEL_POSITION(19);
+ DO_CHANNEL_POSITION(20);
+ DO_CHANNEL_POSITION(21);
+ DO_CHANNEL_POSITION(22);
+ DO_CHANNEL_POSITION(23);
+ DO_CHANNEL_POSITION(24);
+ DO_CHANNEL_POSITION(25);
+ static_assert(FCC_LIMIT <= FCC_26); // Note: this may need to change.
+#pragma pop_macro("DO_CHANNEL_POSITION")
+}
+
+// These are the channel position masks we expect from the HAL.
+// See audio_channel_out_mask_from_count(); this is its constexpr equivalent.
+constexpr inline audio_channel_mask_t canonicalChannelMaskFromCount(size_t channelCount) {
+ constexpr audio_channel_mask_t canonical[] = {
+ [0] = AUDIO_CHANNEL_NONE,
+ [1] = AUDIO_CHANNEL_OUT_MONO,
+ [2] = AUDIO_CHANNEL_OUT_STEREO,
+ [3] = AUDIO_CHANNEL_OUT_2POINT1,
+ [4] = AUDIO_CHANNEL_OUT_QUAD,
+ [5] = AUDIO_CHANNEL_OUT_PENTA,
+ [6] = AUDIO_CHANNEL_OUT_5POINT1,
+ [7] = AUDIO_CHANNEL_OUT_6POINT1,
+ [8] = AUDIO_CHANNEL_OUT_7POINT1,
+ [12] = AUDIO_CHANNEL_OUT_7POINT1POINT4,
+ [14] = AUDIO_CHANNEL_OUT_9POINT1POINT4,
+ [16] = AUDIO_CHANNEL_OUT_9POINT1POINT6,
+ [24] = AUDIO_CHANNEL_OUT_22POINT2,
+ };
+ return channelCount < std::size(canonical) ? canonical[channelCount] : AUDIO_CHANNEL_NONE;
+}
+
+template <int MIXTYPE, int NCHAN,
+ typename TO, typename TI, typename TV,
+ typename F>
+void stereoVolumeHelper(TO*& out, const TI*& in, const TV *vol, F f) {
+ static_assert(NCHAN > 0 && NCHAN <= FCC_LIMIT);
+ static_assert(MIXTYPE == MIXTYPE_MULTI_STEREOVOL
+ || MIXTYPE == MIXTYPE_MULTI_SAVEONLY_STEREOVOL
+ || MIXTYPE == MIXTYPE_STEREOEXPAND
+ || MIXTYPE == MIXTYPE_MONOEXPAND);
+ constexpr audio_channel_mask_t MASK{canonicalChannelMaskFromCount(NCHAN)};
+ if constexpr (MASK == AUDIO_CHANNEL_NONE) {
+ ALOGE("%s: Invalid position count %d", __func__, NCHAN);
+ return; // not a valid system mask, ignore.
+ }
+ stereoVolumeHelperWithChannelMask<MIXTYPE, MASK, TO, TI, TV, F>(out, in, vol, f);
}
/*
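
Note: the rewrite above swaps the old NCHAN-indexed channel ordering for a mask-driven expansion: canonicalChannelMaskFromCount() maps a channel count to its canonical position mask at compile time, and DO_CHANNEL_POSITION emits one volume statement per set bit. A few illustrative compile-time checks of that mapping:

static_assert(canonicalChannelMaskFromCount(2) == AUDIO_CHANNEL_OUT_STEREO);
static_assert(canonicalChannelMaskFromCount(6) == AUDIO_CHANNEL_OUT_5POINT1);
static_assert(canonicalChannelMaskFromCount(12) == AUDIO_CHANNEL_OUT_7POINT1POINT4);
// Counts with no canonical mask fall back to AUDIO_CHANNEL_NONE, which
// stereoVolumeHelper() rejects at runtime with an ALOGE.
static_assert(canonicalChannelMaskFromCount(9) == AUDIO_CHANNEL_NONE);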
diff --git a/media/libaudioprocessing/BufferProviders.cpp b/media/libaudioprocessing/BufferProviders.cpp
index 6d31c12..4658db8 100644
--- a/media/libaudioprocessing/BufferProviders.cpp
+++ b/media/libaudioprocessing/BufferProviders.cpp
@@ -364,6 +364,29 @@
src, mInputChannels, mIdxAry, mSampleSize, frames);
}
+ChannelMixBufferProvider::ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount) :
+ CopyBufferProvider(
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(inputChannelMask),
+ audio_bytes_per_sample(format)
+ * audio_channel_count_from_out_mask(outputChannelMask),
+ bufferFrameCount)
+{
+ ALOGV("ChannelMixBufferProvider(%p)(%#x, %#x, %#x)",
+ this, format, inputChannelMask, outputChannelMask);
+ if (outputChannelMask == AUDIO_CHANNEL_OUT_STEREO && format == AUDIO_FORMAT_PCM_FLOAT) {
+ mIsValid = mChannelMix.setInputChannelMask(inputChannelMask);
+ }
+}
+
+void ChannelMixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
+{
+ mChannelMix.process(static_cast<const float *>(src), static_cast<float *>(dst),
+ frames, false /* accumulate */);
+}
+
ReformatBufferProvider::ReformatBufferProvider(int32_t channelCount,
audio_format_t inputFormat, audio_format_t outputFormat,
size_t bufferFrameCount) :
@@ -630,7 +653,8 @@
AdjustChannelsBufferProvider::AdjustChannelsBufferProvider(
audio_format_t format, size_t inChannelCount, size_t outChannelCount,
- size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer) :
+ size_t frameCount, audio_format_t contractedFormat, void* contractedBuffer,
+ size_t contractedOutChannelCount) :
CopyBufferProvider(
audio_bytes_per_frame(inChannelCount, format),
audio_bytes_per_frame(std::max(inChannelCount, outChannelCount), format),
@@ -640,15 +664,22 @@
mOutChannelCount(outChannelCount),
mSampleSizeInBytes(audio_bytes_per_sample(format)),
mFrameCount(frameCount),
- mContractedChannelCount(inChannelCount - outChannelCount),
- mContractedFormat(contractedFormat),
+ mContractedFormat(inChannelCount > outChannelCount
+ ? contractedFormat : AUDIO_FORMAT_INVALID),
+ mContractedInChannelCount(inChannelCount > outChannelCount
+ ? inChannelCount - outChannelCount : 0),
+ mContractedOutChannelCount(contractedOutChannelCount),
+ mContractedSampleSizeInBytes(audio_bytes_per_sample(contractedFormat)),
+ mContractedInputFrameSize(mContractedInChannelCount * mContractedSampleSizeInBytes),
mContractedBuffer(contractedBuffer),
mContractedWrittenFrames(0)
{
- ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p)", this, format,
- inChannelCount, outChannelCount, frameCount, contractedFormat, contractedBuffer);
+ ALOGV("AdjustChannelsBufferProvider(%p)(%#x, %zu, %zu, %zu, %#x, %p, %zu)",
+ this, format, inChannelCount, outChannelCount, frameCount, contractedFormat,
+ contractedBuffer, contractedOutChannelCount);
if (mContractedFormat != AUDIO_FORMAT_INVALID && mInChannelCount > mOutChannelCount) {
- mContractedFrameSize = audio_bytes_per_frame(mContractedChannelCount, mContractedFormat);
+ mContractedOutputFrameSize =
+ audio_bytes_per_frame(mContractedOutChannelCount, mContractedFormat);
}
}
@@ -667,25 +698,39 @@
void AdjustChannelsBufferProvider::copyFrames(void *dst, const void *src, size_t frames)
{
- if (mInChannelCount > mOutChannelCount) {
- // For case multi to mono, adjust_channels has special logic that will mix first two input
- // channels into a single output channel. In that case, use adjust_channels_non_destructive
- // to keep only one channel data even when contracting to mono.
- adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
- mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
- if (mContractedFormat != AUDIO_FORMAT_INVALID
- && mContractedBuffer != nullptr) {
- const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+    // For the multi-to-mono case, adjust_channels has special logic that mixes the first
+    // two input channels into a single output channel. In that case, use
+    // adjust_channels_non_destructive to keep only one channel's data when contracting to mono.
+ adjust_channels_non_destructive(src, mInChannelCount, dst, mOutChannelCount,
+ mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ if (mContractedFormat != AUDIO_FORMAT_INVALID
+ && mContractedBuffer != nullptr) {
+ const size_t contractedIdx = frames * mOutChannelCount * mSampleSizeInBytes;
+ uint8_t* oriBuf = (uint8_t*) dst + contractedIdx;
+ uint8_t* buf = (uint8_t*) mContractedBuffer
+ + mContractedWrittenFrames * mContractedOutputFrameSize;
+ if (mContractedInChannelCount > mContractedOutChannelCount) {
+ // Adjust the channels first as the contracted buffer may not have enough
+ // space for the data.
+            // Use adjust_channels_non_destructive to avoid mixing the first two channels
+            // into a single output channel in the multi-to-mono case.
+ adjust_channels_non_destructive(
+ oriBuf, mContractedInChannelCount, oriBuf, mContractedOutChannelCount,
+ mSampleSizeInBytes, frames * mContractedInChannelCount * mSampleSizeInBytes);
memcpy_by_audio_format(
- (uint8_t*) mContractedBuffer + mContractedWrittenFrames * mContractedFrameSize,
- mContractedFormat, (uint8_t*) dst + contractedIdx, mFormat,
- mContractedChannelCount * frames);
- mContractedWrittenFrames += frames;
+ buf, mContractedFormat, oriBuf, mFormat, mContractedOutChannelCount * frames);
+ } else {
+            // Copy the data first as the dst buffer may not have enough space for the extra channels.
+ memcpy_by_audio_format(
+ buf, mContractedFormat, oriBuf, mFormat, mContractedInChannelCount * frames);
+ // Note that if the contracted data is from MONO to MULTICHANNEL, the first 2 channels
+ // will be duplicated with the original single input channel and all the other channels
+ // will be 0-filled.
+ adjust_channels(
+ buf, mContractedInChannelCount, buf, mContractedOutChannelCount,
+ mContractedSampleSizeInBytes, mContractedInputFrameSize * frames);
}
- } else {
- // Prefer expanding data from the end of each audio frame.
- adjust_channels(src, mInChannelCount, dst, mOutChannelCount,
- mSampleSizeInBytes, frames * mInChannelCount * mSampleSizeInBytes);
+ mContractedWrittenFrames += frames;
}
}
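
Note: ChannelMixBufferProvider only validates for float samples mixed down to stereo, which is why prepareForDownmix() checks isValid() before committing to it and otherwise falls back to RemixBufferProvider. A standalone construction sketch; the upstream provider is hypothetical:

auto provider = std::make_unique<ChannelMixBufferProvider>(
        AUDIO_CHANNEL_OUT_5POINT1,  // input mask to fold down
        AUDIO_CHANNEL_OUT_STEREO,   // output mask: must be stereo for validity
        AUDIO_FORMAT_PCM_FLOAT,     // format: must be float for validity
        512 /* bufferFrameCount */);
if (provider->isValid()) {
    provider->setBufferProvider(upstream);  // chain onto the track's source
}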
diff --git a/media/libaudioprocessing/include/media/AudioMixer.h b/media/libaudioprocessing/include/media/AudioMixer.h
index 70eafe3..2993a60 100644
--- a/media/libaudioprocessing/include/media/AudioMixer.h
+++ b/media/libaudioprocessing/include/media/AudioMixer.h
@@ -50,6 +50,7 @@
// for haptic
HAPTIC_ENABLED = 0x4007, // Set haptic data from this track should be played or not.
HAPTIC_INTENSITY = 0x4008, // Set the intensity to play haptic data.
+ HAPTIC_MAX_AMPLITUDE = 0x4009, // Set the max amplitude allowed for haptic data.
// for target TIMESTRETCH
PLAYBACK_RATE = 0x4300, // Configure timestretch on this track name;
// parameter 'value' is a pointer to the new playback rate.
@@ -79,7 +80,6 @@
mPostDownmixReformatBufferProvider.reset(nullptr);
mDownmixerBufferProvider.reset(nullptr);
mReformatBufferProvider.reset(nullptr);
- mContractChannelsNonDestructiveBufferProvider.reset(nullptr);
mAdjustChannelsBufferProvider.reset(nullptr);
}
@@ -94,10 +94,8 @@
void unprepareForDownmix();
status_t prepareForReformat();
void unprepareForReformat();
- status_t prepareForAdjustChannels();
+ status_t prepareForAdjustChannels(size_t frames);
void unprepareForAdjustChannels();
- status_t prepareForAdjustChannelsNonDestructive(size_t frames);
- void unprepareForAdjustChannelsNonDestructive();
void clearContractedBuffer();
bool setPlaybackRate(const AudioPlaybackRate &playbackRate);
void reconfigureBufferProviders();
@@ -113,24 +111,18 @@
* 2) mAdjustChannelsBufferProvider: Expands or contracts sample data from one interleaved
* channel format to another. Expanded channels are filled with zeros and put at the end
* of each audio frame. Contracted channels are copied to the end of the buffer.
- * 3) mContractChannelsNonDestructiveBufferProvider: Non-destructively contract sample data.
- * This is currently using at audio-haptic coupled playback to separate audio and haptic
- * data. Contracted channels could be written to given buffer.
- * 4) mReformatBufferProvider: If not NULL, performs the audio reformat to
+ * 3) mReformatBufferProvider: If not NULL, performs the audio reformat to
* match either mMixerInFormat or mDownmixRequiresFormat, if the downmixer
* requires reformat. For example, it may convert floating point input to
* PCM_16_bit if that's required by the downmixer.
- * 5) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
+ * 4) mDownmixerBufferProvider: If not NULL, performs the channel remixing to match
* the number of channels required by the mixer sink.
- * 6) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
+ * 5) mPostDownmixReformatBufferProvider: If not NULL, performs reformatting from
* the downmixer requirements to the mixer engine input requirements.
- * 7) mTimestretchBufferProvider: Adds timestretching for playback rate
+ * 6) mTimestretchBufferProvider: Adds timestretching for playback rate
*/
AudioBufferProvider* mInputBufferProvider; // externally provided buffer provider.
- // TODO: combine mAdjustChannelsBufferProvider and
- // mContractChannelsNonDestructiveBufferProvider
std::unique_ptr<PassthruBufferProvider> mAdjustChannelsBufferProvider;
- std::unique_ptr<PassthruBufferProvider> mContractChannelsNonDestructiveBufferProvider;
std::unique_ptr<PassthruBufferProvider> mReformatBufferProvider;
std::unique_ptr<PassthruBufferProvider> mDownmixerBufferProvider;
std::unique_ptr<PassthruBufferProvider> mPostDownmixReformatBufferProvider;
@@ -145,14 +137,13 @@
// Haptic
bool mHapticPlaybackEnabled;
os::HapticScale mHapticIntensity;
+ float mHapticMaxAmplitude;
audio_channel_mask_t mHapticChannelMask;
uint32_t mHapticChannelCount;
audio_channel_mask_t mMixerHapticChannelMask;
uint32_t mMixerHapticChannelCount;
uint32_t mAdjustInChannelCount;
uint32_t mAdjustOutChannelCount;
- uint32_t mAdjustNonDestructiveInChannelCount;
- uint32_t mAdjustNonDestructiveOutChannelCount;
bool mKeepContractedChannels;
};
diff --git a/media/libaudioprocessing/include/media/BufferProviders.h b/media/libaudioprocessing/include/media/BufferProviders.h
index b038854..b3ab8a5 100644
--- a/media/libaudioprocessing/include/media/BufferProviders.h
+++ b/media/libaudioprocessing/include/media/BufferProviders.h
@@ -20,6 +20,7 @@
#include <stdint.h>
#include <sys/types.h>
+#include <audio_utils/ChannelMix.h>
#include <media/AudioBufferProvider.h>
#include <media/AudioResamplerPublic.h>
#include <system/audio.h>
@@ -129,6 +130,23 @@
static const int32_t SESSION_ID_INVALID_AND_IGNORED = -2;
};
+// ChannelMixBufferProvider derives from CopyBufferProvider to perform an
+// downmix to the proper channel count and mask.
+class ChannelMixBufferProvider : public CopyBufferProvider {
+public:
+ ChannelMixBufferProvider(audio_channel_mask_t inputChannelMask,
+ audio_channel_mask_t outputChannelMask, audio_format_t format,
+ size_t bufferFrameCount);
+
+ void copyFrames(void *dst, const void *src, size_t frames) override;
+
+ bool isValid() const { return mIsValid; }
+
+protected:
+ audio_utils::channels::ChannelMix mChannelMix;
+ bool mIsValid = false;
+};
+
// RemixBufferProvider derives from CopyBufferProvider to perform an
// upmix or downmix to the proper channel count and mask.
class RemixBufferProvider : public CopyBufferProvider {
@@ -223,17 +241,22 @@
// Extra expanded channels are filled with zeros and put at the end of each audio frame.
// Contracted channels are copied to the end of the output buffer(storage should be
// allocated appropriately).
-// Contracted channels could be written to output buffer.
+// Contracted channels may be written to the output buffer and adjusted. When the contracted
+// channels are adjusted in the contracted buffer, the input channel count is calculated as
+// `inChannelCount - outChannelCount` and the output channel count is provided by the caller
+// as `contractedOutChannelCount`. Currently, adjusting contracted channels is used for
+// audio-coupled haptic playback. If the device supports two haptic channels while the app
+// provides only one, the second haptic channel is filled with a copy of the first. If the
+// device supports a single haptic channel while the app provides two, the second channel is
+// contracted.
class AdjustChannelsBufferProvider : public CopyBufferProvider {
public:
- AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount) : AdjustChannelsBufferProvider(
- format, inChannelCount, outChannelCount,
- frameCount, AUDIO_FORMAT_INVALID, nullptr) { }
// Contracted data is converted to contractedFormat and put into contractedBuffer.
AdjustChannelsBufferProvider(audio_format_t format, size_t inChannelCount,
- size_t outChannelCount, size_t frameCount, audio_format_t contractedFormat,
- void* contractedBuffer);
+ size_t outChannelCount, size_t frameCount,
+ audio_format_t contractedFormat = AUDIO_FORMAT_INVALID,
+ void* contractedBuffer = nullptr,
+ size_t contractedOutChannelCount = 0);
//Overrides
status_t getNextBuffer(Buffer* pBuffer) override;
void copyFrames(void *dst, const void *src, size_t frames) override;
@@ -247,11 +270,14 @@
const size_t mOutChannelCount;
const size_t mSampleSizeInBytes;
const size_t mFrameCount;
- const size_t mContractedChannelCount;
const audio_format_t mContractedFormat;
+ const size_t mContractedInChannelCount;
+ const size_t mContractedOutChannelCount;
+ const size_t mContractedSampleSizeInBytes;
+ const size_t mContractedInputFrameSize; // contracted input frame size
void *mContractedBuffer;
size_t mContractedWrittenFrames;
- size_t mContractedFrameSize;
+ size_t mContractedOutputFrameSize; // contracted output frame size
};
// ----------------------------------------------------------------------------
} // namespace android
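
For reference, a minimal sketch of how a caller might use the extended
AdjustChannelsBufferProvider constructor for the coupled-haptic case described
above; the frame count and channel layout are illustrative assumptions, not
values taken from this change.

    #include <vector>
    #include <media/BufferProviders.h>

    // Input: 2 audio channels + 1 haptic channel; the device exposes 2 haptic
    // channels. The contracted input channel count is
    // inChannelCount - outChannelCount = 3 - 2 = 1, and
    // contractedOutChannelCount = 2 asks the provider to duplicate the single
    // haptic channel into the contracted buffer.
    constexpr size_t kFrameCount = 1024;               // illustrative
    std::vector<float> hapticBuffer(kFrameCount * 2);  // 2 contracted out channels
    android::AdjustChannelsBufferProvider provider(
            AUDIO_FORMAT_PCM_FLOAT,
            3 /* inChannelCount: 2 audio + 1 haptic */,
            2 /* outChannelCount: audio only */,
            kFrameCount,
            AUDIO_FORMAT_PCM_FLOAT /* contractedFormat */,
            hapticBuffer.data() /* contractedBuffer */,
            2 /* contractedOutChannelCount: duplicate haptics */);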
diff --git a/media/libaudioprocessing/tests/Android.bp b/media/libaudioprocessing/tests/Android.bp
index 3856817..ad402db 100644
--- a/media/libaudioprocessing/tests/Android.bp
+++ b/media/libaudioprocessing/tests/Android.bp
@@ -76,6 +76,7 @@
//
cc_binary {
name: "mixerops_objdump",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_objdump.cpp"],
}
@@ -84,6 +85,16 @@
//
cc_benchmark {
name: "mixerops_benchmark",
+ header_libs: ["libaudioutils_headers"],
srcs: ["mixerops_benchmark.cpp"],
static_libs: ["libgoogle-benchmark"],
}
+
+//
+// mixerops unit test
+//
+cc_test {
+ name: "mixerops_tests",
+ defaults: ["libaudioprocessing_test_defaults"],
+ srcs: ["mixerops_tests.cpp"],
+}
diff --git a/media/libaudioprocessing/tests/mixerops_benchmark.cpp b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
index 7a4c5c7..f866b1a 100644
--- a/media/libaudioprocessing/tests/mixerops_benchmark.cpp
+++ b/media/libaudioprocessing/tests/mixerops_benchmark.cpp
@@ -16,11 +16,9 @@
#include <inttypes.h>
#include <type_traits>
-#include "../../../../system/media/audio_utils/include/audio_utils/primitives.h"
#define LOG_ALWAYS_FATAL(...)
#include <../AudioMixerOps.h>
-
#include <benchmark/benchmark.h>
using namespace android;
diff --git a/media/libaudioprocessing/tests/mixerops_tests.cpp b/media/libaudioprocessing/tests/mixerops_tests.cpp
new file mode 100644
index 0000000..2500ba9
--- /dev/null
+++ b/media/libaudioprocessing/tests/mixerops_tests.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "mixerop_tests"
+#include <log/log.h>
+
+#include <inttypes.h>
+#include <type_traits>
+
+#include <../AudioMixerOps.h>
+#include <gtest/gtest.h>
+
+using namespace android;
+
+// Note: gtest templated tests require typenames, not integers.
+template <int MIXTYPE, int NCHAN>
+class MixerOpsBasicTest {
+public:
+ static void testStereoVolume() {
+ using namespace android::audio_utils::channels;
+
+ constexpr size_t FRAME_COUNT = 1000;
+ constexpr size_t SAMPLE_COUNT = FRAME_COUNT * NCHAN;
+
+ const float in[SAMPLE_COUNT] = {[0 ... (SAMPLE_COUNT - 1)] = 1.f};
+
+ AUDIO_GEOMETRY_SIDE sides[NCHAN];
+ size_t i = 0;
+ unsigned channel = canonicalChannelMaskFromCount(NCHAN);
+ constexpr unsigned LFE_LFE2 =
+ AUDIO_CHANNEL_OUT_LOW_FREQUENCY | AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2;
+ bool has_LFE_LFE2 = (channel & LFE_LFE2) == LFE_LFE2;
+ while (channel != 0) {
+ const int index = __builtin_ctz(channel);
+ if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_LEFT; // special case
+ } else if (has_LFE_LFE2 && (1 << index) == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ sides[i++] = AUDIO_GEOMETRY_SIDE_RIGHT; // special case
+ } else {
+ sides[i++] = sideFromChannelIdx(index);
+ }
+ channel &= ~(1 << index);
+ }
+
+ float vola[2] = {1.f, 0.f}; // left volume at max.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ float volaux = 0.5;
+ {
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vola, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0];
+ const float center = (vola[0] + vola[1]) * 0.5;
+ const float right = vola[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ EXPECT_EQ(volaux, *auxp++); // works if all channels contain 1.f
+ }
+ }
+ float volb[2] = {0.f, 0.5f}; // right volume at half max.
+ {
+ // this accumulates into out, aux.
+ // float out[SAMPLE_COUNT]{};
+ // float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, volb, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ const float left = vola[0] + volb[0];
+ const float center = (vola[0] + vola[1] + volb[0] + volb[1]) * 0.5;
+ const float right = vola[1] + volb[1];
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ for (size_t j = 0; j < NCHAN; ++j) {
+ const float audio = *outp++;
+ if (sides[j] == AUDIO_GEOMETRY_SIDE_LEFT) {
+ EXPECT_EQ(left, audio);
+ } else if (sides[j] == AUDIO_GEOMETRY_SIDE_CENTER) {
+ EXPECT_EQ(center, audio);
+ } else {
+ EXPECT_EQ(right, audio);
+ }
+ }
+ // aux is accumulated so 2x the amplitude
+ EXPECT_EQ(volaux * 2.f, *auxp++); // works if all channels contain 1.f
+ }
+ }
+
+ { // test aux as derived from out.
+ // AUX channel is the weighted sum of all of the output channels prior to volume
+ // adjustment. We must set L and R to the same volume to allow computation
+ // of AUX from the output values.
+ const float volmono = 0.25f;
+ const float vollr[2] = {volmono, volmono}; // all the same.
+ float out[SAMPLE_COUNT]{};
+ float aux[FRAME_COUNT]{};
+ volumeMulti<MIXTYPE, NCHAN>(out, FRAME_COUNT, in, aux, vollr, volaux);
+ const float *outp = out;
+ const float *auxp = aux;
+ for (size_t i = 0; i < FRAME_COUNT; ++i) {
+ float accum = 0.f;
+ for (size_t j = 0; j < NCHAN; ++j) {
+ accum += *outp++;
+ }
+ EXPECT_EQ(accum / NCHAN * volaux / volmono, *auxp++);
+ }
+ }
+ }
+};
+
+TEST(mixerops, stereovolume_1) { // Note: mono not used for output sinks yet.
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 1>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_2) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 2>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_3) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 3>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_4) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 4>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_5) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 5>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_6) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 6>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_7) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 7>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_8) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 8>::testStereoVolume();
+}
+TEST(mixerops, stereovolume_12) {
+ if constexpr (FCC_LIMIT >= 12) { // NOTE: FCC_LIMIT is an enum, so can't #if
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 12>::testStereoVolume();
+ }
+}
+TEST(mixerops, stereovolume_24) {
+ if constexpr (FCC_LIMIT >= 24) {
+ MixerOpsBasicTest<MIXTYPE_MULTI_STEREOVOL, 24>::testStereoVolume();
+ }
+}
+TEST(mixerops, channel_equivalence) {
+ // we must match the constexpr function with the system determined channel mask from count.
+ for (size_t i = 0; i < FCC_LIMIT; ++i) {
+ const audio_channel_mask_t actual = canonicalChannelMaskFromCount(i);
+ const audio_channel_mask_t system = audio_channel_out_mask_from_count(i);
+ if (system == AUDIO_CHANNEL_INVALID) continue;
+ EXPECT_EQ(system, actual);
+ }
+}
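
The expectations in testStereoVolume() reduce to simple per-channel arithmetic;
below is a hedged sketch for NCHAN = 2 with unit input, mirroring the
volumeMulti call the test exercises (the expected values follow from the checks
above, the snippet itself is illustrative).

    // With vol = {1.f, 0.f}, MIXTYPE_MULTI_STEREOVOL applies vol[0] to
    // left-side channels, vol[1] to right-side channels, and their average to
    // center channels; aux receives the per-frame input channel average
    // times volaux.
    float out[2 * 2]{};  // 2 frames x 2 channels
    float aux[2]{};
    const float in[2 * 2] = {1.f, 1.f, 1.f, 1.f};
    float vol[2] = {1.f, 0.f};
    volumeMulti<MIXTYPE_MULTI_STEREOVOL, 2>(out, 2 /* frames */, in, aux, vol, 0.5f /* volaux */);
    // out == {1.f, 0.f, 1.f, 0.f} and aux == {0.5f, 0.5f}; a second call
    // accumulates, doubling aux to 1.f as checked above.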
diff --git a/media/libeffects/downmix/EffectDownmix.cpp b/media/libeffects/downmix/EffectDownmix.cpp
index f500bc3..d8f5787 100644
--- a/media/libeffects/downmix/EffectDownmix.cpp
+++ b/media/libeffects/downmix/EffectDownmix.cpp
@@ -19,7 +19,7 @@
#include <log/log.h>
#include "EffectDownmix.h"
-#include <math.h>
+#include <audio_utils/ChannelMix.h>
// Do not submit with DOWNMIX_TEST_CHANNEL_INDEX defined, strictly for testing
//#define DOWNMIX_TEST_CHANNEL_INDEX 0
@@ -35,12 +35,13 @@
} downmix_state_t;
/* parameters for each downmixer */
-typedef struct {
+struct downmix_object_t {
downmix_state_t state;
downmix_type_t type;
bool apply_volume_correction;
uint8_t input_channel_count;
-} downmix_object_t;
+ android::audio_utils::channels::ChannelMix channelMix;
+};
typedef struct downmix_module_s {
const struct effect_interface_s *itfe;
@@ -77,11 +78,6 @@
downmix_object_t *pDownmixer, int32_t param, uint32_t size, void *pValue);
static int Downmix_getParameter(
downmix_object_t *pDownmixer, int32_t param, uint32_t *pSize, void *pValue);
-static void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate);
-static bool Downmix_foldGeneric(
- uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate);
// effect_handle_t interface implementation for downmix effect
const struct effect_interface_s gDownmixInterface = {
@@ -192,9 +188,11 @@
if (!mask) {
return false;
}
- // check against unsupported channels
- if (mask & ~AUDIO_CHANNEL_OUT_22POINT2) {
- ALOGE("Unsupported channels in %u", mask & ~AUDIO_CHANNEL_OUT_22POINT2);
+ // check against unsupported channels (up to FCC_26)
+ constexpr uint32_t MAXIMUM_CHANNEL_MASK = AUDIO_CHANNEL_OUT_22POINT2
+ | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT;
+ if (mask & ~MAXIMUM_CHANNEL_MASK) {
+ ALOGE("Unsupported channels in %#x", mask & ~MAXIMUM_CHANNEL_MASK);
return false;
}
return true;
@@ -315,7 +313,8 @@
audio_buffer_t *inBuffer, audio_buffer_t *outBuffer) {
downmix_object_t *pDownmixer;
- float *pSrc, *pDst;
+ const float *pSrc;
+ float *pDst;
downmix_module_t *pDwmModule = (downmix_module_t *)self;
if (pDwmModule == NULL) {
@@ -344,7 +343,8 @@
const bool accumulate =
(pDwmModule->config.outputCfg.accessMode == EFFECT_BUFFER_ACCESS_ACCUMULATE);
- const uint32_t downmixInputChannelMask = pDwmModule->config.inputCfg.channels;
+ const audio_channel_mask_t downmixInputChannelMask =
+ (audio_channel_mask_t)pDwmModule->config.inputCfg.channels;
switch(pDownmixer->type) {
@@ -368,38 +368,13 @@
}
break;
- case DOWNMIX_TYPE_FOLD:
-#ifdef DOWNMIX_ALWAYS_USE_GENERIC_DOWNMIXER
- // bypass the optimized downmix routines for the common formats
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
- ALOGE("Multichannel configuration %#x is not supported",
- downmixInputChannelMask);
- return -EINVAL;
- }
- break;
-#endif
- // optimize for the common formats
- switch (downmixInputChannelMask) {
- case AUDIO_CHANNEL_OUT_QUAD_BACK:
- case AUDIO_CHANNEL_OUT_QUAD_SIDE:
- Downmix_foldFromQuad(pSrc, pDst, numFrames, accumulate);
- break;
- case AUDIO_CHANNEL_OUT_5POINT1_BACK:
- case AUDIO_CHANNEL_OUT_5POINT1_SIDE:
- Downmix_foldFrom5Point1(pSrc, pDst, numFrames, accumulate);
- break;
- case AUDIO_CHANNEL_OUT_7POINT1:
- Downmix_foldFrom7Point1(pSrc, pDst, numFrames, accumulate);
- break;
- default:
- if (!Downmix_foldGeneric(
- downmixInputChannelMask, pSrc, pDst, numFrames, accumulate)) {
+ case DOWNMIX_TYPE_FOLD: {
+ if (!pDownmixer->channelMix.process(
+ pSrc, pDst, numFrames, accumulate, downmixInputChannelMask)) {
ALOGE("Multichannel configuration %#x is not supported",
downmixInputChannelMask);
return -EINVAL;
}
- break;
}
break;
@@ -674,6 +649,12 @@
ALOGE("Downmix_Configure error: invalid config");
return -EINVAL;
}
+ // when configuring the effect, do not allow a blank or unsupported channel mask
+ if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
+ ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
+ pConfig->inputCfg.channels);
+ return -EINVAL;
+ }
if (&pDwmModule->config != pConfig) {
memcpy(&pDwmModule->config, pConfig, sizeof(effect_config_t));
@@ -684,12 +665,6 @@
pDownmixer->apply_volume_correction = false;
pDownmixer->input_channel_count = 8; // matches default input of AUDIO_CHANNEL_OUT_7POINT1
} else {
- // when configuring the effect, do not allow a blank or unsupported channel mask
- if (!Downmix_validChannelMask(pConfig->inputCfg.channels)) {
- ALOGE("Downmix_Configure error: input channel mask(0x%x) not supported",
- pConfig->inputCfg.channels);
- return -EINVAL;
- }
pDownmixer->input_channel_count =
audio_channel_count_from_out_mask(pConfig->inputCfg.channels);
}
@@ -780,7 +755,6 @@
return 0;
} /* end Downmix_setParameter */
-
/*----------------------------------------------------------------------------
* Downmix_getParameter()
*----------------------------------------------------------------------------
@@ -829,299 +803,3 @@
return 0;
} /* end Downmix_getParameter */
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFromQuad()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a quad signal to stereo
- *
- * Inputs:
- * pSrc quad audio samples to downmix
- * numFrames the number of quad frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFromQuad(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is RL
- // sample at index 3 is RR
- if (accumulate) {
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp_float(pDst[0] + ((pSrc[0] + pSrc[2]) / 2.0f));
- // FR + RR
- pDst[1] = clamp_float(pDst[1] + ((pSrc[1] + pSrc[3]) / 2.0f));
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // FL + RL
- pDst[0] = clamp_float((pSrc[0] + pSrc[2]) / 2.0f);
- // FR + RR
- pDst[1] = clamp_float((pSrc[1] + pSrc[3]) / 2.0f);
- pSrc += 4;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom5Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 5.1 signal to stereo
- *
- * Inputs:
- * pSrc 5.1 audio samples to downmix
- * numFrames the number of 5.1 frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom5Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
- // FR + centerPlusLfeContrib + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
- // accumulate in destination
- pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
- pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[4];
- // FR + centerPlusLfeContrib + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[5];
- // store in destination
- pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
- pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
- pSrc += 6;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldFrom7Point1()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix a 7.1 signal to stereo
- *
- * Inputs:
- * pSrc 7.1 audio samples to downmix
- * numFrames the number of 7.1 frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- *----------------------------------------------------------------------------
- */
-void Downmix_foldFrom7Point1(float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
- float lt, rt, centerPlusLfeContrib; // samples in Q19.12 format
- // sample at index 0 is FL
- // sample at index 1 is FR
- // sample at index 2 is FC
- // sample at index 3 is LFE
- // sample at index 4 is RL
- // sample at index 5 is RR
- // sample at index 6 is SL
- // sample at index 7 is SR
- // code is mostly duplicated between the two values of accumulate to avoid repeating the test
- // for every sample
- if (accumulate) {
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + SL + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
- // FR + centerPlusLfeContrib + SR + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
- //accumulate in destination
- pDst[0] = clamp_float(pDst[0] + (lt / 2.0f));
- pDst[1] = clamp_float(pDst[1] + (rt / 2.0f));
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- } else { // same code as above but without adding and clamping pDst[i] to itself
- while (numFrames) {
- // centerPlusLfeContrib = FC(-3dB) + LFE(-3dB)
- centerPlusLfeContrib = (pSrc[2] * MINUS_3_DB_IN_FLOAT)
- + (pSrc[3] * MINUS_3_DB_IN_FLOAT);
- // FL + centerPlusLfeContrib + SL + RL
- lt = pSrc[0] + centerPlusLfeContrib + pSrc[6] + pSrc[4];
- // FR + centerPlusLfeContrib + SR + RR
- rt = pSrc[1] + centerPlusLfeContrib + pSrc[7] + pSrc[5];
- // store in destination
- pDst[0] = clamp_float(lt / 2.0f); // differs from when accumulate is true above
- pDst[1] = clamp_float(rt / 2.0f); // differs from when accumulate is true above
- pSrc += 8;
- pDst += 2;
- numFrames--;
- }
- }
-}
-
-/*----------------------------------------------------------------------------
- * Downmix_foldGeneric()
- *----------------------------------------------------------------------------
- * Purpose:
- * downmix to stereo a multichannel signal of arbitrary channel position mask.
- *
- * Inputs:
- * mask the channel mask of pSrc
- * pSrc multichannel audio buffer to downmix
- * numFrames the number of multichannel frames to downmix
- * accumulate whether to mix (when true) the result of the downmix with the contents of pDst,
- * or overwrite pDst (when false)
- *
- * Outputs:
- * pDst downmixed stereo audio samples
- *
- * Returns: false if multichannel format is not supported
- *
- *----------------------------------------------------------------------------
- */
-bool Downmix_foldGeneric(
- uint32_t mask, float *pSrc, float *pDst, size_t numFrames, bool accumulate) {
-
- if (!Downmix_validChannelMask(mask)) {
- return false;
- }
- const int numChan = audio_channel_count_from_out_mask(mask);
-
- // compute at what index each channel is: samples will be in the following order:
- // FL FR FC LFE BL BR BC SL SR
- //
- // (transfer matrix)
- // FL FR FC LFE BL BR BC SL SR
- // 0.5 0.353 0.353 0.5 0.353 0.5
- // 0.5 0.353 0.353 0.5 0.353 0.5
-
- // derive the indices for the transfer matrix columns that have non-zero values.
- int indexFL = -1;
- int indexFR = -1;
- int indexFC = -1;
- int indexLFE = -1;
- int indexBL = -1;
- int indexBR = -1;
- int indexBC = -1;
- int indexSL = -1;
- int indexSR = -1;
- int index = 0;
- for (unsigned tmp = mask;
- (tmp & (AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER)) != 0;
- ++index) {
- const unsigned lowestBit = tmp & -(signed)tmp;
- switch (lowestBit) {
- case AUDIO_CHANNEL_OUT_FRONT_LEFT:
- indexFL = index;
- break;
- case AUDIO_CHANNEL_OUT_FRONT_RIGHT:
- indexFR = index;
- break;
- case AUDIO_CHANNEL_OUT_FRONT_CENTER:
- indexFC = index;
- break;
- case AUDIO_CHANNEL_OUT_LOW_FREQUENCY:
- indexLFE = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_LEFT:
- indexBL = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_RIGHT:
- indexBR = index;
- break;
- case AUDIO_CHANNEL_OUT_BACK_CENTER:
- indexBC = index;
- break;
- case AUDIO_CHANNEL_OUT_SIDE_LEFT:
- indexSL = index;
- break;
- case AUDIO_CHANNEL_OUT_SIDE_RIGHT:
- indexSR = index;
- break;
- }
- tmp ^= lowestBit;
- }
-
- // With good branch prediction, this should run reasonably fast.
- // Also consider using a transfer matrix form.
- while (numFrames) {
- // compute contribution of FC, BC and LFE
- float centersLfeContrib = 0;
- if (indexFC >= 0) centersLfeContrib = pSrc[indexFC];
- if (indexLFE >= 0) centersLfeContrib += pSrc[indexLFE];
- if (indexBC >= 0) centersLfeContrib += pSrc[indexBC];
- centersLfeContrib *= MINUS_3_DB_IN_FLOAT;
-
- float ch[2];
- ch[0] = centersLfeContrib;
- ch[1] = centersLfeContrib;
-
- // mix in left / right channels
- if (indexFL >= 0) ch[0] += pSrc[indexFL];
- if (indexFR >= 0) ch[1] += pSrc[indexFR];
-
- if (indexSL >= 0) ch[0] += pSrc[indexSL];
- if (indexSR >= 0) ch[1] += pSrc[indexSR]; // note pair checks enforce this if indexSL != 0
-
- if (indexBL >= 0) ch[0] += pSrc[indexBL];
- if (indexBR >= 0) ch[1] += pSrc[indexBR]; // note pair checks enforce this if indexBL != 0
-
- // scale to prevent overflow.
- ch[0] *= 0.5f;
- ch[1] *= 0.5f;
-
- if (accumulate) {
- ch[0] += pDst[0];
- ch[1] += pDst[1];
- }
-
- pDst[0] = clamp_float(ch[0]);
- pDst[1] = clamp_float(ch[1]);
- pSrc += numChan;
- pDst += 2;
- numFrames--;
- }
- return true;
-}
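
The deleted fold routines all share the same per-frame arithmetic, now
delegated to the audio_utils ChannelMix processor; a condensed sketch of the
legacy 5.1 fold is kept here only to document the behavior that
channelMix.process() replaces (the new implementation's coefficients may
differ slightly).

    // Legacy 5.1 -> stereo fold, per frame (condensed from the removed
    // Downmix_foldFrom5Point1); src layout: FL FR FC LFE BL BR.
    inline void foldFrom5Point1Frame(const float* src, float* dst, bool accumulate) {
        const float centerPlusLfe = (src[2] + src[3]) * MINUS_3_DB_IN_FLOAT; // FC, LFE at -3dB
        const float lt = src[0] + centerPlusLfe + src[4];  // FL + center/LFE + BL
        const float rt = src[1] + centerPlusLfe + src[5];  // FR + center/LFE + BR
        dst[0] = clamp_float((accumulate ? dst[0] : 0.f) + lt * 0.5f); // 0.5f avoids overflow
        dst[1] = clamp_float((accumulate ? dst[1] : 0.f) + rt * 0.5f);
    }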
diff --git a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
index ee169c2..d9d40ed 100644
--- a/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
+++ b/media/libeffects/downmix/benchmark/downmix_benchmark.cpp
@@ -35,16 +35,14 @@
AUDIO_CHANNEL_OUT_STEREO,
AUDIO_CHANNEL_OUT_2POINT1,
AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_QUAD,
- AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
AUDIO_CHANNEL_OUT_QUAD_SIDE,
AUDIO_CHANNEL_OUT_SURROUND,
AUDIO_CHANNEL_OUT_2POINT1POINT2,
AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_PENTA,
AUDIO_CHANNEL_OUT_3POINT1POINT2,
- AUDIO_CHANNEL_OUT_5POINT1,
- AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
AUDIO_CHANNEL_OUT_5POINT1_SIDE,
AUDIO_CHANNEL_OUT_6POINT1,
AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -62,58 +60,34 @@
static constexpr size_t kFrameCount = 1000;
/*
-Pixel 3XL
-downmix_benchmark:
- #BM_Downmix/0 4723 ns 4708 ns 148694
- #BM_Downmix/1 4717 ns 4702 ns 148873
- #BM_Downmix/2 4803 ns 4788 ns 145893
- #BM_Downmix/3 5056 ns 5041 ns 139110
- #BM_Downmix/4 4710 ns 4696 ns 149625
- #BM_Downmix/5 1514 ns 1509 ns 463694
- #BM_Downmix/6 1513 ns 1509 ns 463451
- #BM_Downmix/7 1516 ns 1511 ns 463899
- #BM_Downmix/8 4445 ns 4431 ns 157831
- #BM_Downmix/9 5081 ns 5065 ns 138412
- #BM_Downmix/10 4354 ns 4341 ns 161247
- #BM_Downmix/11 4411 ns 4397 ns 158893
- #BM_Downmix/12 4434 ns 4420 ns 157992
- #BM_Downmix/13 4845 ns 4830 ns 144873
- #BM_Downmix/14 4851 ns 4835 ns 144954
- #BM_Downmix/15 4884 ns 4870 ns 144233
- #BM_Downmix/16 5832 ns 5813 ns 120565
- #BM_Downmix/17 5241 ns 5224 ns 133927
- #BM_Downmix/18 5044 ns 5028 ns 139131
- #BM_Downmix/19 5244 ns 5227 ns 132315
- #BM_Downmix/20 5943 ns 5923 ns 117759
- #BM_Downmix/21 5990 ns 5971 ns 117263
- #BM_Downmix/22 4468 ns 4454 ns 156689
- #BM_Downmix/23 7306 ns 7286 ns 95911
---
-downmix_benchmark: (generic fold)
- #BM_Downmix/0 4722 ns 4707 ns 149847
- #BM_Downmix/1 4714 ns 4698 ns 148748
- #BM_Downmix/2 4794 ns 4779 ns 145661
- #BM_Downmix/3 5053 ns 5035 ns 139172
- #BM_Downmix/4 4695 ns 4678 ns 149762
- #BM_Downmix/5 4381 ns 4368 ns 159675
- #BM_Downmix/6 4387 ns 4373 ns 160267
- #BM_Downmix/7 4732 ns 4717 ns 148514
- #BM_Downmix/8 4430 ns 4415 ns 158133
- #BM_Downmix/9 5101 ns 5084 ns 138353
- #BM_Downmix/10 4356 ns 4343 ns 160821
- #BM_Downmix/11 4397 ns 4383 ns 159995
- #BM_Downmix/12 4438 ns 4424 ns 158117
- #BM_Downmix/13 5243 ns 5226 ns 133863
- #BM_Downmix/14 5259 ns 5242 ns 131855
- #BM_Downmix/15 5245 ns 5228 ns 133686
- #BM_Downmix/16 5829 ns 5809 ns 120543
- #BM_Downmix/17 5245 ns 5228 ns 133533
- #BM_Downmix/18 5935 ns 5916 ns 118282
- #BM_Downmix/19 5263 ns 5245 ns 133657
- #BM_Downmix/20 5998 ns 5978 ns 114693
- #BM_Downmix/21 5989 ns 5969 ns 117450
- #BM_Downmix/22 4442 ns 4431 ns 157913
- #BM_Downmix/23 7309 ns 7290 ns 95797
+Pixel 4XL
+$ adb shell /data/benchmarktest/downmix_benchmark/vendor/downmix_benchmark
+
+--------------------------------------------------------
+Benchmark Time CPU Iterations
+--------------------------------------------------------
+BM_Downmix/0 3638 ns 3624 ns 197517 AUDIO_CHANNEL_OUT_MONO
+BM_Downmix/1 4040 ns 4024 ns 178766
+BM_Downmix/2 4759 ns 4740 ns 134741 AUDIO_CHANNEL_OUT_STEREO
+BM_Downmix/3 6042 ns 6017 ns 129546 AUDIO_CHANNEL_OUT_2POINT1
+BM_Downmix/4 6897 ns 6868 ns 96316 AUDIO_CHANNEL_OUT_2POINT0POINT2
+BM_Downmix/5 2117 ns 2109 ns 331705 AUDIO_CHANNEL_OUT_QUAD
+BM_Downmix/6 2097 ns 2088 ns 335421 AUDIO_CHANNEL_OUT_QUAD_SIDE
+BM_Downmix/7 7291 ns 7263 ns 96256 AUDIO_CHANNEL_OUT_SURROUND
+BM_Downmix/8 8246 ns 8206 ns 84318 AUDIO_CHANNEL_OUT_2POINT1POINT2
+BM_Downmix/9 8341 ns 8303 ns 84298 AUDIO_CHANNEL_OUT_3POINT0POINT2
+BM_Downmix/10 7549 ns 7517 ns 84293 AUDIO_CHANNEL_OUT_PENTA
+BM_Downmix/11 9395 ns 9354 ns 75209 AUDIO_CHANNEL_OUT_3POINT1POINT2
+BM_Downmix/12 3267 ns 3253 ns 215596 AUDIO_CHANNEL_OUT_5POINT1
+BM_Downmix/13 3178 ns 3163 ns 220132 AUDIO_CHANNEL_OUT_5POINT1_SIDE
+BM_Downmix/14 10245 ns 10199 ns 67486 AUDIO_CHANNEL_OUT_6POINT1
+BM_Downmix/15 10975 ns 10929 ns 61359 AUDIO_CHANNEL_OUT_5POINT1POINT2
+BM_Downmix/16 3796 ns 3780 ns 184728 AUDIO_CHANNEL_OUT_7POINT1
+BM_Downmix/17 13562 ns 13503 ns 51823 AUDIO_CHANNEL_OUT_5POINT1POINT4
+BM_Downmix/18 13573 ns 13516 ns 51800 AUDIO_CHANNEL_OUT_7POINT1POINT2
+BM_Downmix/19 15502 ns 15435 ns 47147 AUDIO_CHANNEL_OUT_7POINT1POINT4
+BM_Downmix/20 16693 ns 16624 ns 42109 AUDIO_CHANNEL_OUT_13POINT_360RA
+BM_Downmix/21 28267 ns 28116 ns 24982 AUDIO_CHANNEL_OUT_22POINT2
*/
static void BM_Downmix(benchmark::State& state) {
@@ -125,7 +99,7 @@
std::minstd_rand gen(channelMask);
std::uniform_real_distribution<> dis(-1.0f, 1.0f);
std::vector<float> input(kFrameCount * channelCount);
- std::vector<float> output(kFrameCount * 2);
+ std::vector<float> output(kFrameCount * FCC_2);
for (auto& in : input) {
in = dis(gen);
}
@@ -187,7 +161,8 @@
benchmark::ClobberMemory();
}
- state.SetComplexityN(state.range(0));
+ state.SetComplexityN(channelCount);
+ state.SetLabel(audio_channel_out_mask_to_string(channelMask));
if (int status = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(effectHandle); status != 0) {
ALOGE("release_effect returned an error = %d\n", status);
diff --git a/media/libeffects/downmix/tests/downmix_tests.cpp b/media/libeffects/downmix/tests/downmix_tests.cpp
index d4b7a3a..20e19a3 100644
--- a/media/libeffects/downmix/tests/downmix_tests.cpp
+++ b/media/libeffects/downmix/tests/downmix_tests.cpp
@@ -33,16 +33,14 @@
AUDIO_CHANNEL_OUT_STEREO,
AUDIO_CHANNEL_OUT_2POINT1,
AUDIO_CHANNEL_OUT_2POINT0POINT2,
- AUDIO_CHANNEL_OUT_QUAD,
- AUDIO_CHANNEL_OUT_QUAD_BACK,
+ AUDIO_CHANNEL_OUT_QUAD, // AUDIO_CHANNEL_OUT_QUAD_BACK
AUDIO_CHANNEL_OUT_QUAD_SIDE,
AUDIO_CHANNEL_OUT_SURROUND,
AUDIO_CHANNEL_OUT_2POINT1POINT2,
AUDIO_CHANNEL_OUT_3POINT0POINT2,
AUDIO_CHANNEL_OUT_PENTA,
AUDIO_CHANNEL_OUT_3POINT1POINT2,
- AUDIO_CHANNEL_OUT_5POINT1,
- AUDIO_CHANNEL_OUT_5POINT1_BACK,
+ AUDIO_CHANNEL_OUT_5POINT1, // AUDIO_CHANNEL_OUT_5POINT1_BACK
AUDIO_CHANNEL_OUT_5POINT1_SIDE,
AUDIO_CHANNEL_OUT_6POINT1,
AUDIO_CHANNEL_OUT_5POINT1POINT2,
@@ -52,10 +50,72 @@
AUDIO_CHANNEL_OUT_7POINT1POINT4,
AUDIO_CHANNEL_OUT_13POINT_360RA,
AUDIO_CHANNEL_OUT_22POINT2,
+ audio_channel_mask_t(AUDIO_CHANNEL_OUT_22POINT2
+ | AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT | AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT),
};
-static constexpr audio_channel_mask_t kConsideredChannels =
- (audio_channel_mask_t)(AUDIO_CHANNEL_OUT_7POINT1 | AUDIO_CHANNEL_OUT_BACK_CENTER);
+constexpr float COEF_25 = 0.2508909536f;
+constexpr float COEF_35 = 0.3543928915f;
+constexpr float COEF_36 = 0.3552343859f;
+constexpr float COEF_61 = 0.6057043428f;
+
+constexpr inline float kScaleFromChannelIdxLeft[] = {
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ 0.f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
+
+constexpr inline float kScaleFromChannelIdxRight[] = {
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_LEFT = 0x1u,
+ 1.f, // AUDIO_CHANNEL_OUT_FRONT_RIGHT = 0x2u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_CENTER = 0x4u,
+ 0.5f, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY = 0x8u,
+ 0.f, // AUDIO_CHANNEL_OUT_BACK_LEFT = 0x10u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BACK_RIGHT = 0x20u,
+ COEF_25, // AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER = 0x40u,
+ COEF_61, // AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER = 0x80u,
+ 0.5f, // AUDIO_CHANNEL_OUT_BACK_CENTER = 0x100u,
+ 0.f, // AUDIO_CHANNEL_OUT_SIDE_LEFT = 0x200u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_SIDE_RIGHT = 0x400u,
+ COEF_36, // AUDIO_CHANNEL_OUT_TOP_CENTER = 0x800u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_LEFT = 0x1000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_FRONT_CENTER = 0x2000u,
+ 1.f, // AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT = 0x4000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_BACK_LEFT = 0x8000u,
+ COEF_35, // AUDIO_CHANNEL_OUT_TOP_BACK_CENTER = 0x10000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_TOP_BACK_RIGHT = 0x20000u,
+ 0.f, // AUDIO_CHANNEL_OUT_TOP_SIDE_LEFT = 0x40000u,
+ COEF_61, // AUDIO_CHANNEL_OUT_TOP_SIDE_RIGHT = 0x80000u,
+ 0.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_LEFT = 0x100000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_CENTER = 0x200000u,
+ 1.f, // AUDIO_CHANNEL_OUT_BOTTOM_FRONT_RIGHT = 0x400000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2 = 0x800000u,
+ 0.f, // AUDIO_CHANNEL_OUT_FRONT_WIDE_LEFT = 0x1000000u,
+ M_SQRT1_2, // AUDIO_CHANNEL_OUT_FRONT_WIDE_RIGHT = 0x2000000u,
+};
// Downmix doesn't change with sample rate
static constexpr size_t kSampleRates[] = {
@@ -93,8 +153,8 @@
void testBalance(int sampleRate, audio_channel_mask_t channelMask) {
using namespace ::android::audio_utils::channels;
- size_t frames = 100;
- unsigned outChannels = 2;
+ size_t frames = 100; // set to an even number (2, 4, 6, ...); the stream alternates +1, -1.
+ constexpr unsigned outChannels = 2;
unsigned inChannels = audio_channel_count_from_out_mask(channelMask);
std::vector<float> input(frames * inChannels);
std::vector<float> output(frames * outChannels);
@@ -102,7 +162,7 @@
double savedPower[32][2]{};
for (unsigned i = 0, channel = channelMask; channel != 0; ++i) {
const int index = __builtin_ctz(channel);
- ASSERT_LT(index, FCC_24);
+ ASSERT_LT(index, FCC_26);
const int pairIndex = pairIdxFromChannelIdx(index);
const AUDIO_GEOMETRY_SIDE side = sideFromChannelIdx(index);
const int channelBit = 1 << index;
@@ -119,7 +179,7 @@
auto stats = channelStatistics(output, 2 /* channels */);
// printf("power: %s %s\n", stats[0].toString().c_str(), stats[1].toString().c_str());
- double power[2] = { stats[0].getVariance(), stats[1].getVariance() };
+ double power[2] = { stats[0].getPopVariance(), stats[1].getPopVariance() };
// Check symmetric power for pair channels on exchange of left/right position.
// to do this, we save previous power measurements.
@@ -130,28 +190,39 @@
savedPower[index][0] = power[0];
savedPower[index][1] = power[1];
- // Confirm exactly the mix amount prescribed by the existing downmix effect.
- // For future changes to the downmix effect, the nearness needs to be relaxed
- // to compare behavior S or earlier.
- if ((channelBit & kConsideredChannels) == 0) {
- // for channels not considered, expect 0 power for legacy downmix
- EXPECT_EQ(0.f, power[0]);
- EXPECT_EQ(0.f, power[1]);
- continue;
- }
- constexpr float POWER_TOLERANCE = 0.01; // for variance sum error.
+ constexpr float POWER_TOLERANCE = 0.001;
+ const float expectedPower =
+ kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
+ + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
switch (side) {
case AUDIO_GEOMETRY_SIDE_LEFT:
- EXPECT_NEAR(0.25f, power[0], POWER_TOLERANCE);
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_LEFT_OF_CENTER) {
+ break;
+ }
EXPECT_EQ(0.f, power[1]);
break;
case AUDIO_GEOMETRY_SIDE_RIGHT:
+ if (channelBit == AUDIO_CHANNEL_OUT_FRONT_RIGHT_OF_CENTER) {
+ break;
+ }
EXPECT_EQ(0.f, power[0]);
- EXPECT_NEAR(0.25f, power[1], POWER_TOLERANCE);
break;
case AUDIO_GEOMETRY_SIDE_CENTER:
- EXPECT_NEAR(0.125f, power[0], POWER_TOLERANCE);
- EXPECT_NEAR(0.125f, power[1], POWER_TOLERANCE);
+ if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY) {
+ if (channelMask & AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[1]);
+ break;
+ } else {
+ EXPECT_NEAR_EPSILON(power[0], power[1]); // always true
+ EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE);
+ break;
+ }
+ } else if (channelBit == AUDIO_CHANNEL_OUT_LOW_FREQUENCY_2) {
+ EXPECT_EQ(0.f, power[0]);
+ EXPECT_NEAR(expectedPower, power[1], POWER_TOLERANCE);
+ break;
+ }
EXPECT_NEAR_EPSILON(power[0], power[1]);
break;
}
@@ -178,6 +249,7 @@
handle_, EFFECT_CMD_SET_CONFIG,
sizeof(effect_config_t), &config_, &replySize, &reply);
ASSERT_EQ(0, err);
+ ASSERT_EQ(0, reply);
err = (downmixApi->command)(
handle_, EFFECT_CMD_ENABLE,
0, nullptr, &replySize, &reply);
@@ -188,6 +260,27 @@
ASSERT_EQ(0, err);
}
+ // This test assumes the channel mask is invalid.
+ void testInvalidChannelMask(audio_channel_mask_t invalidChannelMask) {
+ reconfig(48000 /* sampleRate */, invalidChannelMask);
+ const int32_t sessionId = 0;
+ const int32_t ioId = 0;
+ int32_t err = AUDIO_EFFECT_LIBRARY_INFO_SYM.create_effect(
+ &downmix_uuid_, sessionId, ioId, &handle_);
+ ASSERT_EQ(0, err);
+
+ const struct effect_interface_s * const downmixApi = *handle_;
+ int32_t reply = 0;
+ uint32_t replySize = (uint32_t)sizeof(reply);
+ err = (downmixApi->command)(
+ handle_, EFFECT_CMD_SET_CONFIG,
+ sizeof(effect_config_t), &config_, &replySize, &reply);
+ ASSERT_EQ(0, err);
+ ASSERT_NE(0, reply); // error has occurred.
+ err = AUDIO_EFFECT_LIBRARY_INFO_SYM.release_effect(handle_);
+ ASSERT_EQ(0, err);
+ }
+
private:
void reconfig(int sampleRate, audio_channel_mask_t channelMask) {
config_.inputCfg.accessMode = EFFECT_BUFFER_ACCESS_READ;
@@ -234,6 +327,16 @@
int inputChannelCount_{};
};
+TEST(DownmixTestSimple, invalidChannelMask) {
+ // Fill in a dummy test method to use DownmixTest outside of a parameterized test.
+ class DownmixTestComplete : public DownmixTest {
+ void TestBody() override {}
+ } downmixtest;
+
+ constexpr auto INVALID_CHANNEL_MASK = audio_channel_mask_t(1 << 31);
+ downmixtest.testInvalidChannelMask(INVALID_CHANNEL_MASK);
+}
+
TEST_P(DownmixTest, basic) {
testBalance(kSampleRates[std::get<0>(GetParam())],
kChannelPositionMasks[std::get<1>(GetParam())]);
@@ -244,10 +347,11 @@
::testing::Combine(
::testing::Range(0, (int)std::size(kSampleRates)),
::testing::Range(0, (int)std::size(kChannelPositionMasks))
- ));
-
-int main(int argc, /* const */ char** argv) {
- ::testing::InitGoogleTest(&argc, argv);
- int status = RUN_ALL_TESTS();
- return status;
-}
+ ),
+ [](const testing::TestParamInfo<DownmixTest::ParamType>& info) {
+ const int index = std::get<1>(info.param);
+ const audio_channel_mask_t channelMask = kChannelPositionMasks[index];
+ const std::string name = std::string(audio_channel_out_mask_to_string(channelMask))
+ + "_" + std::to_string(std::get<0>(info.param)) + "_" + std::to_string(index);
+ return name;
+ });
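
The power expectations in testBalance() follow directly from the coefficient
tables above: a unit-amplitude alternating signal scaled by coefficient g has
population variance g^2, so each input channel's summed output power is the
sum of its squared left and right coefficients. A worked example for
AUDIO_CHANNEL_OUT_FRONT_CENTER:

    // FRONT_CENTER (mask 0x4, channel index 2) maps to M_SQRT1_2 on both
    // outputs, so for a +/-1 input its expected summed output power is
    //   (1/sqrt(2))^2 + (1/sqrt(2))^2 = 0.5 + 0.5 = 1.0,
    // split evenly between left and right, matching
    // EXPECT_NEAR(expectedPower, power[0] + power[1], POWER_TOLERANCE).
    const int index = 2;  // __builtin_ctz(AUDIO_CHANNEL_OUT_FRONT_CENTER)
    const float expectedPower =
            kScaleFromChannelIdxLeft[index] * kScaleFromChannelIdxLeft[index]
            + kScaleFromChannelIdxRight[index] * kScaleFromChannelIdxRight[index];  // == 1.0f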
diff --git a/media/libeffects/hapticgenerator/Android.bp b/media/libeffects/hapticgenerator/Android.bp
index a660957..03ce329 100644
--- a/media/libeffects/hapticgenerator/Android.bp
+++ b/media/libeffects/hapticgenerator/Android.bp
@@ -45,6 +45,7 @@
shared_libs: [
"libaudioutils",
+ "libbase",
"libbinder",
"liblog",
"libutils",
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 65a20a7..3137e13 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -22,12 +22,15 @@
#include <algorithm>
#include <memory>
+#include <string>
#include <utility>
#include <errno.h>
#include <inttypes.h>
#include <math.h>
+#include <android-base/parsedouble.h>
+#include <android-base/properties.h>
#include <audio_effects/effect_hapticgenerator.h>
#include <audio_utils/format.h>
#include <system/audio.h>
@@ -35,6 +38,7 @@
static constexpr float DEFAULT_RESONANT_FREQUENCY = 150.0f;
static constexpr float DEFAULT_BSF_ZERO_Q = 8.0f;
static constexpr float DEFAULT_BSF_POLE_Q = 4.0f;
+static constexpr float DEFAULT_DISTORTION_OUTPUT_GAIN = 1.5f;
// This is the only symbol that needs to be exported
__attribute__ ((visibility ("default")))
@@ -81,6 +85,15 @@
namespace {
+float getFloatProperty(const std::string& key, float defaultValue) {
+ float result;
+ std::string value = android::base::GetProperty(key, "");
+ if (!value.empty() && android::base::ParseFloat(value, &result)) {
+ return result;
+ }
+ return defaultValue;
+}
+
int HapticGenerator_Init(struct HapticGeneratorContext *context) {
context->itfe = &gHapticGeneratorInterface;
@@ -114,7 +127,9 @@
context->param.distortionCornerFrequency = 300.0f;
context->param.distortionInputGain = 0.3f;
context->param.distortionCubeThreshold = 0.1f;
- context->param.distortionOutputGain = 1.5f;
+ context->param.distortionOutputGain = getFloatProperty(
+ "vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
+ ALOGD("Using distortion output gain as %f", context->param.distortionOutputGain);
context->state = HAPTICGENERATOR_STATE_INITIALIZED;
return 0;
@@ -287,15 +302,17 @@
break;
}
case HG_PARAM_VIBRATOR_INFO: {
- if (value == nullptr || size != 2 * sizeof(float)) {
+ if (value == nullptr || size != 3 * sizeof(float)) {
return -EINVAL;
}
const float resonantFrequency = *(float*) value;
const float qFactor = *((float *) value + 1);
+ const float maxAmplitude = *((float *) value + 2);
context->param.resonantFrequency =
isnan(resonantFrequency) ? DEFAULT_RESONANT_FREQUENCY : resonantFrequency;
context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
+ context->param.maxHapticAmplitude = maxAmplitude;
if (context->processorsRecord.bpf != nullptr) {
context->processorsRecord.bpf->setCoefficients(
@@ -448,7 +465,8 @@
float* hapticOutBuffer = HapticGenerator_runProcessingChain(
context->processingChain, context->inputBuffer.data(),
context->outputBuffer.data(), inBuffer->frameCount);
- os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity);
+ os::scaleHapticData(hapticOutBuffer, hapticSampleCount, context->param.maxHapticIntensity,
+ context->param.maxHapticAmplitude);
// For haptic data, the haptic playback thread will copy the data from effect input buffer,
// which contains haptic data at the end of the buffer, directly to sink buffer.
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.h b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
index 96b744a..85e961f 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.h
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.h
@@ -51,6 +51,7 @@
// A map from track id to haptic intensity.
std::map<int, os::HapticScale> id2Intensity;
os::HapticScale maxHapticIntensity; // max intensity will be used to scale haptic data.
+ float maxHapticAmplitude; // max amplitude will be used to limit haptic data absolute values.
float resonantFrequency;
float bpfQ;
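
With this change HG_PARAM_VIBRATOR_INFO carries three floats rather than two;
a minimal sketch of the payload a caller would now pack (the values are
illustrative and the effect-command plumbing is elided):

    // HG_PARAM_VIBRATOR_INFO now expects exactly 3 * sizeof(float) bytes:
    // resonant frequency (Hz), Q factor, and max amplitude; any other size
    // is rejected with -EINVAL.
    float vibratorInfo[3] = {
            150.0f,  // resonantFrequency (illustrative; NaN falls back to the default)
            8.0f,    // qFactor -> bsfZeroQ; bsfPoleQ is derived as qFactor / 2
            0.8f,    // maxAmplitude, used to limit haptic sample magnitudes
    };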
diff --git a/media/libheadtracking/Android.bp b/media/libheadtracking/Android.bp
new file mode 100644
index 0000000..63b769e
--- /dev/null
+++ b/media/libheadtracking/Android.bp
@@ -0,0 +1,78 @@
+package {
+ // See: http://go/android-license-faq
+ // A large-scale-change added 'default_applicable_licenses' to import
+ // all of the 'license_kinds' from "frameworks_av_license"
+ // to get the below license kinds:
+ // SPDX-license-identifier-Apache-2.0
+ default_applicable_licenses: ["frameworks_av_license"],
+}
+
+cc_library {
+ name: "libheadtracking",
+ host_supported: true,
+ srcs: [
+ "HeadTrackingProcessor.cpp",
+ "ModeSelector.cpp",
+ "Pose.cpp",
+ "PoseDriftCompensator.cpp",
+ "PoseRateLimiter.cpp",
+ "QuaternionUtil.cpp",
+ "ScreenHeadFusion.cpp",
+ "Twist.cpp",
+ ],
+ export_include_dirs: [
+ "include",
+ ],
+ header_libs: [
+ "libeigen",
+ ],
+ export_header_lib_headers: [
+ "libeigen",
+ ],
+}
+
+cc_library {
+ name: "libheadtracking-binding",
+ srcs: [
+ "SensorPoseProvider.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ "liblog",
+ "libsensor",
+ "libutils",
+ ],
+ export_shared_lib_headers: [
+ "libheadtracking",
+ ],
+}
+
+cc_binary {
+ name: "SensorPoseProvider-example",
+ srcs: [
+ "SensorPoseProvider-example.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ "libheadtracking-binding",
+ "libsensor",
+ "libutils",
+ ],
+}
+
+cc_test_host {
+ name: "libheadtracking-test",
+ srcs: [
+ "HeadTrackingProcessor-test.cpp",
+ "ModeSelector-test.cpp",
+ "Pose-test.cpp",
+ "PoseDriftCompensator-test.cpp",
+ "PoseRateLimiter-test.cpp",
+ "QuaternionUtil-test.cpp",
+ "ScreenHeadFusion-test.cpp",
+ "Twist-test.cpp",
+ ],
+ shared_libs: [
+ "libheadtracking",
+ ],
+}
diff --git a/media/libheadtracking/HeadTrackingProcessor-test.cpp b/media/libheadtracking/HeadTrackingProcessor-test.cpp
new file mode 100644
index 0000000..299192f
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor-test.cpp
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = HeadTrackingProcessor::Options;
+
+TEST(HeadTrackingProcessor, Initial) {
+ for (auto mode : {HeadTrackingMode::STATIC, HeadTrackingMode::WORLD_RELATIVE,
+ HeadTrackingMode::SCREEN_RELATIVE}) {
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcessor(Options{}, mode);
+ processor->calculate(0);
+ EXPECT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+ }
+}
+
+TEST(HeadTrackingProcessor, BasicComposition) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+ const Pose3f screenToStage{{7, 8, 9}, Quaternionf::UnitRandom()};
+ const float physicalToLogical = M_PI_2;
+
+ std::unique_ptr<HeadTrackingProcessor> processor =
+ createHeadTrackingProcessor(Options{}, HeadTrackingMode::SCREEN_RELATIVE);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->setDisplayOrientation(physicalToLogical);
+ processor->setWorldToHeadPose(0, worldToHead, Twist3f());
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->setScreenToStagePose(screenToStage);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * worldToScreen *
+ Pose3f(rotateY(-physicalToLogical)) *
+ screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), screenToStage);
+}
+
+TEST(HeadTrackingProcessor, Prediction) {
+ const Pose3f worldToHead{{1, 2, 3}, Quaternionf::UnitRandom()};
+ const Twist3f headTwist{{4, 5, 6}, quaternionToRotationVector(Quaternionf::UnitRandom()) / 10};
+ const Pose3f worldToScreen{{4, 5, 6}, Quaternionf::UnitRandom()};
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+ Options{.predictionDuration = 2.f}, HeadTrackingMode::WORLD_RELATIVE);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->setWorldToHeadPose(0, worldToHead, headTwist);
+ processor->setWorldToScreenPose(0, worldToScreen);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::WORLD_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(), (worldToHead * integrate(headTwist, 2.f)).inverse());
+
+ processor->setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::SCREEN_RELATIVE);
+ EXPECT_EQ(processor->getHeadToStagePose(),
+ (worldToHead * integrate(headTwist, 2.f)).inverse() * worldToScreen);
+
+ processor->setDesiredMode(HeadTrackingMode::STATIC);
+ processor->calculate(0);
+ ASSERT_EQ(processor->getActualMode(), HeadTrackingMode::STATIC);
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+TEST(HeadTrackingProcessor, SmoothModeSwitch) {
+ const Pose3f targetHeadToWorld = Pose3f({4, 0, 0}, rotateZ(M_PI / 2));
+
+ std::unique_ptr<HeadTrackingProcessor> processor = createHeadTrackingProcessor(
+ Options{.maxTranslationalVelocity = 1}, HeadTrackingMode::STATIC);
+
+ // Establish a baseline for the drift compensators.
+ processor->setWorldToHeadPose(0, Pose3f(), Twist3f());
+ processor->setWorldToScreenPose(0, Pose3f());
+
+ processor->calculate(0);
+
+ processor->setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ processor->setWorldToHeadPose(0, targetHeadToWorld.inverse(), Twist3f());
+
+ // We're expecting a gradual move to the target.
+ processor->calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+
+ processor->calculate(2);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f({2, 0, 0}, rotateZ(M_PI / 4)));
+
+ processor->calculate(4);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), targetHeadToWorld);
+
+ // Now that we've reached the target, we should no longer be rate limiting.
+ processor->setWorldToHeadPose(4, Pose3f(), Twist3f());
+ processor->calculate(5);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, processor->getActualMode());
+ EXPECT_EQ(processor->getHeadToStagePose(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/HeadTrackingProcessor.cpp b/media/libheadtracking/HeadTrackingProcessor.cpp
new file mode 100644
index 0000000..47f7cf0
--- /dev/null
+++ b/media/libheadtracking/HeadTrackingProcessor.cpp
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/HeadTrackingProcessor.h"
+
+#include "ModeSelector.h"
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+class HeadTrackingProcessorImpl : public HeadTrackingProcessor {
+ public:
+ HeadTrackingProcessorImpl(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options),
+ mHeadPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mScreenPoseDriftCompensator(PoseDriftCompensator::Options{
+ .translationalDriftTimeConstant = options.translationalDriftTimeConstant,
+ .rotationalDriftTimeConstant = options.rotationalDriftTimeConstant,
+ }),
+ mModeSelector(ModeSelector::Options{.freshnessTimeout = options.freshnessTimeout},
+ initialMode),
+ mRateLimiter(PoseRateLimiter::Options{
+ .maxTranslationalVelocity = options.maxTranslationalVelocity,
+ .maxRotationalVelocity = options.maxRotationalVelocity}) {}
+
+ void setDesiredMode(HeadTrackingMode mode) override { mModeSelector.setDesiredMode(mode); }
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+ const Twist3f& headTwist) override {
+ Pose3f predictedWorldToHead =
+ worldToHead * integrate(headTwist, mOptions.predictionDuration);
+ mHeadPoseDriftCompensator.setInput(timestamp, predictedWorldToHead);
+ mWorldToHeadTimestamp = timestamp;
+ }
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) override {
+ if (mPhysicalToLogicalAngle != mPendingPhysicalToLogicalAngle) {
+ // We're introducing an artificial discontinuity. Enable the rate limiter.
+ mRateLimiter.enable();
+ mPhysicalToLogicalAngle = mPendingPhysicalToLogicalAngle;
+ }
+
+ mScreenPoseDriftCompensator.setInput(
+ timestamp, worldToScreen * Pose3f(rotateY(-mPhysicalToLogicalAngle)));
+ mWorldToScreenTimestamp = timestamp;
+ }
+
+ void setScreenToStagePose(const Pose3f& screenToStage) override {
+ mModeSelector.setScreenToStagePose(screenToStage);
+ }
+
+ void setDisplayOrientation(float physicalToLogicalAngle) override {
+ mPendingPhysicalToLogicalAngle = physicalToLogicalAngle;
+ }
+
+ void calculate(int64_t timestamp) override {
+ if (mWorldToHeadTimestamp.has_value()) {
+ const Pose3f worldToHead = mHeadPoseDriftCompensator.getOutput();
+ mScreenHeadFusion.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ mModeSelector.setWorldToHeadPose(mWorldToHeadTimestamp.value(), worldToHead);
+ }
+
+ if (mWorldToScreenTimestamp.has_value()) {
+ const Pose3f worldToLogicalScreen = mScreenPoseDriftCompensator.getOutput();
+ mScreenHeadFusion.setWorldToScreenPose(mWorldToScreenTimestamp.value(),
+ worldToLogicalScreen);
+ }
+
+ auto maybeScreenToHead = mScreenHeadFusion.calculate();
+ if (maybeScreenToHead.has_value()) {
+ mModeSelector.setScreenToHeadPose(maybeScreenToHead->timestamp,
+ maybeScreenToHead->pose);
+ } else {
+ mModeSelector.setScreenToHeadPose(timestamp, std::nullopt);
+ }
+
+ HeadTrackingMode prevMode = mModeSelector.getActualMode();
+ mModeSelector.calculate(timestamp);
+ if (mModeSelector.getActualMode() != prevMode) {
+ // Mode has changed, enable rate limiting.
+ mRateLimiter.enable();
+ }
+ mRateLimiter.setTarget(mModeSelector.getHeadToStagePose());
+ mHeadToStagePose = mRateLimiter.calculatePose(timestamp);
+ }
+
+ Pose3f getHeadToStagePose() const override { return mHeadToStagePose; }
+
+ HeadTrackingMode getActualMode() const override { return mModeSelector.getActualMode(); }
+
+ void recenter(bool recenterHead, bool recenterScreen) override {
+ if (recenterHead) {
+ mHeadPoseDriftCompensator.recenter();
+ }
+ if (recenterScreen) {
+ mScreenPoseDriftCompensator.recenter();
+ }
+
+ // If a sensor being recentered is included in the current mode, apply rate limiting to
+ // avoid discontinuities.
+ HeadTrackingMode mode = mModeSelector.getActualMode();
+ if ((recenterHead && (mode == HeadTrackingMode::WORLD_RELATIVE ||
+ mode == HeadTrackingMode::SCREEN_RELATIVE)) ||
+ (recenterScreen && mode == HeadTrackingMode::SCREEN_RELATIVE)) {
+ mRateLimiter.enable();
+ }
+ }
+
+ private:
+ const Options mOptions;
+ float mPhysicalToLogicalAngle = 0;
+ // The physical-to-logical angle is stored as "pending" until the arrival of the next
+ // world-to-screen sample to which it applies.
+ float mPendingPhysicalToLogicalAngle = 0;
+ std::optional<int64_t> mWorldToHeadTimestamp;
+ std::optional<int64_t> mWorldToScreenTimestamp;
+ Pose3f mHeadToStagePose;
+ PoseDriftCompensator mHeadPoseDriftCompensator;
+ PoseDriftCompensator mScreenPoseDriftCompensator;
+ ScreenHeadFusion mScreenHeadFusion;
+ ModeSelector mModeSelector;
+ PoseRateLimiter mRateLimiter;
+};
+
+} // namespace
+
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+ const HeadTrackingProcessor::Options& options, HeadTrackingMode initialMode) {
+ return std::make_unique<HeadTrackingProcessorImpl>(options, initialMode);
+}
+
+} // namespace media
+} // namespace android
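
A brief usage sketch of the new HeadTrackingProcessor API, distilled from the
unit tests above; the timestamps and option values are illustrative.

    #include "media/HeadTrackingProcessor.h"

    using namespace android::media;

    auto processor = createHeadTrackingProcessor(
            HeadTrackingProcessor::Options{.maxTranslationalVelocity = 1},  // illustrative
            HeadTrackingMode::WORLD_RELATIVE);
    processor->setWorldToHeadPose(0 /* timestamp */, Pose3f(), Twist3f());
    processor->setWorldToScreenPose(0, Pose3f());
    processor->calculate(0);
    const Pose3f headToStage = processor->getHeadToStagePose();  // pose to render with
    const HeadTrackingMode mode = processor->getActualMode();    // may fall back to STATIC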
diff --git a/media/libheadtracking/ModeSelector-test.cpp b/media/libheadtracking/ModeSelector-test.cpp
new file mode 100644
index 0000000..6247d84
--- /dev/null
+++ b/media/libheadtracking/ModeSelector-test.cpp
@@ -0,0 +1,149 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+TEST(ModeSelector, Initial) {
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), Pose3f());
+}
+
+TEST(ModeSelector, InitialWorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::WORLD_RELATIVE);
+
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse());
+}
+
+TEST(ModeSelector, InitialScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options, HeadTrackingMode::SCREEN_RELATIVE);
+
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse());
+}
+
+TEST(ModeSelector, WorldRelative) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, WorldRelativeStale) {
+ const Pose3f worldToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::WORLD_RELATIVE);
+ selector.setWorldToHeadPose(0, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::STATIC, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.calculate(0);
+ EXPECT_EQ(HeadTrackingMode::SCREEN_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), screenToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeStaleToWorldRelative) {
+ const Pose3f screenToHead({1, 2, 3}, Quaternionf::UnitRandom());
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options{.freshnessTimeout = 100};
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(0, screenToHead);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+TEST(ModeSelector, ScreenRelativeInvalidToWorldRelative) {
+ const Pose3f screenToStage({4, 5, 6}, Quaternionf::UnitRandom());
+ const Pose3f worldToHead({7, 8, 9}, Quaternionf::UnitRandom());
+
+ ModeSelector::Options options;
+ ModeSelector selector(options);
+
+ selector.setScreenToStagePose(screenToStage);
+
+ selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+ selector.setScreenToHeadPose(50, std::nullopt);
+ selector.setWorldToHeadPose(50, worldToHead);
+ selector.calculate(101);
+ EXPECT_EQ(HeadTrackingMode::WORLD_RELATIVE, selector.getActualMode());
+ EXPECT_EQ(selector.getHeadToStagePose(), worldToHead.inverse() * screenToStage);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.cpp b/media/libheadtracking/ModeSelector.cpp
new file mode 100644
index 0000000..16e1712
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.cpp
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ModeSelector.h"
+
+namespace android {
+namespace media {
+
+ModeSelector::ModeSelector(const Options& options, HeadTrackingMode initialMode)
+ : mOptions(options), mDesiredMode(initialMode), mActualMode(initialMode) {}
+
+void ModeSelector::setDesiredMode(HeadTrackingMode mode) {
+ mDesiredMode = mode;
+}
+
+void ModeSelector::setScreenToStagePose(const Pose3f& screenToStage) {
+ mScreenToStage = screenToStage;
+}
+
+void ModeSelector::setScreenToHeadPose(int64_t timestamp,
+ const std::optional<Pose3f>& screenToHead) {
+ mScreenToHead = screenToHead;
+ mScreenToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = worldToHead;
+ mWorldToHeadTimestamp = timestamp;
+}
+
+void ModeSelector::calculateActualMode(int64_t timestamp) {
+ bool isValidScreenToHead = mScreenToHead.has_value() &&
+ timestamp - mScreenToHeadTimestamp < mOptions.freshnessTimeout;
+ bool isValidWorldToHead = mWorldToHead.has_value() &&
+ timestamp - mWorldToHeadTimestamp < mOptions.freshnessTimeout;
+
+ HeadTrackingMode mode = mDesiredMode;
+
+ // Optional downgrade from screen-relative to world-relative.
+ if (mode == HeadTrackingMode::SCREEN_RELATIVE) {
+ if (!isValidScreenToHead) {
+ mode = HeadTrackingMode::WORLD_RELATIVE;
+ }
+ }
+
+ // Optional downgrade from world-relative to static.
+ if (mode == HeadTrackingMode::WORLD_RELATIVE) {
+ if (!isValidWorldToHead) {
+ mode = HeadTrackingMode::STATIC;
+ }
+ }
+
+ mActualMode = mode;
+}
+
+void ModeSelector::calculate(int64_t timestamp) {
+ calculateActualMode(timestamp);
+
+ switch (mActualMode) {
+ case HeadTrackingMode::STATIC:
+ mHeadToStage = mScreenToStage;
+ break;
+
+ case HeadTrackingMode::WORLD_RELATIVE:
+ mHeadToStage = mWorldToHead.value().inverse() * mScreenToStage;
+ break;
+
+ case HeadTrackingMode::SCREEN_RELATIVE:
+ mHeadToStage = mScreenToHead.value().inverse() * mScreenToStage;
+ break;
+ }
+}
+
+Pose3f ModeSelector::getHeadToStagePose() const {
+ return mHeadToStage;
+}
+
+HeadTrackingMode ModeSelector::getActualMode() const {
+ return mActualMode;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ModeSelector.h b/media/libheadtracking/ModeSelector.h
new file mode 100644
index 0000000..17a5142
--- /dev/null
+++ b/media/libheadtracking/ModeSelector.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/HeadTrackingMode.h"
+#include "media/Pose.h"
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Head-tracking mode selector.
+ *
+ * This class is responsible for producing the pose that drives audio virtualization, based on a
+ * number of available sources and a selectable mode.
+ *
+ * Typical flow is:
+ * ModeSelector selector(...);
+ * while (...) {
+ * // Set inputs.
+ * selector.setFoo(...);
+ * selector.setBar(...);
+ *
+ * // Update outputs based on inputs.
+ * selector.calculate(...);
+ *
+ * // Get outputs.
+ * Pose3f pose = selector.getHeadToStagePose();
+ * }
+ *
+ * This class is not thread-safe, but thread-compatible.
+ *
+ * For details on the frames of reference involved, their composition and the definitions of the
+ * different modes, refer to:
+ * go/immersive-audio-frames
+ *
+ * The actual mode may deviate from the desired mode in the following cases:
+ * - When we cannot get a valid and fresh estimate of the screen-to-head pose, we will fall back
+ * from screen-relative to world-relative.
+ * - When we cannot get a fresh estimate of the world-to-head pose, we will fall back from
+ * world-relative to static.
+ *
+ * All the timestamps used here are of arbitrary units and origin. They just need to be consistent
+ * between all the calls and with the Options provided for determining freshness and rate limiting.
+ */
+class ModeSelector {
+ public:
+ struct Options {
+ int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+ };
+
+ ModeSelector(const Options& options, HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const Pose3f& screenToStage);
+
+ /**
+ * Set the screen-to-head pose, used in screen-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to). nullopt can be used if it is determined that the listener is not in front of
+ * the screen.
+ */
+ void setScreenToHeadPose(int64_t timestamp, const std::optional<Pose3f>& screenToHead);
+
+ /**
+ * Set the world-to-head pose, used in world-relative mode.
+ * The timestamp needs to reflect how fresh the sample is (not necessarily which point in time
+ * it applies to).
+ */
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ /**
+ * Process all the previous inputs and update the outputs.
+ */
+ void calculate(int64_t timestamp);
+
+ /**
+ * Get the aggregate head-to-stage pose (primary output of this module).
+ */
+ Pose3f getHeadToStagePose() const;
+
+ /**
+ * Get the actual head-tracking mode (which may deviate from the desired one as mentioned in the
+ * class documentation above).
+ */
+ HeadTrackingMode getActualMode() const;
+
+ private:
+ const Options mOptions;
+
+ HeadTrackingMode mDesiredMode;
+ Pose3f mScreenToStage;
+ std::optional<Pose3f> mScreenToHead;
+ int64_t mScreenToHeadTimestamp;
+ std::optional<Pose3f> mWorldToHead;
+ int64_t mWorldToHeadTimestamp;
+
+ HeadTrackingMode mActualMode;
+ Pose3f mHeadToStage;
+
+ void calculateActualMode(int64_t timestamp);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/OWNERS b/media/libheadtracking/OWNERS
new file mode 100644
index 0000000..e5d0370
--- /dev/null
+++ b/media/libheadtracking/OWNERS
@@ -0,0 +1,2 @@
+ytai@google.com
+elaurent@google.com
diff --git a/media/libheadtracking/Pose-test.cpp b/media/libheadtracking/Pose-test.cpp
new file mode 100644
index 0000000..a9e18ce
--- /dev/null
+++ b/media/libheadtracking/Pose-test.cpp
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using android::media::Pose3f;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Pose, CtorDefault) {
+ Pose3f pose;
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Pose3f pose(rot);
+ EXPECT_EQ(pose.translation(), Vector3f::Zero());
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, CtorTranslation) {
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), Quaternionf::Identity());
+}
+
+TEST(Pose, CtorTranslationRotation) {
+ Quaternionf rot = Quaternionf::UnitRandom();
+ Vector3f trans{1, 2, 3};
+ Pose3f pose(trans, rot);
+ EXPECT_EQ(pose.translation(), trans);
+ EXPECT_EQ(pose.rotation(), rot);
+}
+
+TEST(Pose, Inverse) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+ EXPECT_EQ(pose.inverse() * pose, Pose3f());
+ EXPECT_EQ(pose * pose.inverse(), Pose3f());
+}
+
+TEST(Pose, IsApprox) {
+ constexpr float eps = std::numeric_limits<float>::epsilon();
+
+ EXPECT_EQ(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1 + eps, 2 + eps, 3 + eps},
+ rotationVectorToQuaternion({4 + eps, 5 + eps, 6 + eps})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1.01, 2, 3}, rotationVectorToQuaternion({4, 5, 6})));
+
+ EXPECT_NE(Pose3f({1, 2, 3}, rotationVectorToQuaternion({4, 5, 6})),
+ Pose3f({1, 2, 3}, rotationVectorToQuaternion({4.01, 5, 6})));
+}
+
+TEST(Pose, Compose) {
+ Pose3f p1({1, 2, 3}, rotateZ(M_PI_2));
+ Pose3f p2({4, 5, 6}, rotateX(M_PI_2));
+ Pose3f p3({-4, 6, 9}, p1.rotation() * p2.rotation());
+ EXPECT_EQ(p1 * p2, p3);
+}
+
+TEST(Pose, MoveWithRateLimit_NoLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, 10);
+ EXPECT_EQ(std::get<0>(result), to);
+ EXPECT_FALSE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_TranslationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 0.5f, 10);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, MoveWithRateLimit_RotationLimit) {
+ Pose3f from({1, 1, 1}, Quaternionf::Identity());
+ Pose3f to({1, 1, 2}, rotateZ(M_PI_2));
+ auto result = moveWithRateLimit(from, to, 1, 10, M_PI_4);
+ Pose3f expected({1, 1, 1.5f}, rotateZ(M_PI_4));
+ EXPECT_EQ(std::get<0>(result), expected);
+ EXPECT_TRUE(std::get<1>(result));
+}
+
+TEST(Pose, FloatVectorRoundTrip1) {
+ // Rotation vector magnitude must be less than Pi.
+ std::vector<float> vec = { 1, 2, 3, 0.4, 0.5, 0.6};
+ std::optional<Pose3f> pose = Pose3f::fromVector(vec);
+ ASSERT_TRUE(pose.has_value());
+ std::vector<float> reconstructed = pose->toVector();
+ EXPECT_EQ(vec, reconstructed);
+}
+
+TEST(Pose, FloatVectorRoundTrip2) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+ std::vector<float> vec = pose.toVector();
+ std::optional<Pose3f> reconstructed = Pose3f::fromVector(vec);
+ ASSERT_TRUE(reconstructed.has_value());
+ EXPECT_EQ(pose, reconstructed.value());
+}
+
+TEST(Pose, FloatVectorInvalid) {
+ EXPECT_FALSE(Pose3f::fromVector({}).has_value());
+ EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5}).has_value());
+ EXPECT_FALSE(Pose3f::fromVector({1, 2, 3, 4, 5, 6, 7}).has_value());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Pose.cpp b/media/libheadtracking/Pose.cpp
new file mode 100644
index 0000000..47241ce
--- /dev/null
+++ b/media/libheadtracking/Pose.cpp
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Vector3f;
+
+std::optional<Pose3f> Pose3f::fromVector(const std::vector<float>& vec) {
+ if (vec.size() != 6) {
+ return std::nullopt;
+ }
+ return Pose3f({vec[0], vec[1], vec[2]}, rotationVectorToQuaternion({vec[3], vec[4], vec[5]}));
+}
+
+std::vector<float> Pose3f::toVector() const {
+ Eigen::Vector3f rot = quaternionToRotationVector(mRotation);
+ return {mTranslation[0], mTranslation[1], mTranslation[2], rot[0], rot[1], rot[2]};
+}
+
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity) {
+ // Never rate limit if both limits are set to infinity.
+ if (isinf(maxTranslationalVelocity) && isinf(maxRotationalVelocity)) {
+ return {to, false};
+ }
+ // Always rate limit if t is 0 (required to avoid division by 0).
+ if (t == 0) {
+ return {from, true};
+ }
+
+ Pose3f fromToTo = from.inverse() * to;
+ Twist3f twist = differentiate(fromToTo, t);
+ float angularRotationalRatio = twist.scalarRotationalVelocity() / maxRotationalVelocity;
+ float translationalVelocityRatio =
+ twist.scalarTranslationalVelocity() / maxTranslationalVelocity;
+ float maxRatio = std::max(angularRotationalRatio, translationalVelocityRatio);
+ if (maxRatio <= 1) {
+ return {to, false};
+ }
+ return {from * integrate(twist, t / maxRatio), true};
+}
+
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose) {
+ os << "translation: " << pose.translation().transpose()
+ << " quaternion: " << pose.rotation().coeffs().transpose();
+ return os;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator-test.cpp b/media/libheadtracking/PoseDriftCompensator-test.cpp
new file mode 100644
index 0000000..df0a05f
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator-test.cpp
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+#include <cmath>
+
+#include "PoseDriftCompensator.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseDriftCompensator::Options;
+
+TEST(PoseDriftCompensator, Initial) {
+ PoseDriftCompensator comp(Options{});
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, NoDrift) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ // First pose sets the baseline.
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(2000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ // Recentering resets the baseline.
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(4000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, NoDriftZeroTime) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(Options{});
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1000, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Asymptotic) {
+ Pose3f pose({1, 2, 3}, Quaternionf::UnitRandom());
+
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1, .rotationalDriftTimeConstant = 1});
+
+ // Set the same pose for a long time.
+ for (int64_t t = 0; t < 1000; ++t) {
+ comp.setInput(t, pose);
+ }
+
+    // Output should have faded to approximately identity.
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+TEST(PoseDriftCompensator, Fast) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f pose2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 1e7, .rotationalDriftTimeConstant = 1e7});
+
+ comp.setInput(0, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(1, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(2, pose1);
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+
+ comp.setInput(3, pose2);
+ EXPECT_EQ(comp.getOutput(), pose1.inverse() * pose2);
+}
+
+TEST(PoseDriftCompensator, Drift) {
+ Pose3f pose1({1, 2, 3}, rotateZ(-M_PI * 3 / 4));
+ PoseDriftCompensator comp(
+ Options{.translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
+
+ // Establish a baseline.
+ comp.setInput(1000, Pose3f());
+
+ // Initial pose is used as is.
+ comp.setInput(1000, pose1);
+ EXPECT_EQ(comp.getOutput(), pose1);
+
+    // After 1000 ticks, the rotation should have decayed by a factor of exp(-1) and the
+    // translation by a factor of exp(-2).
+ comp.setInput(2000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-2), rotateZ(-M_PI * 3 / 4 * std::expf(-1))));
+
+ // As long as the input stays the same, we'll continue to advance towards identity.
+ comp.setInput(3000, pose1);
+ EXPECT_EQ(comp.getOutput(),
+ Pose3f(Vector3f{1, 2, 3} * std::expf(-4), rotateZ(-M_PI * 3 / 4 * std::expf(-2))));
+
+ comp.recenter();
+ EXPECT_EQ(comp.getOutput(), Pose3f());
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.cpp b/media/libheadtracking/PoseDriftCompensator.cpp
new file mode 100644
index 0000000..0e90cad
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.cpp
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseDriftCompensator.h"
+
+#include <cmath>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+PoseDriftCompensator::PoseDriftCompensator(const Options& options) : mOptions(options) {}
+
+void PoseDriftCompensator::setInput(int64_t timestamp, const Pose3f& input) {
+ if (mTimestamp.has_value()) {
+ // Avoid computation upon first input (only sets the initial state).
+ Pose3f prevInputToInput = mPrevInput.inverse() * input;
+ mOutput = scale(mOutput, timestamp - mTimestamp.value()) * prevInputToInput;
+ }
+ mPrevInput = input;
+ mTimestamp = timestamp;
+}
+
+void PoseDriftCompensator::recenter() {
+ mTimestamp.reset();
+ mOutput = Pose3f();
+}
+
+Pose3f PoseDriftCompensator::getOutput() const {
+ return mOutput;
+}
+
+Pose3f PoseDriftCompensator::scale(const Pose3f& pose, int64_t dt) {
+ // Translation.
+ Vector3f translation = pose.translation();
+ translation *= std::expf(-static_cast<float>(dt) / mOptions.translationalDriftTimeConstant);
+
+ // Rotation.
+ Vector3f rotationVec = quaternionToRotationVector(pose.rotation());
+ rotationVec *= std::expf(-static_cast<float>(dt) / mOptions.rotationalDriftTimeConstant);
+
+ return Pose3f(translation, rotationVectorToQuaternion(rotationVec));
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseDriftCompensator.h b/media/libheadtracking/PoseDriftCompensator.h
new file mode 100644
index 0000000..a71483b
--- /dev/null
+++ b/media/libheadtracking/PoseDriftCompensator.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Drift compensator for a stream of poses.
+ *
+ * This is effectively a high-pass filter for a pose stream, removing any DC-offset / bias. The
+ * provided input stream will be "pulled" toward identity with an exponential decay filter with a
+ * configurable time constant. Rotation and translation are handled separately.
+ *
+ * Typical usage:
+ * PoseDriftCompensator comp(...);
+ *
+ * while (...) {
+ * comp.setInput(...);
+ * Pose3f output = comp.getOutput();
+ * }
+ *
+ * There doesn't need to be a 1:1 correspondence between setInput() and getOutput() calls. The
+ * output timestamp is always that of the last setInput() call. Calling recenter() will reset the
+ * bias to the current output, causing the output to be identity.
+ *
+ * The initial bias point is identity.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseDriftCompensator {
+ public:
+ struct Options {
+ float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseDriftCompensator(const Options& options);
+
+ void setInput(int64_t timestamp, const Pose3f& input);
+
+ void recenter();
+
+ Pose3f getOutput() const;
+
+ private:
+ const Options mOptions;
+
+ Pose3f mPrevInput;
+ Pose3f mOutput;
+ std::optional<int64_t> mTimestamp;
+
+ Pose3f scale(const Pose3f& pose, int64_t dt);
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseProcessingGraph.png b/media/libheadtracking/PoseProcessingGraph.png
new file mode 100644
index 0000000..0363068
--- /dev/null
+++ b/media/libheadtracking/PoseProcessingGraph.png
Binary files differ
diff --git a/media/libheadtracking/PoseRateLimiter-test.cpp b/media/libheadtracking/PoseRateLimiter-test.cpp
new file mode 100644
index 0000000..f306183
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter-test.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "PoseRateLimiter.h"
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+using Options = PoseRateLimiter::Options;
+
+TEST(PoseRateLimiter, Initial) {
+ Pose3f target({1, 2, 3}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 10, .maxRotationalVelocity = 10});
+ limiter.setTarget(target);
+ EXPECT_EQ(limiter.calculatePose(1000), target);
+}
+
+TEST(PoseRateLimiter, UnlimitedZeroTime) {
+ Pose3f target1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f target2({4, 5, 6}, Quaternionf::UnitRandom());
+ PoseRateLimiter limiter(Options{});
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+ limiter.setTarget(target2);
+ EXPECT_EQ(limiter.calculatePose(0), target2);
+ limiter.setTarget(target1);
+ EXPECT_EQ(limiter.calculatePose(0), target1);
+}
+
+TEST(PoseRateLimiter, Limited) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1000), pose2);
+
+ // Rate limiting is inactive. Should track despite the violation.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1001), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1003), Pose3f({1, 2, 5}, rotateZ(M_PI * 2 / 8)));
+ // Skip a tick.
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1005), Pose3f({1, 2, 7}, rotateZ(M_PI * 4 / 8)));
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1006), pose2);
+
+ // We reached the target, so rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1007), pose1);
+}
+
+TEST(PoseRateLimiter, Reset) {
+ Pose3f pose1({1, 2, 3}, Quaternionf::Identity());
+ Pose3f pose2({1, 2, 8}, rotateZ(M_PI * 5 / 8));
+ PoseRateLimiter limiter(Options{.maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1000), pose1);
+
+ // Enable rate limiting and observe gradual motion from pose1 to pose2.
+ limiter.enable();
+ limiter.setTarget(pose2);
+ EXPECT_EQ(limiter.calculatePose(1001), Pose3f({1, 2, 4}, rotateZ(M_PI * 1 / 8)));
+
+ // Reset the pose and disable rate limiting.
+ limiter.reset(pose2);
+ EXPECT_EQ(limiter.calculatePose(1002), pose2);
+
+ // Rate limiting should now be disabled.
+ limiter.setTarget(pose1);
+ EXPECT_EQ(limiter.calculatePose(1003), pose1);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.cpp b/media/libheadtracking/PoseRateLimiter.cpp
new file mode 100644
index 0000000..380e22b
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.cpp
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "PoseRateLimiter.h"
+
+namespace android {
+namespace media {
+
+PoseRateLimiter::PoseRateLimiter(const Options& options) : mOptions(options), mLimiting(false) {}
+
+void PoseRateLimiter::enable() {
+ mLimiting = true;
+}
+
+void PoseRateLimiter::reset(const Pose3f& target) {
+ mLimiting = false;
+ mTargetPose = target;
+}
+
+void PoseRateLimiter::setTarget(const Pose3f& target) {
+ mTargetPose = target;
+}
+
+Pose3f PoseRateLimiter::calculatePose(int64_t timestamp) {
+ assert(mTargetPose.has_value());
+ Pose3f pose;
+ if (mLimiting && mOutput.has_value()) {
+ std::tie(pose, mLimiting) = moveWithRateLimit(
+ mOutput->pose, mTargetPose.value(), timestamp - mOutput->timestamp,
+ mOptions.maxTranslationalVelocity, mOptions.maxRotationalVelocity);
+ } else {
+ pose = mTargetPose.value();
+ }
+ mOutput = Point{pose, timestamp};
+ return pose;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/PoseRateLimiter.h b/media/libheadtracking/PoseRateLimiter.h
new file mode 100644
index 0000000..aa2fe80
--- /dev/null
+++ b/media/libheadtracking/PoseRateLimiter.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Limits a stream of poses to given maximum translational and rotational velocities.
+ *
+ * Normal operation:
+ *
+ * Pose3f output;
+ * PoseRateLimiter limiter(...);
+ *
+ * // Limiting is disabled. Output will be the same as last input.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Enable limiting. Output will no longer be necessarily the same as last input.
+ * limiter.enable();
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * // Once the output has caught up with the last input, the limiter will be
+ * // automatically disabled again and the output will match the input again.
+ * limiter.setTarget(...);
+ * output = limiter.calculatePose(...);
+ *
+ * As shown above, the limiter is turned on manually via enable(), but turns off automatically as
+ * soon as the output is able to catch up to the input. The intention is that rate limiting will be
+ * turned on at specific times to smooth out any artificial discontinuities introduced to the pose
+ * stream, but the rest of the time will be a simple passthrough.
+ *
+ * setTarget(...) and calculatePose(...) don't have to be ordered in any particular way. However,
+ * setTarget or reset() must be called at least once prior to the first calculatePose().
+ *
+ * Calling reset() instead of setTarget() forces the output to the given pose and disables rate
+ * limiting.
+ *
+ * This implementation is thread-compatible, but not thread-safe.
+ */
+class PoseRateLimiter {
+ public:
+ struct Options {
+ float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+ float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+ };
+
+ explicit PoseRateLimiter(const Options& options);
+
+ void enable();
+
+ void reset(const Pose3f& target);
+ void setTarget(const Pose3f& target);
+
+ Pose3f calculatePose(int64_t timestamp);
+
+ private:
+ struct Point {
+ Pose3f pose;
+ int64_t timestamp;
+ };
+
+ const Options mOptions;
+ bool mLimiting;
+ std::optional<Pose3f> mTargetPose;
+ std::optional<Point> mOutput;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil-test.cpp b/media/libheadtracking/QuaternionUtil-test.cpp
new file mode 100644
index 0000000..e79e54a
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil-test.cpp
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(QuaternionUtil, RotationVectorToQuaternion) {
+ // 90 degrees around Z.
+ Vector3f rot = {0, 0, M_PI_2};
+ Quaternionf quat = rotationVectorToQuaternion(rot);
+ ASSERT_EQ(quat * Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ ASSERT_EQ(quat * Vector3f(0, 1, 0), Vector3f(-1, 0, 0));
+ ASSERT_EQ(quat * Vector3f(0, 0, 1), Vector3f(0, 0, 1));
+}
+
+TEST(QuaternionUtil, QuaternionToRotationVector) {
+ Quaternionf quat = Quaternionf::FromTwoVectors(Vector3f(1, 0, 0), Vector3f(0, 1, 0));
+ Vector3f rot = quaternionToRotationVector(quat);
+ ASSERT_EQ(rot, Vector3f(0, 0, M_PI_2));
+}
+
+TEST(QuaternionUtil, RoundTripFromQuaternion) {
+ Quaternionf quaternion = Quaternionf::UnitRandom();
+ EXPECT_EQ(quaternion, rotationVectorToQuaternion(quaternionToRotationVector(quaternion)));
+}
+
+TEST(QuaternionUtil, RoundTripFromVector) {
+ Vector3f vec{0.1, 0.2, 0.3};
+ EXPECT_EQ(vec, quaternionToRotationVector(rotationVectorToQuaternion(vec)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.cpp b/media/libheadtracking/QuaternionUtil.cpp
new file mode 100644
index 0000000..5d090de
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.cpp
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "QuaternionUtil.h"
+
+#include <cassert>
+
+namespace android {
+namespace media {
+
+using Eigen::NumTraits;
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace {
+
+Vector3f LogSU2(const Quaternionf& q) {
+ // Implementation of the logarithmic map of SU(2) using atan.
+ // This follows Hertzberg et al. "Integrating Generic Sensor Fusion Algorithms
+ // with Sound State Representations through Encapsulation of Manifolds", Eq.
+ // (31)
+ // We use asin and acos instead of atan to enable the use of Eigen Autodiff
+ // with SU2.
+ const float sign_of_w = q.w() < 0.f ? -1.f : 1.f;
+ const float abs_w = sign_of_w * q.w();
+ const Vector3f v = sign_of_w * q.vec();
+ const float squared_norm_of_v = v.squaredNorm();
+
+    assert(std::abs(1.f - abs_w * abs_w - squared_norm_of_v) < NumTraits<float>::dummy_precision());
+
+ if (squared_norm_of_v > NumTraits<float>::dummy_precision()) {
+ const float norm_of_v = sqrt(squared_norm_of_v);
+ if (abs_w > NumTraits<float>::dummy_precision()) {
+ // asin(x) = acos(x) at x = 1/sqrt(2).
+ if (norm_of_v <= float(M_SQRT1_2)) {
+ return (asin(norm_of_v) / norm_of_v) * v;
+ }
+ return (acos(abs_w) / norm_of_v) * v;
+ }
+ return (M_PI_2 / norm_of_v) * v;
+ }
+
+ // Taylor expansion at squared_norm_of_v == 0
+ return (1.f / abs_w - squared_norm_of_v / (3.f * pow(abs_w, 3))) * v;
+}
+
+Quaternionf ExpSU2(const Vector3f& delta) {
+ Quaternionf q_delta;
+ const float theta_squared = delta.squaredNorm();
+ if (theta_squared > NumTraits<float>::dummy_precision()) {
+ const float theta = sqrt(theta_squared);
+ q_delta.w() = cos(theta);
+ q_delta.vec() = (sin(theta) / theta) * delta;
+ } else {
+        // Taylor expansions around theta == 0
+ q_delta.w() = 1.f - 0.5f * theta_squared;
+ q_delta.vec() = (1.f - 1.f / 6.f * theta_squared) * delta;
+ }
+ return q_delta;
+}
+
+} // namespace
+
+Quaternionf rotationVectorToQuaternion(const Vector3f& rotationVector) {
+    // SU(2) is a double cover of SO(3), thus we have to halve the tangent vector
+    // delta.
+ const Vector3f half_delta = 0.5f * rotationVector;
+ return ExpSU2(half_delta);
+}
+
+Vector3f quaternionToRotationVector(const Quaternionf& quaternion) {
+ // SU(2) is a double cover of SO(3), thus we have to multiply the tangent
+ // vector delta by two
+ return 2.f * LogSU2(quaternion);
+}
+
+Quaternionf rotateX(float angle) {
+ return rotationVectorToQuaternion(Vector3f(1, 0, 0) * angle);
+}
+
+Quaternionf rotateY(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 1, 0) * angle);
+}
+
+Quaternionf rotateZ(float angle) {
+ return rotationVectorToQuaternion(Vector3f(0, 0, 1) * angle);
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/QuaternionUtil.h b/media/libheadtracking/QuaternionUtil.h
new file mode 100644
index 0000000..f7a2ca9
--- /dev/null
+++ b/media/libheadtracking/QuaternionUtil.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * Converts a rotation vector to an equivalent quaternion.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Quaternionf rotationVectorToQuaternion(const Eigen::Vector3f& rotationVector);
+
+/**
+ * Converts a quaternion to an equivalent rotation vector.
+ * The rotation vector is given as a 3-vector whose direction represents the rotation axis and its
+ * magnitude the rotation angle (in radians) around that axis.
+ */
+Eigen::Vector3f quaternionToRotationVector(const Eigen::Quaternionf& quaternion);
+
+/**
+ * Returns a quaternion representing a rotation around the X-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateX(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Y-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateY(float angle);
+
+/**
+ * Returns a quaternion representing a rotation around the Z-axis with the given amount (in
+ * radians).
+ */
+Eigen::Quaternionf rotateZ(float angle);
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/README.md b/media/libheadtracking/README.md
new file mode 100644
index 0000000..3d5b71a
--- /dev/null
+++ b/media/libheadtracking/README.md
@@ -0,0 +1,185 @@
+# Head-Tracking Library For Immersive Audio
+
+This library handles the processing of head-tracking information, necessary for
+Immersive Audio functionality. It covers everything from raw sensor readings to
+the final pose fed into a virtualizer.
+
+## Basic Usage
+
+The main entry point into this library is the `HeadTrackingProcessor` class.
+This class is provided with the following inputs:
+
+- Head pose, relative to some arbitrary world frame.
+- Screen pose, relative to some arbitrary world frame.
+- Display orientation, defined as the angle between the "physical" screen and
+ the "logical" screen.
+- Transform between the screen and the sound stage.
+- Desired operational mode:
+ - Static: only the sound stage pose is taken into account. This will result
+  in an experience where the sound stage moves with the listener's head.
+ - World-relative: both the head pose and stage pose are taken into account.
+ This will result in an experience where the sound stage is perceived to be
+ located at a fixed place in the world.
+ - Screen-relative: the head pose, screen pose and stage pose are all taken
+ into account. This will result in an experience where the sound stage is
+ perceived to be located at a fixed place relative to the screen.
+
+Once inputs are provided, the `calculate()` method will make the following
+output available:
+
+- Stage pose, relative to the head. This aggregates all the inputs mentioned
+ above and is ready to be fed into a virtualizer.
+- Actual operational mode. May deviate from the desired one in cases where the
+  desired mode cannot be calculated (for example, as a result of dropped messages
+ from one of the sensors).
+
+A `recenter()` operation is also available, which indicates to the system that
+whatever pose the screen and head are currently at should be considered as the
+"center" pose, or frame of reference.
+
+## Pose-Related Conventions
+
+### Naming and Composition
+
+When referring to poses in code, it is always good practice to follow
+conventional naming, which highlights the reference and target frames clearly:
+
+Bad:
+
+```
+Pose3f headPose;
+```
+
+Good:
+
+```
+Pose3f worldToHead; // “world” is the reference frame,
+ // “head” is the target frame.
+```
+
+By following this convention, it is easy to follow correct composition of poses,
+by making sure adjacent frames are identical:
+
+```
+Pose3f aToD = aToB * bToC * cToD;
+```
+
+And similarly, inverting the transform simply flips the reference and target:
+
+```
+Pose3f aToB = bToA.inverse();
+```
+
+### Twist
+
+“Twist” is to pose what velocity is to distance: it is the time-derivative of a
+pose, representing the change in pose over a short period of time. Its naming
+convention always states one frame, e.g.:
+
+```
+Twist3f headTwist;
+```
+
+This means that this twist represents the head-at-time-T to head-at-time-T+dt
+transform. Twists are not composable in the same way as poses.
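+
+For example, `moveWithRateLimit()` in this change derives a twist from two poses
+and re-integrates a scaled version of it (a sketch using the `differentiate()` /
+`integrate()` helpers from `media/Twist.h`):
+
+```
+Pose3f fromToTo = from.inverse() * to;         // motion between two poses
+Twist3f twist = differentiate(fromToTo, dt);   // its "velocity" over dt
+float v = twist.scalarTranslationalVelocity(); // scalar magnitudes, used for
+float w = twist.scalarRotationalVelocity();    // rate limiting
+Pose3f partway = from * integrate(twist, dt / 2); // halfway along the motion
+```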
+
+### Frames of Interest
+
+The frames of interest in this library are defined as follows:
+
+#### Head
+
+This is the listener’s head. The origin is at the center point between the
+ear-drums, the X-axis goes from left ear to right ear, Y-axis goes from the back
+of the head towards the face and Z-axis goes from the bottom of the head to the
+top.
+
+#### Screen
+
+This is the primary screen that the user will be looking at, which is relevant
+for some Immersive Audio use-cases, such as watching a movie. We will follow a
+different convention for this frame than what the Sensor framework uses. The
+origin is at the center of the screen. X-axis goes from left to right, Z-axis
+goes from the screen bottom to the screen top, Y-axis goes “into” the screen
+(from the direction of the viewer). The up/down/left/right of the screen are
+defined as the logical directions used for display. So when flipping the display
+orientation between “landscape” and “portrait”, the frame of reference will
+change with respect to the physical screen.
+
+#### Stage
+
+This is the frame of reference used by the virtualizer for positioning sound
+objects. It is not associated with any physical frame. In a typical
+multi-channel scenario, the listener is at the origin, the X-axis goes from left
+to right, Y-axis from back to front and Z-axis from down to up. For example, a
+front-right speaker is located at positive X and Y with Z = 0; a height speaker
+will have a positive Z.
+
+#### World
+
+It is sometimes convenient to use an intermediate frame when dealing with
+head-to-screen transforms. The “world” frame is an arbitrary frame of reference
+in the physical world, relative to which we can measure the head pose and screen
+pose. In (very common) cases when we can’t establish such an absolute frame, we
+can take each measurement relative to a separate, arbitrary frame and high-pass
+the result.
+
+## Processing Description
+
+![Pose processing graph](PoseProcessingGraph.png)
+
+The diagram above illustrates the processing that takes place from the inputs to
+the outputs.
+
+### Predictor
+
+The Predictor block gets pose + twist (pose derivative) and extrapolates to
+obtain a predicted head pose (w/ given latency).
+
+### Drift / Bias Compensator
+
+The Drift / Bias Compensator blocks serve two purposes (see the sketch below):
+
+- Compensate for floating reference axes by applying a high-pass filter, which
+ slowly pulls the pose toward identity.
+- Establish the reference frame for the poses by having the ability to set the
+ current pose as the reference for future poses (recentering). Effectively,
+ this is resetting the filter state to identity.
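+
+A minimal sketch, using the `PoseDriftCompensator` API added in this change (the
+time-constant values are arbitrary):
+
+```
+PoseDriftCompensator comp(PoseDriftCompensator::Options{
+        .translationalDriftTimeConstant = 500, .rotationalDriftTimeConstant = 1000});
+comp.setInput(t0, rawPose1); // first input only establishes the baseline
+comp.setInput(t1, rawPose2); // output tracks the delta, decaying toward identity
+Pose3f filtered = comp.getOutput();
+comp.recenter();             // resets the filter state: output becomes identity
+```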
+
+### Orientation Compensation
+
+The Orientation Compensation block applies the display orientation to the screen
+pose to obtain the pose of the “logical screen” frame, in which the Y-axis is
+pointing in the direction of the logical screen “up” rather than the physical
+one.
+
+### Screen-Relative Pose
+
+The Screen-Relative Pose block is provided with a head pose and a screen pose
+and estimates the pose of the head relative to the screen. Optionally, this
+module may indicate that the user is likely not in front of the screen via the
+“valid” output.
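+
+This block corresponds to the `ScreenHeadFusion` class in this change; a minimal
+sketch:
+
+```
+ScreenHeadFusion fusion;
+fusion.setWorldToHeadPose(tHead, worldToHead);
+fusion.setWorldToScreenPose(tScreen, worldToScreen);
+auto screenToHead = fusion.calculate(); // nullopt until both inputs are set
+if (screenToHead.has_value()) {
+    // screenToHead->pose is worldToScreen.inverse() * worldToHead
+    // screenToHead->timestamp is the older of the two input timestamps
+}
+```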
+
+### Mode Selector
+
+The Mode Selector block aggregates the various sources of pose information into
+a head-to-stage pose that is going to feed the virtualizer. It is controlled by
+the “desired mode” signal that indicates whether the preferred mode is static,
+world-relative or screen-relative.
+
+The actual mode may diverge from the desired mode. It is determined as follows
+(a code sketch follows the list):
+
+- If the desired mode is static, the actual mode is static.
+- If the desired mode is world-relative:
+ - If head poses are fresh, the actual mode is world-relative.
+ - Otherwise the actual mode is static.
+- If the desired mode is screen-relative:
+ - If head and screen poses are fresh and the ‘valid’ signal is asserted, the
+ actual mode is screen-relative.
+  - Otherwise, the same rules apply as for a world-relative desired mode.
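+
+A minimal sketch of these rules in action, using the `ModeSelector` API added in
+this change:
+
+```
+ModeSelector::Options options{.freshnessTimeout = 100};
+ModeSelector selector(options);
+
+selector.setScreenToStagePose(screenToStage);
+selector.setDesiredMode(HeadTrackingMode::SCREEN_RELATIVE);
+selector.setScreenToHeadPose(t, screenToHead); // std::nullopt when not in front
+selector.setWorldToHeadPose(t, worldToHead);
+
+selector.calculate(now); // applies the fallback rules listed above
+HeadTrackingMode actual = selector.getActualMode();
+Pose3f headToStage = selector.getHeadToStagePose();
+```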
+
+### Rate Limiter
+
+A Rate Limiter block is applied to the final output to smooth out any abrupt
+transitions caused by any of the following events (see the sketch below):
+
+- Mode switch.
+- Display orientation switch.
+- Recenter operation.
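+
+A minimal sketch, using the `PoseRateLimiter` API added in this change (the
+velocity limits are arbitrary):
+
+```
+PoseRateLimiter limiter(PoseRateLimiter::Options{
+        .maxTranslationalVelocity = 1, .maxRotationalVelocity = 10});
+limiter.setTarget(pose1);
+Pose3f out = limiter.calculatePose(t0); // passthrough while limiting is off
+
+limiter.enable();                       // e.g. upon a mode switch or recenter
+limiter.setTarget(pose2);
+out = limiter.calculatePose(t1); // approaches pose2 at bounded velocity;
+                                 // limiting auto-disables upon arrival
+```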
diff --git a/media/libheadtracking/ScreenHeadFusion-test.cpp b/media/libheadtracking/ScreenHeadFusion-test.cpp
new file mode 100644
index 0000000..ecf27f5
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion-test.cpp
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <gtest/gtest.h>
+
+#include "ScreenHeadFusion.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(ScreenHeadFusion, Init) {
+ ScreenHeadFusion fusion;
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoHead) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToScreenPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate_NoScreen) {
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(0, Pose3f());
+ EXPECT_FALSE(fusion.calculate().has_value());
+}
+
+TEST(ScreenHeadFusion, Calculate) {
+ Pose3f worldToScreen1({1, 2, 3}, Quaternionf::UnitRandom());
+ Pose3f worldToHead1({4, 5, 6}, Quaternionf::UnitRandom());
+ Pose3f worldToScreen2({11, 12, 13}, Quaternionf::UnitRandom());
+ Pose3f worldToHead2({14, 15, 16}, Quaternionf::UnitRandom());
+
+ ScreenHeadFusion fusion;
+ fusion.setWorldToHeadPose(123, worldToHead1);
+ fusion.setWorldToScreenPose(456, worldToScreen1);
+ auto result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(123, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead1, result->pose);
+
+ fusion.setWorldToHeadPose(567, worldToHead2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(456, result->timestamp);
+ EXPECT_EQ(worldToScreen1.inverse() * worldToHead2, result->pose);
+
+ fusion.setWorldToScreenPose(678, worldToScreen2);
+ result = fusion.calculate();
+ ASSERT_TRUE(result.has_value());
+ EXPECT_EQ(567, result->timestamp);
+ EXPECT_EQ(worldToScreen2.inverse() * worldToHead2, result->pose);
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.cpp b/media/libheadtracking/ScreenHeadFusion.cpp
new file mode 100644
index 0000000..f023570
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.cpp
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "ScreenHeadFusion.h"
+
+namespace android {
+namespace media {
+
+void ScreenHeadFusion::setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead) {
+ mWorldToHead = TimestampedPose{.timestamp = timestamp, .pose = worldToHead};
+}
+
+void ScreenHeadFusion::setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) {
+ mWorldToScreen = TimestampedPose{.timestamp = timestamp, .pose = worldToScreen};
+}
+
+std::optional<ScreenHeadFusion::TimestampedPose> ScreenHeadFusion::calculate() {
+ // TODO: this is temporary, simplistic logic.
+ if (!mWorldToHead.has_value() || !mWorldToScreen.has_value()) {
+ return std::nullopt;
+ }
+ return TimestampedPose{
+ .timestamp = std::min(mWorldToHead->timestamp, mWorldToScreen->timestamp),
+ .pose = mWorldToScreen->pose.inverse() * mWorldToHead->pose};
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/ScreenHeadFusion.h b/media/libheadtracking/ScreenHeadFusion.h
new file mode 100644
index 0000000..ee81100
--- /dev/null
+++ b/media/libheadtracking/ScreenHeadFusion.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+
+#include "media/Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Combines world-to-head pose with world-to-screen pose to obtain screen-to-head.
+ *
+ * Input poses may arrive separately. The last pose of each kind is taken into account. The
+ * timestamp of the output is the earlier (older) timestamp of the two inputs.
+ *
+ * Output may be nullopt in the following cases:
+ * - Either one of the inputs has not yet been provided.
+ * - It is estimated that the user is no longer facing the screen.
+ *
+ * Typical usage:
+ *
+ *   ScreenHeadFusion fusion;
+ * fusion.setWorldToHeadPose(...);
+ * fusion.setWorldToScreenPose(...);
+ * auto output = fusion.calculate();
+ *
+ * This class is not thread-safe, but thread-compatible.
+ */
+class ScreenHeadFusion {
+ public:
+ struct TimestampedPose {
+ int64_t timestamp;
+ Pose3f pose;
+ };
+
+ void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead);
+
+ void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen);
+
+ /**
+ * Returns the screen-to-head pose, or nullopt if invalid.
+ */
+ std::optional<TimestampedPose> calculate();
+
+ private:
+ std::optional<TimestampedPose> mWorldToHead;
+ std::optional<TimestampedPose> mWorldToScreen;
+};
+
+} // namespace media
+} // namespace android
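For illustration, a minimal sketch of the semantics documented above (illustrative only; the numeric poses and timestamps are arbitrary):

#include "ScreenHeadFusion.h"

using android::media::Pose3f;
using android::media::ScreenHeadFusion;

void fusionExample() {
    ScreenHeadFusion fusion;
    fusion.setWorldToHeadPose(/*timestamp=*/100, Pose3f({4, 5, 6}));
    fusion.setWorldToScreenPose(/*timestamp=*/200, Pose3f({1, 2, 3}));
    auto out = fusion.calculate();
    // out->timestamp == 100 (the older of the two inputs);
    // out->pose == worldToScreen.inverse() * worldToHead, i.e. screen-to-head.
}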
diff --git a/media/libheadtracking/SensorPoseProvider-example.cpp b/media/libheadtracking/SensorPoseProvider-example.cpp
new file mode 100644
index 0000000..88e222e
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider-example.cpp
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <unistd.h>
+#include <iostream>
+
+#include <android/sensor.h>
+#include <hardware/sensors.h>
+#include <utils/SystemClock.h>
+
+#include <media/SensorPoseProvider.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorManager.h>
+
+using android::elapsedRealtimeNano;
+using android::Sensor;
+using android::SensorManager;
+using android::String16;
+using android::media::Pose3f;
+using android::media::SensorPoseProvider;
+using android::media::Twist3f;
+
+using namespace std::chrono_literals;
+
+const char kPackageName[] = "SensorPoseProvider-example";
+
+class Listener : public SensorPoseProvider::Listener {
+ public:
+ void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+ const std::optional<Twist3f>& twist, bool isNewReference) override {
+ int64_t now = elapsedRealtimeNano();
+
+ std::cout << "onPose t=" << timestamp
+ << " lag=" << ((now - timestamp) / 1e6) << "[ms]"
+ << " sensor=" << handle
+ << " pose=" << pose
+ << " twist=";
+ if (twist.has_value()) {
+ std::cout << twist.value();
+ } else {
+ std::cout << "<none>";
+ }
+ std::cout << " isNewReference=" << isNewReference << std::endl;
+ }
+};
+
+int main() {
+ SensorManager& sensorManager = SensorManager::getInstanceForPackage(String16(kPackageName));
+
+    const Sensor* headSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_GAME_ROTATION_VECTOR);
+    const Sensor* screenSensor = sensorManager.getDefaultSensor(SENSOR_TYPE_ROTATION_VECTOR);
+    if (headSensor == nullptr || screenSensor == nullptr) {
+        std::cout << "Failed to find sensors" << std::endl;
+        return 1;
+    }
+
+    Listener listener;
+
+    std::unique_ptr<SensorPoseProvider> provider =
+            SensorPoseProvider::create(kPackageName, &listener);
+    if (provider == nullptr) {
+        std::cout << "Failed to create a SensorPoseProvider" << std::endl;
+        return 1;
+    }
+    if (!provider->startSensor(headSensor->getHandle(), 500ms)) {
+ std::cout << "Failed to start head sensor" << std::endl;
+ }
+ sleep(2);
+ if (!provider->startSensor(screenSensor->getHandle(), 500ms)) {
+ std::cout << "Failed to start screenSensor sensor" << std::endl;
+ }
+ sleep(2);
+ provider->stopSensor(headSensor->getHandle());
+ sleep(2);
+ return 0;
+}
diff --git a/media/libheadtracking/SensorPoseProvider.cpp b/media/libheadtracking/SensorPoseProvider.cpp
new file mode 100644
index 0000000..ec5e1ec
--- /dev/null
+++ b/media/libheadtracking/SensorPoseProvider.cpp
@@ -0,0 +1,355 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <media/SensorPoseProvider.h>
+
+#define LOG_TAG "SensorPoseProvider"
+
+#include <inttypes.h>
+
+#include <future>
+#include <map>
+#include <thread>
+
+#include <android-base/thread_annotations.h>
+#include <log/log_main.h>
+#include <sensor/Sensor.h>
+#include <sensor/SensorEventQueue.h>
+#include <sensor/SensorManager.h>
+#include <utils/Looper.h>
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+namespace {
+
+// Identifier to use for our event queue on the loop.
+// The number 19 is arbitrary, only useful if using multiple objects on the same looper.
+constexpr int kIdent = 19;
+
+static inline Looper* ALooper_to_Looper(ALooper* alooper) {
+ return reinterpret_cast<Looper*>(alooper);
+}
+
+static inline ALooper* Looper_to_ALooper(Looper* looper) {
+ return reinterpret_cast<ALooper*>(looper);
+}
+
+/**
+ * RAII-wrapper around SensorEventQueue, which unregisters it on destruction.
+ */
+class EventQueueGuard {
+ public:
+ EventQueueGuard(const sp<SensorEventQueue>& queue, Looper* looper) : mQueue(queue) {
+ mQueue->looper = Looper_to_ALooper(looper);
+ mQueue->requestAdditionalInfo = false;
+ looper->addFd(mQueue->getFd(), kIdent, ALOOPER_EVENT_INPUT, nullptr, nullptr);
+ }
+
+ ~EventQueueGuard() {
+ if (mQueue) {
+ ALooper_to_Looper(mQueue->looper)->removeFd(mQueue->getFd());
+ }
+ }
+
+ EventQueueGuard(const EventQueueGuard&) = delete;
+ EventQueueGuard& operator=(const EventQueueGuard&) = delete;
+
+ [[nodiscard]] SensorEventQueue* get() const { return mQueue.get(); }
+
+ private:
+ sp<SensorEventQueue> mQueue;
+};
+
+/**
+ * RAII-wrapper around an enabled sensor, which disables it upon destruction.
+ */
+class SensorEnableGuard {
+ public:
+ SensorEnableGuard(const sp<SensorEventQueue>& queue, int32_t sensor)
+ : mQueue(queue), mSensor(sensor) {}
+
+ ~SensorEnableGuard() {
+ if (mSensor != SensorPoseProvider::INVALID_HANDLE) {
+ int ret = mQueue->disableSensor(mSensor);
+ if (ret) {
+ ALOGE("Failed to disable sensor: %s", strerror(ret));
+ }
+ }
+ }
+
+ SensorEnableGuard(const SensorEnableGuard&) = delete;
+ SensorEnableGuard& operator=(const SensorEnableGuard&) = delete;
+
+ // Enable moving.
+ SensorEnableGuard(SensorEnableGuard&& other) : mQueue(other.mQueue), mSensor(other.mSensor) {
+ other.mSensor = SensorPoseProvider::INVALID_HANDLE;
+ }
+
+ private:
+ sp<SensorEventQueue> const mQueue;
+ int32_t mSensor;
+};
+
+/**
+ * Streams the required events to a PoseListener, based on events originating from the Sensor stack.
+ */
+class SensorPoseProviderImpl : public SensorPoseProvider {
+ public:
+ static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener) {
+ std::unique_ptr<SensorPoseProviderImpl> result(
+ new SensorPoseProviderImpl(packageName, listener));
+ return result->waitInitFinished() ? std::move(result) : nullptr;
+ }
+
+ ~SensorPoseProviderImpl() override {
+ // Disable all active sensors.
+ mEnabledSensors.clear();
+ mLooper->wake();
+ mThread.join();
+ }
+
+ bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) override {
+ // Figure out the sensor's data format.
+ DataFormat format = getSensorFormat(sensor);
+ if (format == DataFormat::kUnknown) {
+ ALOGE("Unknown format for sensor %" PRId32, sensor);
+ return false;
+ }
+
+ {
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.emplace(sensor, format);
+ }
+
+ // Enable the sensor.
+ if (mQueue->enableSensor(sensor, samplingPeriod.count(), 0, 0)) {
+ ALOGE("Failed to enable sensor");
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.erase(sensor);
+ return false;
+ }
+
+ mEnabledSensors.emplace(sensor, SensorEnableGuard(mQueue.get(), sensor));
+ return true;
+ }
+
+ void stopSensor(int handle) override {
+ mEnabledSensors.erase(handle);
+ std::lock_guard lock(mMutex);
+ mEnabledSensorFormats.erase(handle);
+ }
+
+ private:
+ enum DataFormat {
+ kUnknown,
+ kQuaternion,
+ kRotationVectorsAndFlags,
+ };
+
+ struct PoseEvent {
+ Pose3f pose;
+ std::optional<Twist3f> twist;
+ bool isNewReference;
+ };
+
+ sp<Looper> mLooper;
+ Listener* const mListener;
+ SensorManager* const mSensorManager;
+ std::thread mThread;
+ std::mutex mMutex;
+ std::map<int32_t, SensorEnableGuard> mEnabledSensors;
+ std::map<int32_t, DataFormat> mEnabledSensorFormats GUARDED_BY(mMutex);
+ sp<SensorEventQueue> mQueue;
+
+    // Some of the initialization must happen on the worker thread, because the API relies on the
+    // thread-local looper. In addition, as a matter of convenience, we keep some of the state on
+    // that thread's stack.
+    // For that reason, we use a two-step initialization approach: the ctor mostly just starts the
+    // worker thread, and the thread notifies us, via the promise below, when initialization is
+    // finished and whether it was successful.
+ std::promise<bool> mInitPromise;
+
+ SensorPoseProviderImpl(const char* packageName, Listener* listener)
+ : mListener(listener),
+ mSensorManager(&SensorManager::getInstanceForPackage(String16(packageName))),
+ mThread([this] { threadFunc(); }) {}
+
+ void initFinished(bool success) { mInitPromise.set_value(success); }
+
+ bool waitInitFinished() { return mInitPromise.get_future().get(); }
+
+ void threadFunc() {
+ // Obtain looper.
+ mLooper = Looper::prepare(ALOOPER_PREPARE_ALLOW_NON_CALLBACKS);
+
+ // Create event queue.
+ mQueue = mSensorManager->createEventQueue();
+
+ if (mQueue == nullptr) {
+ ALOGE("Failed to create a sensor event queue");
+ initFinished(false);
+ return;
+ }
+
+ EventQueueGuard eventQueueGuard(mQueue, mLooper.get());
+
+ initFinished(true);
+
+ while (true) {
+ int ret = mLooper->pollOnce(-1 /* no timeout */, nullptr, nullptr, nullptr);
+
+ switch (ret) {
+ case ALOOPER_POLL_WAKE:
+ // Normal way to exit.
+ return;
+
+ case kIdent:
+ // Possible events on our queue.
+ break;
+
+ default:
+ ALOGE("Unexpected status out of Looper::pollOnce: %d", ret);
+ }
+
+ // Process an event.
+ ASensorEvent event;
+ ssize_t actual = mQueue->read(&event, 1);
+ if (actual > 0) {
+ mQueue->sendAck(&event, actual);
+ }
+ ssize_t size = mQueue->filterEvents(&event, actual);
+
+ if (size < 0 || size > 1) {
+ ALOGE("Unexpected return value from SensorEventQueue::filterEvents: %zd", size);
+ break;
+ }
+ if (size == 0) {
+ // No events.
+ continue;
+ }
+
+ handleEvent(event);
+ }
+ }
+
+ void handleEvent(const ASensorEvent& event) {
+ DataFormat format;
+ {
+ std::lock_guard lock(mMutex);
+ auto iter = mEnabledSensorFormats.find(event.sensor);
+ if (iter == mEnabledSensorFormats.end()) {
+ // This can happen if we have any pending events shortly after stopping.
+ return;
+ }
+ format = iter->second;
+ }
+ auto value = parseEvent(event, format);
+ mListener->onPose(event.timestamp, event.sensor, value.pose, value.twist,
+ value.isNewReference);
+ }
+
+ DataFormat getSensorFormat(int32_t handle) {
+ std::optional<const Sensor> sensor = getSensorByHandle(handle);
+ if (!sensor) {
+ ALOGE("Sensor not found: %d", handle);
+ return DataFormat::kUnknown;
+ }
+ if (sensor->getType() == ASENSOR_TYPE_ROTATION_VECTOR ||
+ sensor->getType() == ASENSOR_TYPE_GAME_ROTATION_VECTOR) {
+ return DataFormat::kQuaternion;
+ }
+
+ if (sensor->getStringType() == "com.google.hardware.sensor.hid_dynamic.headtracker") {
+ return DataFormat::kRotationVectorsAndFlags;
+ }
+
+ return DataFormat::kUnknown;
+ }
+
+ std::optional<const Sensor> getSensorByHandle(int32_t handle) {
+ const Sensor* const* list;
+ ssize_t size;
+
+ // Search static sensor list.
+ size = mSensorManager->getSensorList(&list);
+ if (size < 0) {
+ ALOGE("getSensorList failed with error code %zd", size);
+ return std::nullopt;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (list[i]->getHandle() == handle) {
+ return *list[i];
+ }
+ }
+
+ // Search dynamic sensor list.
+ Vector<Sensor> dynList;
+ size = mSensorManager->getDynamicSensorList(dynList);
+ if (size < 0) {
+ ALOGE("getDynamicSensorList failed with error code %zd", size);
+ return std::nullopt;
+ }
+ for (size_t i = 0; i < size; ++i) {
+ if (dynList[i].getHandle() == handle) {
+ return dynList[i];
+ }
+ }
+
+ return std::nullopt;
+ }
+
+ static PoseEvent parseEvent(const ASensorEvent& event, DataFormat format) {
+ // TODO(ytai): Add more types.
+ switch (format) {
+ case DataFormat::kQuaternion: {
+ Eigen::Quaternionf quat(event.data[3], event.data[0], event.data[1], event.data[2]);
+ // Adapt to different frame convention.
+ quat *= rotateX(-M_PI_2);
+ return PoseEvent{Pose3f(quat), std::optional<Twist3f>(), false};
+ }
+
+ case DataFormat::kRotationVectorsAndFlags: {
+ // Custom sensor, assumed to contain:
+ // 3 floats representing orientation as a rotation vector (in rad).
+ // 3 floats representing angular velocity as a rotation vector (in rad/s).
+ // 1 uint32_t of flags, where:
+ // - LSb is '1' iff the given sample is the first one in a new frame of reference.
+ // - The rest of the bits are reserved for future use.
+ Eigen::Vector3f rotation = {event.data[0], event.data[1], event.data[2]};
+ Eigen::Vector3f twist = {event.data[3], event.data[4], event.data[5]};
+ Eigen::Quaternionf quat = rotationVectorToQuaternion(rotation);
+ uint32_t flags = *reinterpret_cast<const uint32_t*>(&event.data[6]);
+ return PoseEvent{Pose3f(quat), Twist3f(Eigen::Vector3f::Zero(), twist),
+ (flags & (1 << 0)) != 0};
+ }
+
+ default:
+ LOG_ALWAYS_FATAL("Unexpected sensor type: %d", static_cast<int>(format));
+ }
+ }
+};
+
+} // namespace
+
+std::unique_ptr<SensorPoseProvider> SensorPoseProvider::create(const char* packageName,
+ Listener* listener) {
+ return SensorPoseProviderImpl::create(packageName, listener);
+}
+
+} // namespace media
+} // namespace android
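The two-step initialization described in the comment inside SensorPoseProviderImpl is a self-contained pattern; a stripped-down sketch with hypothetical names (not part of this change):

// Stripped-down sketch of promise-based two-step initialization; the Worker
// class and its members are hypothetical.
#include <future>
#include <memory>
#include <thread>

class Worker {
  public:
    static std::unique_ptr<Worker> create() {
        std::unique_ptr<Worker> worker(new Worker());
        // Block until the worker thread reports success or failure.
        return worker->mInitPromise.get_future().get() ? std::move(worker) : nullptr;
    }

    ~Worker() { mThread.join(); }

  private:
    Worker() : mThread([this] { threadFunc(); }) {}

    void threadFunc() {
        // Acquire thread-local resources here (e.g. a looper) and report the outcome.
        bool success = true;
        mInitPromise.set_value(success);  // Unblocks create().
        if (!success) return;
        // ... event loop runs here ...
    }

    std::promise<bool> mInitPromise;  // Declared before mThread so it exists when the thread runs.
    std::thread mThread;
};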
diff --git a/media/libheadtracking/TestUtil.h b/media/libheadtracking/TestUtil.h
new file mode 100644
index 0000000..4636d86
--- /dev/null
+++ b/media/libheadtracking/TestUtil.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <gtest/gtest.h>
+
+#include "media/Pose.h"
+#include "media/Twist.h"
+
+namespace {
+
+constexpr float kPoseComparisonPrecision = 1e-5;
+
+} // namespace
+
+// These specializations make {EXPECT,ASSERT}_{EQ,NE} work correctly for Pose3f, Twist3f, Vector3f
+// and Quaternionf.
+namespace testing {
+namespace internal {
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Pose3f, android::media::Pose3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Pose3f& lhs,
+ const android::media::Pose3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<android::media::Twist3f, android::media::Twist3f>(
+ const char* lhs_expression, const char* rhs_expression, const android::media::Twist3f& lhs,
+ const android::media::Twist3f& rhs) {
+ if (!lhs.isApprox(rhs, kPoseComparisonPrecision)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Vector3f, Eigen::Vector3f>(const char* lhs_expression,
+ const char* rhs_expression,
+ const Eigen::Vector3f& lhs,
+ const Eigen::Vector3f& rhs) {
+ if (!lhs.isApprox(rhs)) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperEQ<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs()))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+template <>
+inline AssertionResult CmpHelperNE<Eigen::Quaternionf, Eigen::Quaternionf>(
+ const char* lhs_expression, const char* rhs_expression, const Eigen::Quaternionf& lhs,
+ const Eigen::Quaternionf& rhs) {
+ // Negating the coefs results in an equivalent quaternion.
+ if (!(lhs.isApprox(rhs) || lhs.isApprox(Eigen::Quaternionf(-rhs.coeffs())))) {
+ return AssertionSuccess();
+ }
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+} // namespace internal
+} // namespace testing
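With these specializations in place, the stock gtest macros compare the math types approximately instead of bit-exactly; e.g. (illustrative):

#include <gtest/gtest.h>

#include "TestUtil.h"

TEST(TestUtilExample, ApproxEquality) {
    android::media::Pose3f a({1, 2, 3}, Eigen::Quaternionf::Identity());
    android::media::Pose3f b({1, 2, 3.000001f}, Eigen::Quaternionf::Identity());
    // Routed through Pose3f::isApprox() with kPoseComparisonPrecision,
    // so a tiny float difference still passes.
    EXPECT_EQ(a, b);
}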
diff --git a/media/libheadtracking/Twist-test.cpp b/media/libheadtracking/Twist-test.cpp
new file mode 100644
index 0000000..7984e1e
--- /dev/null
+++ b/media/libheadtracking/Twist-test.cpp
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include <gtest/gtest.h>
+
+#include "QuaternionUtil.h"
+#include "TestUtil.h"
+
+using Eigen::Quaternionf;
+using Eigen::Vector3f;
+
+namespace android {
+namespace media {
+namespace {
+
+TEST(Twist, DefaultCtor) {
+ Twist3f twist;
+ EXPECT_EQ(twist.translationalVelocity(), Vector3f::Zero());
+ EXPECT_EQ(twist.rotationalVelocity(), Vector3f::Zero());
+ EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), 0);
+ EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), 0);
+}
+
+TEST(Twist, FullCtor) {
+ Vector3f rot{1, 2, 3};
+ Vector3f trans{4, 5, 6};
+ Twist3f twist(trans, rot);
+ EXPECT_EQ(twist.translationalVelocity(), trans);
+ EXPECT_EQ(twist.rotationalVelocity(), rot);
+ EXPECT_FLOAT_EQ(twist.scalarRotationalVelocity(), std::sqrt(14.f));
+ EXPECT_FLOAT_EQ(twist.scalarTranslationalVelocity(), std::sqrt(77.f));
+}
+
+TEST(Twist, Integrate) {
+ Vector3f trans{1, 2, 3};
+ // 45 deg/sec around Z.
+ Vector3f rot{0, 0, M_PI_4};
+ Twist3f twist(trans, rot);
+ Pose3f pose = integrate(twist, 2.f);
+
+ EXPECT_EQ(pose, Pose3f(Vector3f{2, 4, 6}, rotateZ(M_PI_2)));
+}
+
+TEST(Twist, Differentiate) {
+ Pose3f pose(Vector3f{2, 4, 6}, rotateZ(M_PI_2));
+ Twist3f twist = differentiate(pose, 2.f);
+ EXPECT_EQ(twist, Twist3f(Vector3f(1, 2, 3), Vector3f(0, 0, M_PI_4)));
+}
+
+} // namespace
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/Twist.cpp b/media/libheadtracking/Twist.cpp
new file mode 100644
index 0000000..664c4d5
--- /dev/null
+++ b/media/libheadtracking/Twist.cpp
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "media/Twist.h"
+
+#include "QuaternionUtil.h"
+
+namespace android {
+namespace media {
+
+Pose3f integrate(const Twist3f& twist, float dt) {
+ Eigen::Vector3f translation = twist.translationalVelocity() * dt;
+ Eigen::Vector3f rotationVector = twist.rotationalVelocity() * dt;
+ return Pose3f(translation, rotationVectorToQuaternion(rotationVector));
+}
+
+Twist3f differentiate(const Pose3f& pose, float dt) {
+ Eigen::Vector3f translationalVelocity = pose.translation() / dt;
+ Eigen::Vector3f rotationalVelocity = quaternionToRotationVector(pose.rotation()) / dt;
+ return Twist3f(translationalVelocity, rotationalVelocity);
+}
+
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist) {
+ os << "translation: " << twist.translationalVelocity().transpose()
+ << " rotation vector: " << twist.rotationalVelocity().transpose();
+ return os;
+}
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingMode.h b/media/libheadtracking/include/media/HeadTrackingMode.h
new file mode 100644
index 0000000..38496e8
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingMode.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+namespace android {
+namespace media {
+
+/**
+ * Mode of head-tracking.
+ */
+enum class HeadTrackingMode {
+ /** No head-tracking - screen-to-head pose is assumed to be identity. */
+ STATIC,
+ /** Head tracking enabled - world-to-screen pose is assumed to be identity. */
+ WORLD_RELATIVE,
+ /** Full screen-to-head tracking enabled. */
+ SCREEN_RELATIVE,
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/HeadTrackingProcessor.h b/media/libheadtracking/include/media/HeadTrackingProcessor.h
new file mode 100644
index 0000000..9fea273
--- /dev/null
+++ b/media/libheadtracking/include/media/HeadTrackingProcessor.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <limits>
+
+#include "HeadTrackingMode.h"
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * Main entry-point for this library.
+ * This interface encompasses all the processing required for determining the head-to-stage pose
+ * used for audio virtualization.
+ * The usage involves periodic setting of the inputs, calling calculate() and obtaining the outputs.
+ * This class is not thread-safe, but thread-compatible.
+ */
+class HeadTrackingProcessor {
+ public:
+ virtual ~HeadTrackingProcessor() = default;
+
+ struct Options {
+ float maxTranslationalVelocity = std::numeric_limits<float>::infinity();
+ float maxRotationalVelocity = std::numeric_limits<float>::infinity();
+ float translationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ float rotationalDriftTimeConstant = std::numeric_limits<float>::infinity();
+ int64_t freshnessTimeout = std::numeric_limits<int64_t>::max();
+ float predictionDuration = 0;
+ };
+
+ /** Sets the desired head-tracking mode. */
+ virtual void setDesiredMode(HeadTrackingMode mode) = 0;
+
+ /**
+ * Sets the world-to-head pose and head twist (velocity).
+ * headTwist is given in the head coordinate frame.
+ */
+ virtual void setWorldToHeadPose(int64_t timestamp, const Pose3f& worldToHead,
+ const Twist3f& headTwist) = 0;
+
+ /**
+ * Sets the world-to-screen pose.
+ */
+ virtual void setWorldToScreenPose(int64_t timestamp, const Pose3f& worldToScreen) = 0;
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ virtual void setScreenToStagePose(const Pose3f& screenToStage) = 0;
+
+ /**
+ * Sets the display orientation.
+     * Orientation is expressed as the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+ */
+ virtual void setDisplayOrientation(float physicalToLogicalAngle) = 0;
+
+ /**
+ * Process all the previous inputs and update the outputs.
+ */
+ virtual void calculate(int64_t timestamp) = 0;
+
+ /**
+ * Get the aggregate head-to-stage pose (primary output of this module).
+ */
+ virtual Pose3f getHeadToStagePose() const = 0;
+
+ /**
+     * Get the actual head-tracking mode (which may deviate from the desired one).
+ */
+ virtual HeadTrackingMode getActualMode() const = 0;
+
+ /**
+     * Causes the current head and/or screen pose (per the arguments) to be considered "center".
+ */
+ virtual void recenter(bool recenterHead = true, bool recenterScreen = true) = 0;
+};
+
+/**
+ * Creates an instance featuring a default implementation of the HeadTrackingProcessor interface.
+ */
+std::unique_ptr<HeadTrackingProcessor> createHeadTrackingProcessor(
+ const HeadTrackingProcessor::Options& options,
+ HeadTrackingMode initialMode = HeadTrackingMode::STATIC);
+
+} // namespace media
+} // namespace android
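A sketch of the intended call pattern (assuming only the interface above; option values and timestamps are placeholders):

#include <memory>

#include "media/HeadTrackingProcessor.h"

using android::media::createHeadTrackingProcessor;
using android::media::HeadTrackingMode;
using android::media::HeadTrackingProcessor;
using android::media::Pose3f;
using android::media::Twist3f;

void processOnce(HeadTrackingProcessor& processor, int64_t now) {
    processor.setWorldToHeadPose(now, Pose3f(), Twist3f());
    processor.calculate(now);
    Pose3f headToStage = processor.getHeadToStagePose();  // feed into the virtualizer
    (void)headToStage;
}

int exampleMain() {
    HeadTrackingProcessor::Options options{
            .maxTranslationalVelocity = 2.0f,  // placeholder limits
            .maxRotationalVelocity = 0.8f,
    };
    std::unique_ptr<HeadTrackingProcessor> processor =
            createHeadTrackingProcessor(options, HeadTrackingMode::WORLD_RELATIVE);
    processOnce(*processor, /*timestamp=*/0);
    return 0;
}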
diff --git a/media/libheadtracking/include/media/Pose.h b/media/libheadtracking/include/media/Pose.h
new file mode 100644
index 0000000..e660bb9
--- /dev/null
+++ b/media/libheadtracking/include/media/Pose.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <optional>
+#include <vector>
+#include <Eigen/Geometry>
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF pose.
+ * This class represents a proper rigid transformation (translation + rotation) between a reference
+ * frame and a target frame,
+ *
+ * See https://en.wikipedia.org/wiki/Six_degrees_of_freedom
+ */
+class Pose3f {
+ public:
+ /** Typical precision for isApprox comparisons. */
+ static constexpr float kDummyPrecision = 1e-5f;
+
+ Pose3f(const Eigen::Vector3f& translation, const Eigen::Quaternionf& rotation)
+ : mTranslation(translation), mRotation(rotation) {}
+
+ explicit Pose3f(const Eigen::Vector3f& translation)
+ : Pose3f(translation, Eigen::Quaternionf::Identity()) {}
+
+ explicit Pose3f(const Eigen::Quaternionf& rotation)
+ : Pose3f(Eigen::Vector3f::Zero(), rotation) {}
+
+ Pose3f() : Pose3f(Eigen::Vector3f::Zero(), Eigen::Quaternionf::Identity()) {}
+
+ Pose3f(const Pose3f& other) { *this = other; }
+
+ /**
+ * Create instance from a vector-of-floats representation.
+ * The vector is expected to have exactly 6 elements, where the first three are a translation
+ * vector and the last three are a rotation vector.
+ *
+     * Returns nullopt if the input vector does not have exactly 6 elements.
+ */
+ static std::optional<Pose3f> fromVector(const std::vector<float>& vec);
+
+ /**
+ * Convert instance to a vector-of-floats representation.
+ * The vector will have exactly 6 elements, where the first three are a translation vector and
+ * the last three are a rotation vector.
+ */
+ std::vector<float> toVector() const;
+
+ Pose3f& operator=(const Pose3f& other) {
+ mTranslation = other.mTranslation;
+ mRotation = other.mRotation;
+ return *this;
+ }
+
+    Eigen::Vector3f translation() const { return mTranslation; }
+    Eigen::Quaternionf rotation() const { return mRotation; }
+
+ /**
+ * Reverses the reference and target frames.
+ */
+ Pose3f inverse() const {
+ Eigen::Quaternionf invRotation = mRotation.inverse();
+ return Pose3f(-(invRotation * translation()), invRotation);
+ }
+
+ /**
+ * Composes (chains) together two poses. By convention, this only makes sense if the target
+     * frame of the left-hand pose is the same as the reference frame of the right-hand pose.
+ * Note that this operator is not commutative.
+ */
+ Pose3f operator*(const Pose3f& other) const {
+ Pose3f result = *this;
+ result *= other;
+ return result;
+ }
+
+ Pose3f& operator*=(const Pose3f& other) {
+ mTranslation += mRotation * other.mTranslation;
+ mRotation *= other.mRotation;
+ return *this;
+ }
+
+ /**
+ * This is an imprecise "fuzzy" comparison, which is only to be used for validity-testing
+ * purposes.
+ */
+ bool isApprox(const Pose3f& other, float prec = kDummyPrecision) const {
+ return (mTranslation - other.mTranslation).norm() < prec &&
+ // Quaternions are equivalent under sign inversion.
+ ((mRotation.coeffs() - other.mRotation.coeffs()).norm() < prec ||
+ (mRotation.coeffs() + other.mRotation.coeffs()).norm() < prec);
+ }
+
+ private:
+ Eigen::Vector3f mTranslation;
+ Eigen::Quaternionf mRotation;
+};
+
+/**
+ * Pretty-printer for Pose3f.
+ */
+std::ostream& operator<<(std::ostream& os, const Pose3f& pose);
+
+/**
+ * Move between the 'from' pose and the 'to' pose, while making sure velocity limits are enforced.
+ * If velocity limits are not violated, returns the 'to' pose and false.
+ * If velocity limits are violated, returns the pose farthest along the path that can be reached
+ * within the limits, and true.
+ */
+std::tuple<Pose3f, bool> moveWithRateLimit(const Pose3f& from, const Pose3f& to, float t,
+ float maxTranslationalVelocity,
+ float maxRotationalVelocity);
+
+} // namespace media
+} // namespace android
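A quick sketch of the vector representation and the composition convention documented above (illustrative only):

#include "media/Pose.h"

using android::media::Pose3f;

void poseExample() {
    // 6 floats: translation (x, y, z) followed by a rotation vector (rad).
    std::optional<Pose3f> p = Pose3f::fromVector({1, 2, 3, 0, 0, 0.5f});
    if (!p.has_value()) return;  // wrong element count yields nullopt

    std::vector<float> v = p->toVector();  // back to the same 6 floats

    // Composition chains frames: if aToB maps A->B and bToC maps B->C,
    // then aToB * bToC maps A->C. inverse() swaps the two frames.
    Pose3f aToB = *p;
    Pose3f bToC({0, 0, 1});
    Pose3f aToC = aToB * bToC;
    (void)v; (void)aToC;
}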
diff --git a/media/libheadtracking/include/media/SensorPoseProvider.h b/media/libheadtracking/include/media/SensorPoseProvider.h
new file mode 100644
index 0000000..d2a6b77
--- /dev/null
+++ b/media/libheadtracking/include/media/SensorPoseProvider.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <memory>
+#include <optional>
+
+#include <android/sensor.h>
+
+#include "Pose.h"
+#include "Twist.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A utility providing streaming of pose data from motion sensors provided by the Sensor Framework.
+ *
+ * A live instance of this interface keeps around some resources required for accessing sensor
+ * readings (e.g. a thread and a queue). Those are released when the instance is deleted.
+ *
+ * Once alive, individual sensors can be subscribed to using startSensor() and updates can be
+ * stopped via stopSensor(). Those two methods should not be called concurrently and correct usage
+ * is assumed.
+ */
+class SensorPoseProvider {
+ public:
+ static constexpr int32_t INVALID_HANDLE = ASENSOR_INVALID;
+
+ /**
+ * Interface for consuming pose-related sensor events.
+ *
+ * The listener will be provided with a stream of events, each including:
+ * - A handle of the sensor responsible for the event.
+ * - Timestamp.
+ * - Pose.
+ * - Optional twist (time-derivative of pose).
+ *
+ * Sensors having only orientation data will have the translation part of the pose set to
+ * identity.
+ *
+ * Events are delivered in a serialized manner (i.e. callbacks do not need to be reentrant).
+ * Callbacks should not block.
+ */
+ class Listener {
+ public:
+ virtual ~Listener() = default;
+
+ virtual void onPose(int64_t timestamp, int32_t handle, const Pose3f& pose,
+ const std::optional<Twist3f>& twist, bool isNewReference) = 0;
+ };
+
+ /**
+ * Creates a new SensorPoseProvider instance.
+ * Events will be delivered to the listener as long as the returned instance is kept alive.
+ * @param packageName Client's package name.
+ * @param listener The listener that will get the events.
+ * @return The new instance, or nullptr in case of failure.
+ */
+ static std::unique_ptr<SensorPoseProvider> create(const char* packageName, Listener* listener);
+
+ virtual ~SensorPoseProvider() = default;
+
+ /**
+ * Start receiving pose updates from a given sensor.
+ * Attempting to start a sensor that has already been started results in undefined behavior.
+ * @param sensor The sensor to subscribe to.
+ * @param samplingPeriod Sampling interval, in microseconds. Actual rate might be slightly
+ * different.
+ * @return true iff succeeded.
+ */
+ virtual bool startSensor(int32_t sensor, std::chrono::microseconds samplingPeriod) = 0;
+
+ /**
+ * Stop a sensor, previously started with startSensor(). It is not required to stop all sensors
+ * before deleting the SensorPoseProvider instance.
+ * @param handle The sensor handle, as provided to startSensor().
+ */
+ virtual void stopSensor(int32_t handle) = 0;
+};
+
+} // namespace media
+} // namespace android
diff --git a/media/libheadtracking/include/media/Twist.h b/media/libheadtracking/include/media/Twist.h
new file mode 100644
index 0000000..e2fc203
--- /dev/null
+++ b/media/libheadtracking/include/media/Twist.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <Eigen/Geometry>
+
+#include "Pose.h"
+
+namespace android {
+namespace media {
+
+/**
+ * A 6-DoF twist.
+ * This class represents the translational and rotational velocity of a rigid object, typically
+ * relative to its own coordinate-frame.
+ * It is represented by two 3-vectors: one for linear motion per time-unit, and one for rotation,
+ * expressed as a rotation vector in radians per time-unit (right-handed).
+ */
+class Twist3f {
+ public:
+ Twist3f(const Eigen::Vector3f& translationalVelocity, const Eigen::Vector3f& rotationalVelocity)
+ : mTranslationalVelocity(translationalVelocity), mRotationalVelocity(rotationalVelocity) {}
+
+ Twist3f() : Twist3f(Eigen::Vector3f::Zero(), Eigen::Vector3f::Zero()) {}
+
+ Twist3f(const Twist3f& other) { *this = other; }
+
+ Twist3f& operator=(const Twist3f& other) {
+ mTranslationalVelocity = other.mTranslationalVelocity;
+ mRotationalVelocity = other.mRotationalVelocity;
+ return *this;
+ }
+
+ Eigen::Vector3f translationalVelocity() const { return mTranslationalVelocity; }
+ Eigen::Vector3f rotationalVelocity() const { return mRotationalVelocity; }
+
+ float scalarTranslationalVelocity() const { return mTranslationalVelocity.norm(); }
+ float scalarRotationalVelocity() const { return mRotationalVelocity.norm(); }
+
+ bool isApprox(const Twist3f& other,
+ float prec = Eigen::NumTraits<float>::dummy_precision()) const {
+ return mTranslationalVelocity.isApprox(other.mTranslationalVelocity, prec) &&
+ mRotationalVelocity.isApprox(other.mRotationalVelocity, prec);
+ }
+
+ private:
+ Eigen::Vector3f mTranslationalVelocity;
+ Eigen::Vector3f mRotationalVelocity;
+};
+
+/**
+ * Integrate a twist over time to obtain a pose.
+ * dt is the time over which to integrate.
+ * The resulting pose represents the transformation between the starting point and the ending point
+ * of the motion over the time period.
+ */
+Pose3f integrate(const Twist3f& twist, float dt);
+
+/**
+ * Differentiate pose to obtain a twist.
+ * dt is the time of the motion between the reference and the target frames of the pose.
+ */
+Twist3f differentiate(const Pose3f& pose, float dt);
+
+/**
+ * Pretty-printer for twist.
+ */
+std::ostream& operator<<(std::ostream& os, const Twist3f& twist);
+
+} // namespace media
+} // namespace android
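integrate() and differentiate() are inverses of each other up to floating-point error; a quick sketch of the relationship:

#include <cmath>

#include "media/Twist.h"

using android::media::differentiate;
using android::media::integrate;
using android::media::Pose3f;
using android::media::Twist3f;

void twistExample() {
    // 1 m/s along X, 45 deg/s around Z.
    Twist3f twist({1, 0, 0}, {0, 0, static_cast<float>(M_PI_4)});
    Pose3f pose = integrate(twist, /*dt=*/2.0f);  // 2 m along X, 90 deg yaw
    Twist3f back = differentiate(pose, /*dt=*/2.0f);
    // back.isApprox(twist) holds.
    (void)back;
}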
diff --git a/media/libmediahelper/Android.bp b/media/libmediahelper/Android.bp
index a433fc6..b9d795d 100644
--- a/media/libmediahelper/Android.bp
+++ b/media/libmediahelper/Android.bp
@@ -29,6 +29,7 @@
cc_library {
name: "libmedia_helper",
vendor_available: true,
+ min_sdk_version: "29",
vndk: {
enabled: true,
},
@@ -58,4 +59,9 @@
enabled: false,
},
},
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media",
+ "test_com.android.media",
+ ],
}
diff --git a/media/libmediahelper/AudioValidator.cpp b/media/libmediahelper/AudioValidator.cpp
index 7eddbe1..5a0d517 100644
--- a/media/libmediahelper/AudioValidator.cpp
+++ b/media/libmediahelper/AudioValidator.cpp
@@ -47,8 +47,7 @@
const effect_descriptor_t& desc, std::string_view bugNumber)
{
status_t status = NO_ERROR;
- if (checkStringOverflow(desc.name)
- | /* always */ checkStringOverflow(desc.implementor)) {
+ if (checkStringOverflow(desc.name) || checkStringOverflow(desc.implementor)) {
status = BAD_VALUE;
}
return safetyNetLog(status, bugNumber);
diff --git a/media/libmediahelper/TypeConverter.cpp b/media/libmediahelper/TypeConverter.cpp
index d3a517f..97b5b95 100644
--- a/media/libmediahelper/TypeConverter.cpp
+++ b/media/libmediahelper/TypeConverter.cpp
@@ -50,6 +50,8 @@
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_MUTE_HAPTIC),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NO_SYSTEM_CAPTURE),
MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CAPTURE_PRIVATE),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_CONTENT_SPATIALIZED),
+ MAKE_STRING_FROM_ENUM(AUDIO_FLAG_NEVER_SPATIALIZE),
TERMINATOR
};
diff --git a/media/libmediametrics/Android.bp b/media/libmediametrics/Android.bp
index d758391..4a3973e6 100644
--- a/media/libmediametrics/Android.bp
+++ b/media/libmediametrics/Android.bp
@@ -65,6 +65,7 @@
"//frameworks/base/apex/media/framework",
"//frameworks/base/core/jni",
"//frameworks/base/media/jni",
+ "//packages/modules/Media/apex/framework",
],
}
diff --git a/media/libmediametrics/MediaMetricsItem.cpp b/media/libmediametrics/MediaMetricsItem.cpp
index d597a4d..57fc49d 100644
--- a/media/libmediametrics/MediaMetricsItem.cpp
+++ b/media/libmediametrics/MediaMetricsItem.cpp
@@ -23,6 +23,7 @@
#include <mutex>
#include <set>
+#include <unordered_map>
#include <binder/Parcel.h>
#include <cutils/properties.h>
@@ -51,6 +52,33 @@
// the service is off.
#define SVC_TRIES 2
+static const std::unordered_map<std::string, int32_t>& getErrorStringMap() {
+ // DO NOT MODIFY VALUES (OK to add new ones).
+ // This may be found in frameworks/av/media/libmediametrics/include/MediaMetricsConstants.h
+ static std::unordered_map<std::string, int32_t> map{
+ {"", NO_ERROR},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_OK, NO_ERROR},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, BAD_VALUE},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_IO, DEAD_OBJECT},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY, NO_MEMORY},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY, PERMISSION_DENIED},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_STATE, INVALID_OPERATION},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT, WOULD_BLOCK},
+ {AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN, UNKNOWN_ERROR},
+ };
+ return map;
+}
+
+status_t statusStringToStatus(const char *error) {
+ const auto& map = getErrorStringMap();
+ if (error == nullptr || error[0] == '\0') return NO_ERROR;
+ auto it = map.find(error);
+ if (it != map.end()) {
+ return it->second;
+ }
+ return UNKNOWN_ERROR;
+}
+
mediametrics::Item* mediametrics::Item::convert(mediametrics_handle_t handle) {
mediametrics::Item *item = (android::mediametrics::Item *) handle;
return item;
diff --git a/media/libmediametrics/include/MediaMetricsConstants.h b/media/libmediametrics/include/MediaMetricsConstants.h
index a09a673..2bf72a7 100644
--- a/media/libmediametrics/include/MediaMetricsConstants.h
+++ b/media/libmediametrics/include/MediaMetricsConstants.h
@@ -61,6 +61,9 @@
#define AMEDIAMETRICS_KEY_AUDIO_FLINGER AMEDIAMETRICS_KEY_PREFIX_AUDIO "flinger"
#define AMEDIAMETRICS_KEY_AUDIO_POLICY AMEDIAMETRICS_KEY_PREFIX_AUDIO "policy"
+// Error keys
+#define AMEDIAMETRICS_KEY_AUDIO_TRACK_ERROR AMEDIAMETRICS_KEY_PREFIX_AUDIO_TRACK "error"
+
/*
* MediaMetrics Properties are unified space for consistency and readability.
*/
@@ -115,6 +118,7 @@
#define AMEDIAMETRICS_PROP_DIRECTION "direction" // string AAudio input or output
#define AMEDIAMETRICS_PROP_DURATIONNS "durationNs" // int64 duration time span
#define AMEDIAMETRICS_PROP_ENCODING "encoding" // string value of format
+
#define AMEDIAMETRICS_PROP_EVENT "event#" // string value (often func name)
#define AMEDIAMETRICS_PROP_EXECUTIONTIMENS "executionTimeNs" // time to execute the event
@@ -146,7 +150,17 @@
#define AMEDIAMETRICS_PROP_STARTUPMS "startupMs" // double value
// State is "ACTIVE" or "STOPPED" for AudioRecord
#define AMEDIAMETRICS_PROP_STATE "state" // string
-#define AMEDIAMETRICS_PROP_STATUS "status" // int32 status_t
+#define AMEDIAMETRICS_PROP_STATUS "status#" // int32 status_t
+ // AAudio uses its own status codes
+// Supplemental information to the status code.
+#define AMEDIAMETRICS_PROP_STATUSSUBCODE "statusSubCode" // int32, specific code
+ // used in conjunction with status.
+#define AMEDIAMETRICS_PROP_STATUSMESSAGE "statusMessage" // string, supplemental info.
+ // Arbitrary information treated as
+ // informational, may be logcat msg,
+ // or an exception with stack trace.
+ // Treated as "debug" information.
+
#define AMEDIAMETRICS_PROP_STREAMTYPE "streamType" // string (AudioTrack)
#define AMEDIAMETRICS_PROP_THREADID "threadId" // int32 value io handle
#define AMEDIAMETRICS_PROP_THROTTLEMS "throttleMs" // double
@@ -215,4 +229,78 @@
#define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_TONEGENERATOR "tonegenerator" // dial tones
#define AMEDIAMETRICS_PROP_CALLERNAME_VALUE_UNKNOWN "unknown" // callerName not set
+// MediaMetrics errors are expected to cover the following sources:
+// https://docs.oracle.com/javase/7/docs/api/java/lang/RuntimeException.html
+// https://docs.oracle.com/javase/7/docs/api/java/lang/Exception.html
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/native/libs/binder/include/binder/Status.h;drc=88e25c0861499ee3ab885814dddc097ab234cb7b;l=57
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/base/media/java/android/media/AudioSystem.java;drc=3ac246c43294d7f7012bdcb0ccb7bae1aa695bd4;l=785
+// https://cs.android.com/android/platform/superproject/+/master:frameworks/av/media/libaaudio/include/aaudio/AAudio.h;drc=cfd3a6fa3aaaf712a890dc02452b38ef401083b8;l=120
+// https://abseil.io/docs/cpp/guides/status-codes
+
+// Status errors:
+// An empty status string or "ok" is interpreted as no error.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_OK "ok"
+
+// Error category: argument
+// IllegalArgumentException
+// NullPointerException
+// BAD_VALUE
+// absl::INVALID_ARGUMENT
+// absl::OUT_OF_RANGE
+// Out of range, out of bounds.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT "argument"
+
+// Error category: io
+// IOException
+// android.os.DeadObjectException, android.os.RemoteException
+// DEAD_OBJECT
+// FAILED_TRANSACTION
+// IO_ERROR
+// file or ioctl failure
+// Service, rpc, binder, or socket failure.
+// Hardware or device failure.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_IO "io"
+
+// Error category: outOfMemory
+// OutOfMemoryException
+// NO_MEMORY
+// absl::RESOURCE_EXHAUSTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY "memory"
+
+// Error category: security
+// SecurityException
+// PERMISSION_DENIED
+// absl::PERMISSION_DENIED
+// absl::UNAUTHENTICATED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY "security"
+
+// Error category: state
+// IllegalStateException
+// UnsupportedOperationException
+// INVALID_OPERATION
+// NO_INIT
+// absl::NOT_FOUND
+// absl::ALREADY_EXISTS
+// absl::FAILED_PRECONDITION
+// absl::UNAVAILABLE
+// absl::UNIMPLEMENTED
+// Functionality not implemented (argument may or may not be correct).
+// Call unexpected or out of order.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_STATE "state"
+
+// Error category: timeout
+// TimeoutException
+// WOULD_BLOCK
+// absl::DEADLINE_EXCEEDED
+// absl::ABORTED
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT "timeout"
+
+// Error category: unknown
+// Exception (Java specified not listed above, or custom app/service)
+// UNKNOWN_ERROR
+// absl::INTERNAL
+// absl::DATA_LOSS
+// Catch-all bucket for errors not listed above.
+#define AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN "unknown"
+
#endif // ANDROID_MEDIA_MEDIAMETRICSCONSTANTS_H
diff --git a/media/libmediametrics/include/media/MediaMetricsItem.h b/media/libmediametrics/include/media/MediaMetricsItem.h
index 428992c..de56665 100644
--- a/media/libmediametrics/include/media/MediaMetricsItem.h
+++ b/media/libmediametrics/include/media/MediaMetricsItem.h
@@ -27,6 +27,7 @@
#include <variant>
#include <binder/Parcel.h>
+#include <log/log.h>
#include <utils/Errors.h>
#include <utils/Timers.h> // nsecs_t
@@ -105,6 +106,36 @@
};
/*
+ * Helper for status conversions
+ */
+
+inline constexpr const char* statusToStatusString(status_t status) {
+ switch (status) {
+ case BAD_VALUE:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
+ case DEAD_OBJECT:
+ case FAILED_TRANSACTION:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
+ case NO_MEMORY:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
+ case PERMISSION_DENIED:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
+ case NO_INIT:
+ case INVALID_OPERATION:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
+ case WOULD_BLOCK:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
+ default:
+ if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values "OK"
+ [[fallthrough]]; // negative values are error.
+ case UNKNOWN_ERROR:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
+ }
+}
+
+status_t statusStringToStatus(const char *error);
+
+/*
* Time printing
*
* kPrintFormatLong time string is 19 characters (including null termination).
@@ -469,16 +500,16 @@
template <> // static
status_t extract(std::string *val, const char **bufferpptr, const char *bufferptrmax) {
const char *ptr = *bufferpptr;
- while (*ptr != 0) {
+ do {
if (ptr >= bufferptrmax) {
ALOGE("%s: buffer exceeded", __func__);
+ android_errorWriteLog(0x534e4554, "204445255");
return BAD_VALUE;
}
- ++ptr;
- }
- const size_t size = (ptr - *bufferpptr) + 1;
+ } while (*ptr++ != 0);
+ // ptr is terminator+1, == bufferptrmax if we finished entire buffer
*val = *bufferpptr;
- *bufferpptr += size;
+ *bufferpptr = ptr;
return NO_ERROR;
}
template <> // static
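Taken together, statusToStatusString() and statusStringToStatus() form a lossy round trip between status_t values and the error-category strings from MediaMetricsConstants.h (several codes share one category). An illustrative sketch, assuming the helpers are visible in namespace android as above:

#include <media/MediaMetricsItem.h>

void statusExample() {
    using namespace android;

    const char* s = statusToStatusString(BAD_VALUE);  // "argument"
    status_t st = statusStringToStatus(s);            // BAD_VALUE again

    // Lossy: FAILED_TRANSACTION also maps to "io", which maps back to DEAD_OBJECT.
    st = statusStringToStatus(statusToStatusString(FAILED_TRANSACTION));

    st = statusStringToStatus("");       // empty string is treated as NO_ERROR
    st = statusStringToStatus("bogus");  // unrecognized -> UNKNOWN_ERROR
    (void)st; (void)s;
}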
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 9f86544..5da32c9 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -35,6 +35,7 @@
#include <media/stagefright/FrameCaptureProcessor.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaCodec.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
@@ -192,6 +193,13 @@
*dstBpp = 4;
return true;
}
+ case HAL_PIXEL_FORMAT_RGBA_1010102:
+ {
+ *dstFormat = (OMX_COLOR_FORMATTYPE)COLOR_Format32bitABGR2101010;
+ *captureFormat = ui::PixelFormat::RGBA_1010102;
+ *dstBpp = 4;
+ return true;
+ }
default:
{
ALOGE("Unsupported color format: %d", colorFormat);
@@ -523,8 +531,12 @@
return NULL;
}
- // TODO: Use Flexible color instead
- videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ if (dstFormat() == COLOR_Format32bitABGR2101010) {
+ videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
+ } else {
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ }
// For the thumbnail extraction case, try to allocate single buffer in both
// input and output ports, if seeking to a sync frame. NOTE: This request may
@@ -632,6 +644,11 @@
crop_bottom = height - 1;
}
+ int32_t slice_height;
+ if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
+ height = slice_height;
+ }
+
if (mFrame == NULL) {
sp<IMemory> frameMem = allocVideoFrame(
trackMeta(),
@@ -793,8 +810,16 @@
if (overrideMeta == NULL) {
// check if we're dealing with a tiled heif
int32_t tileWidth, tileHeight, gridRows, gridCols;
+ int32_t widthColsProduct = 0;
+ int32_t heightRowsProduct = 0;
if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
- if (mWidth <= tileWidth * gridCols && mHeight <= tileHeight * gridRows) {
+ if (__builtin_mul_overflow(tileWidth, gridCols, &widthColsProduct) ||
+ __builtin_mul_overflow(tileHeight, gridRows, &heightRowsProduct)) {
+ ALOGE("Multiplication overflowed Grid size: %dx%d, Picture size: %dx%d",
+ gridCols, gridRows, tileWidth, tileHeight);
+ return nullptr;
+ }
+ if (mWidth <= widthColsProduct && mHeight <= heightRowsProduct) {
ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
@@ -823,8 +848,12 @@
return NULL;
}
- // TODO: Use Flexible color instead
- videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ if (dstFormat() == COLOR_Format32bitABGR2101010) {
+ videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
+ } else {
+ // TODO: Use Flexible color instead
+ videoFormat->setInt32("color-format", OMX_COLOR_FormatYUV420Planar);
+ }
if ((mGridRows == 1) && (mGridCols == 1)) {
videoFormat->setInt32("android._num-input-buffers", 1);
@@ -930,6 +959,11 @@
crop_bottom = height - 1;
}
+ int32_t slice_height;
+ if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
+ height = slice_height;
+ }
+
int32_t crop_width, crop_height;
crop_width = crop_right - crop_left + 1;
crop_height = crop_bottom - crop_top + 1;
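The grid-size validation above relies on the clang/GCC checked-arithmetic builtin; the general pattern, shown with a hypothetical helper:

#include <cstdint>

// __builtin_mul_overflow computes a * b into *out and returns true iff the
// product overflowed the destination type.
bool fitsInInt32(int32_t a, int32_t b, int32_t* out) {
    return !__builtin_mul_overflow(a, b, out);
}

// Usage, mirroring the FrameDecoder check:
//   int32_t product;
//   if (!fitsInInt32(tileWidth, gridCols, &product)) { /* reject the grid */ }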
diff --git a/media/libstagefright/MPEG4Writer.cpp b/media/libstagefright/MPEG4Writer.cpp
index 7c7fcac..df4ff47 100644
--- a/media/libstagefright/MPEG4Writer.cpp
+++ b/media/libstagefright/MPEG4Writer.cpp
@@ -36,6 +36,7 @@
#include <media/stagefright/MediaSource.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
+#include <media/stagefright/foundation/ALookup.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/ByteUtils.h>
#include <media/stagefright/foundation/ColorUtils.h>
@@ -44,6 +45,7 @@
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/MediaDefs.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/Utils.h>
#include <media/mediarecorder.h>
@@ -156,7 +158,7 @@
bool isHeic() const { return mIsHeic; }
bool isAudio() const { return mIsAudio; }
bool isMPEG4() const { return mIsMPEG4; }
- bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic; }
+ bool usePrefix() const { return mIsAvc || mIsHevc || mIsHeic || mIsDovi; }
bool isExifData(MediaBufferBase *buffer, uint32_t *tiffHdrOffset) const;
void addChunkOffset(off64_t offset);
void addItemOffsetAndSize(off64_t offset, size_t size, bool isExif);
@@ -164,6 +166,7 @@
TrackId& getTrackId() { return mTrackId; }
status_t dump(int fd, const Vector<String16>& args) const;
static const char *getFourCCForMime(const char *mime);
+ const char *getDoviFourCC() const;
const char *getTrackType() const;
void resetInternal();
int64_t trackMetaDataSize();
@@ -316,6 +319,7 @@
volatile bool mStarted;
bool mIsAvc;
bool mIsHevc;
+ bool mIsDovi;
bool mIsAudio;
bool mIsVideo;
bool mIsHeic;
@@ -370,6 +374,8 @@
uint8_t mProfileCompatible;
uint8_t mLevelIdc;
+ int32_t mDoviProfile;
+
void *mCodecSpecificData;
size_t mCodecSpecificDataSize;
bool mGotAllCodecSpecificData;
@@ -422,6 +428,8 @@
status_t parseHEVCCodecSpecificData(
const uint8_t *data, size_t size, HevcParameterSets ¶mSets);
+ status_t getDolbyVisionProfile();
+
// Track authoring progress status
void trackProgressStatus(int64_t timeUs, status_t err = OK);
void initTrackingProgressStatus(MetaData *params);
@@ -459,6 +467,7 @@
void writePaspBox();
void writeAvccBox();
void writeHvccBox();
+ void writeDoviConfigBox();
void writeUrlBox();
void writeDrefBox();
void writeDinfBox();
@@ -617,6 +626,17 @@
return OK;
}
+const char *MPEG4Writer::Track::getDoviFourCC() const {
+ if (mDoviProfile == DolbyVisionProfileDvheStn) {
+ return "dvh1";
+ } else if (mDoviProfile == DolbyVisionProfileDvheSt) {
+ return "hvc1";
+ } else if (mDoviProfile == DolbyVisionProfileDvavSe) {
+ return "avc1";
+ }
+ return nullptr;
+}
+
// static
const char *MPEG4Writer::Track::getFourCCForMime(const char *mime) {
if (mime == NULL) {
@@ -671,7 +691,14 @@
mIsBackgroundMode |= isBackgroundMode;
}
- if (Track::getFourCCForMime(mime) == NULL) {
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ // For MEDIA_MIMETYPE_VIDEO_DOLBY_VISION,
+ // getFourCCForMime() needs profile information
+ // to decide the final FourCC code, so we allow
+ // creating the track now and assign the FourCC
+ // code later via getDoviFourCC().
+ ALOGV("Add source mime '%s'", mime);
+ } else if (Track::getFourCCForMime(mime) == NULL) {
ALOGE("Unsupported mime '%s'", mime);
return ERROR_UNSUPPORTED;
}
@@ -2150,6 +2177,7 @@
mMinCttsOffsetTimeUs(0),
mMinCttsOffsetTicks(0),
mMaxCttsOffsetTicks(0),
+ mDoviProfile(0),
mCodecSpecificData(NULL),
mCodecSpecificDataSize(0),
mGotAllCodecSpecificData(false),
@@ -2176,6 +2204,7 @@
mMeta->findCString(kKeyMIMEType, &mime);
mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);
+ mIsDovi = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION);
mIsAudio = !strncasecmp(mime, "audio/", 6);
mIsVideo = !strncasecmp(mime, "video/", 6);
mIsHeic = !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC);
@@ -2610,7 +2639,12 @@
!strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC)) {
mMeta->findData(kKeyHVCC, &type, &data, &size);
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
- mMeta->findData(kKeyDVCC, &type, &data, &size);
+ getDolbyVisionProfile();
+ if (!mMeta->findData(kKeyAVCC, &type, &data, &size) &&
+ !mMeta->findData(kKeyHVCC, &type, &data, &size)) {
+ ALOGE("Failed: No HVCC/AVCC for Dolby Vision ..\n");
+ return;
+ }
} else if (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG4) ||
!strcasecmp(mime, MEDIA_MIMETYPE_AUDIO_AAC)) {
if (mMeta->findData(kKeyESDS, &type, &data, &size)) {
@@ -2651,6 +2685,7 @@
free(mCodecSpecificData);
mCodecSpecificData = NULL;
}
+
}
void MPEG4Writer::Track::initTrackingProgressStatus(MetaData *params) {
@@ -3329,6 +3364,40 @@
return OK;
}
+status_t MPEG4Writer::Track::getDolbyVisionProfile() {
+ uint32_t type;
+ const void *data = NULL;
+ size_t size = 0;
+
+ if (!mMeta->findData(kKeyDVCC, &type, &data, &size) &&
+ !mMeta->findData(kKeyDVVC, &type, &data, &size) &&
+ !mMeta->findData(kKeyDVWC, &type, &data, &size)) {
+ ALOGE("Failed getting Dovi config for Dolby Vision %d", (int)size);
+ return ERROR_MALFORMED;
+ }
+ static const ALookup<uint8_t, int32_t> dolbyVisionProfileMap = {
+ {1, DolbyVisionProfileDvavPen},
+ {3, DolbyVisionProfileDvheDen},
+ {4, DolbyVisionProfileDvheDtr},
+ {5, DolbyVisionProfileDvheStn},
+ {6, DolbyVisionProfileDvheDth},
+ {7, DolbyVisionProfileDvheDtb},
+ {8, DolbyVisionProfileDvheSt},
+ {9, DolbyVisionProfileDvavSe},
+ {10, DolbyVisionProfileDvav110}
+ };
+
+ // Dolby Vision profile information is extracted as per
+ // https://dolby.my.salesforce.com/sfc/p/#700000009YuG/a/4u000000l6FB/076wHYEmyEfz09m0V1bo85_25hlUJjaiWTbzorNmYY4
+ uint8_t dv_profile = ((((uint8_t *)data)[2] >> 1) & 0x7f);
+
+ if (!dolbyVisionProfileMap.map(dv_profile, &mDoviProfile)) {
+ ALOGE("Failed to get Dolby Profile from DV Config data");
+ return ERROR_MALFORMED;
+ }
+ return OK;
+}
+
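getDolbyVisionProfile() pulls the 7-bit dv_profile out of byte 2 of the dv(c|v|w)C payload. A standalone check of that bit extraction (the example byte is hypothetical):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    uint8_t dvcc2 = 0x10;                      // byte 2 of the payload: 0b0001'0000
    uint8_t dv_profile = (dvcc2 >> 1) & 0x7f;  // top 7 bits -> 8 (dvhe.st)
    std::printf("dv_profile = %u\n", dv_profile);
    return 0;
}
```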
/*
* Updates the drift time from the audio track so that
* the video track can get the updated drift time information
@@ -3474,8 +3543,27 @@
err = copyCodecSpecificData((const uint8_t *)buffer->data() + buffer->range_offset(),
buffer->range_length());
}
+ if (mIsDovi) {
+ err = getDolbyVisionProfile();
+ if (err == OK) {
+ const void *data = NULL;
+ size_t size = 0;
+ uint32_t type = 0;
+ if (mDoviProfile == DolbyVisionProfileDvavSe) {
+ mMeta->findData(kKeyAVCC, &type, &data, &size);
+ } else if (mDoviProfile < DolbyVisionProfileDvavSe) {
+ mMeta->findData(kKeyHVCC, &type, &data, &size);
+ } else {
+ ALOGW("DV Profiles > DolbyVisionProfileDvavSe are not supported");
+ err = ERROR_MALFORMED;
+ }
+ if (err == OK && data != NULL &&
+ copyCodecSpecificData((uint8_t *)data, size) == OK) {
+ mGotAllCodecSpecificData = true;
+ }
+ }
+ }
}
-
buffer->release();
buffer = NULL;
if (OK != err) {
@@ -4173,6 +4261,7 @@
!strcasecmp(MEDIA_MIMETYPE_VIDEO_MPEG4, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_AVC, mime) ||
!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime) ||
+ !strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime) ||
!strcasecmp(MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC, mime)) {
if (!mCodecSpecificData ||
mCodecSpecificDataSize <= 0) {
@@ -4297,7 +4386,13 @@
const char *mime;
bool success = mMeta->findCString(kKeyMIMEType, &mime);
CHECK(success);
- const char *fourcc = getFourCCForMime(mime);
+ const char *fourcc;
+ if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_DOLBY_VISION)) {
+ fourcc = getDoviFourCC();
+ } else {
+ fourcc = getFourCCForMime(mime);
+ }
+
if (fourcc == NULL) {
ALOGE("Unknown mime type '%s'.", mime);
TRESPASS();
@@ -4337,6 +4432,15 @@
writeAvccBox();
} else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_HEVC, mime)) {
writeHvccBox();
+ } else if (!strcasecmp(MEDIA_MIMETYPE_VIDEO_DOLBY_VISION, mime)) {
+ if (mDoviProfile <= DolbyVisionProfileDvheSt) {
+ writeHvccBox();
+ } else if (mDoviProfile == DolbyVisionProfileDvavSe) {
+ writeAvccBox();
+ } else {
+ TRESPASS("Unsupported Dolby Vision profile");
+ }
+ writeDoviConfigBox();
}
writePaspBox();
@@ -4349,30 +4453,31 @@
memset(&aspects, 0, sizeof(aspects));
// Color metadata may have changed.
sp<MetaData> meta = mSource->getFormat();
- // TRICKY: using | instead of || because we want to execute all findInt32-s
- if (meta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries)
- | meta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer)
- | meta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs)
- | meta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange)) {
- int32_t primaries, transfer, coeffs;
- bool fullRange;
- ALOGV("primaries=%s transfer=%s matrix=%s range=%s",
- asString(aspects.mPrimaries),
- asString(aspects.mTransfer),
- asString(aspects.mMatrixCoeffs),
- asString(aspects.mRange));
- ColorUtils::convertCodecColorAspectsToIsoAspects(
- aspects, &primaries, &transfer, &coeffs, &fullRange);
- mOwner->beginBox("colr");
- mOwner->writeFourcc("nclx");
- mOwner->writeInt16(primaries);
- mOwner->writeInt16(transfer);
- mOwner->writeInt16(coeffs);
- mOwner->writeInt8(int8_t(fullRange ? 0x80 : 0x0));
- mOwner->endBox(); // colr
- } else {
+ bool findPrimaries = meta->findInt32(kKeyColorPrimaries, (int32_t*)&aspects.mPrimaries);
+ bool findTransfer = meta->findInt32(kKeyTransferFunction, (int32_t*)&aspects.mTransfer);
+ bool findMatrix = meta->findInt32(kKeyColorMatrix, (int32_t*)&aspects.mMatrixCoeffs);
+ bool findRange = meta->findInt32(kKeyColorRange, (int32_t*)&aspects.mRange);
+ if (!findPrimaries && !findTransfer && !findMatrix && !findRange) {
ALOGV("no color information");
+ return;
}
+
+ int32_t primaries, transfer, coeffs;
+ bool fullRange;
+ ALOGV("primaries=%s transfer=%s matrix=%s range=%s",
+ asString(aspects.mPrimaries),
+ asString(aspects.mTransfer),
+ asString(aspects.mMatrixCoeffs),
+ asString(aspects.mRange));
+ ColorUtils::convertCodecColorAspectsToIsoAspects(
+ aspects, &primaries, &transfer, &coeffs, &fullRange);
+ mOwner->beginBox("colr");
+ mOwner->writeFourcc("nclx");
+ mOwner->writeInt16(primaries);
+ mOwner->writeInt16(transfer);
+ mOwner->writeInt16(coeffs);
+ mOwner->writeInt8(int8_t(fullRange ? 0x80 : 0x0));
+ mOwner->endBox(); // colr
}
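The rewritten block writes an ISO 'colr' box of type 'nclx': three 16-bit ISO color codes followed by one flag byte whose top bit signals full range. A standalone sketch of the byte layout those beginBox/writeFourcc/writeInt16/writeInt8 calls produce (this models the bytes only, not the AOSP writer API):

```cpp
#include <cstdint>
#include <vector>

// Size = 8 (box header) + 4 ('nclx') + 3*2 (ISO codes) + 1 (range flag) = 19.
std::vector<uint8_t> makeColrBox(uint16_t primaries, uint16_t transfer,
                                 uint16_t coeffs, bool fullRange) {
    std::vector<uint8_t> box;
    auto put16 = [&](uint16_t v) { box.push_back(v >> 8); box.push_back(v & 0xff); };
    auto put32 = [&](uint32_t v) {
        for (int s = 24; s >= 0; s -= 8) box.push_back((v >> s) & 0xff);
    };
    put32(19);                                   // total box size
    box.insert(box.end(), {'c', 'o', 'l', 'r'});
    box.insert(box.end(), {'n', 'c', 'l', 'x'});
    put16(primaries);
    put16(transfer);
    put16(coeffs);
    box.push_back(fullRange ? 0x80 : 0x00);      // same flag byte as writeInt8 above
    return box;
}
```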
void MPEG4Writer::Track::writeAudioFourCCBox() {
@@ -4829,12 +4934,11 @@
mOwner->endBox(); // avcC
}
-
void MPEG4Writer::Track::writeHvccBox() {
CHECK(mCodecSpecificData);
CHECK_GE(mCodecSpecificDataSize, 5u);
- // Patch avcc's lengthSize field to match the number
+ // Patch hvcc's lengthSize field to match the number
// of bytes we use to indicate the size of a nal unit.
uint8_t *ptr = (uint8_t *)mCodecSpecificData;
ptr[21] = (ptr[21] & 0xfc) | (mOwner->useNalLengthFour() ? 3 : 1);
@@ -4843,6 +4947,32 @@
mOwner->endBox(); // hvcC
}
+void MPEG4Writer::Track::writeDoviConfigBox() {
+ CHECK_NE(mDoviProfile, 0u);
+
+ uint32_t type = 0;
+ const void *data = nullptr;
+ size_t size = 0;
+ // check to see which key has the configuration box.
+ if (mMeta->findData(kKeyDVCC, &type, &data, &size) ||
+ mMeta->findData(kKeyDVVC, &type, &data, &size) ||
+ mMeta->findData(kKeyDVWC, &type, &data, &size)) {
+
+ // If the config box is present, write it; otherwise this
+ // mp4 will be interpreted as a backward-compatible stream.
+ if (mDoviProfile > DolbyVisionProfileDvav110) {
+ mOwner->beginBox("dvwC");
+ } else if (mDoviProfile > DolbyVisionProfileDvheDtb) {
+ mOwner->beginBox("dvvC");
+ } else {
+ mOwner->beginBox("dvcC");
+ }
+ mOwner->write(data, size);
+ mOwner->endBox(); // dvwC/dvvC/dvcC
+ }
+}
+
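writeDoviConfigBox() picks the box name from the profile: dvwC above dvav.110, dvvC above dvhe.dtb, and dvcC otherwise. The same decision restated as a standalone helper keyed on raw dv_profile numbers (10 and 7, per the lookup table above); the real code compares the MediaCodecConstants values instead:

```cpp
#include <cstdint>

// Raw dv_profile -> Dolby Vision configuration box name.
const char *doviConfigBoxName(uint8_t dvProfile) {
    if (dvProfile > 10) return "dvwC";  // profiles beyond dvav.110
    if (dvProfile > 7)  return "dvvC";  // profiles 8..10
    return "dvcC";                      // profiles up to dvhe.dtb
}
```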
void MPEG4Writer::Track::writeD263Box() {
mOwner->beginBox("d263");
mOwner->writeInt32(0); // vendor
diff --git a/media/libstagefright/MediaCodec.cpp b/media/libstagefright/MediaCodec.cpp
index 2851dc4..e42b538 100644
--- a/media/libstagefright/MediaCodec.cpp
+++ b/media/libstagefright/MediaCodec.cpp
@@ -112,6 +112,13 @@
static const char *kCodecCaptureRate = "android.media.mediacodec.capture-rate";
static const char *kCodecOperatingRate = "android.media.mediacodec.operating-rate";
static const char *kCodecPriority = "android.media.mediacodec.priority";
+static const char *kCodecConfigColorStandard = "android.media.mediacodec.config-color-standard";
+static const char *kCodecConfigColorRange = "android.media.mediacodec.config-color-range";
+static const char *kCodecConfigColorTransfer = "android.media.mediacodec.config-color-transfer";
+static const char *kCodecParsedColorStandard = "android.media.mediacodec.parsed-color-standard";
+static const char *kCodecParsedColorRange = "android.media.mediacodec.parsed-color-range";
+static const char *kCodecParsedColorTransfer = "android.media.mediacodec.parsed-color-transfer";
+static const char *kCodecHDRMetadataFlags = "android.media.mediacodec.hdr-metadata-flags";
// Min/Max QP before shaping
static const char *kCodecOriginalVideoQPIMin = "android.media.mediacodec.original-video-qp-i-min";
@@ -748,6 +755,7 @@
mVideoWidth(0),
mVideoHeight(0),
mRotationDegrees(0),
+ mHDRMetadataFlags(0),
mDequeueInputTimeoutGeneration(0),
mDequeueInputReplyID(0),
mDequeueOutputTimeoutGeneration(0),
@@ -898,6 +906,8 @@
mediametrics_setInt64(mMetricsHandle, kCodecFirstFrameIndexLowLatencyModeOn,
mIndexOfFirstFrameWhenLowLatencyOn);
}
+
+ mediametrics_setInt32(mMetricsHandle, kCodecHDRMetadataFlags, mHDRMetadataFlags);
#if 0
// enable for short term, only while debugging
updateEphemeralMediametrics(mMetricsHandle);
@@ -1511,6 +1521,9 @@
uint32_t flags) {
sp<AMessage> msg = new AMessage(kWhatConfigure, this);
+ // TODO: validate log-session-id: it should be a 32-digit hex string.
+ format->findString("log-session-id", &mLogSessionId);
+
if (mMetricsHandle != 0) {
int32_t profile = 0;
if (format->findInt32("profile", &profile)) {
@@ -1522,11 +1535,11 @@
}
mediametrics_setInt32(mMetricsHandle, kCodecEncoder,
(flags & CONFIGURE_FLAG_ENCODE) ? 1 : 0);
+
+ mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
}
if (mIsVideo) {
- // TODO: validity check log-session-id: it should be a 32-hex-digit.
- format->findString("log-session-id", &mLogSessionId);
format->findInt32("width", &mVideoWidth);
format->findInt32("height", &mVideoHeight);
if (!format->findInt32("rotation-degrees", &mRotationDegrees)) {
@@ -1534,7 +1547,6 @@
}
if (mMetricsHandle != 0) {
- mediametrics_setCString(mMetricsHandle, kCodecLogSessionId, mLogSessionId.c_str());
mediametrics_setInt32(mMetricsHandle, kCodecWidth, mVideoWidth);
mediametrics_setInt32(mMetricsHandle, kCodecHeight, mVideoHeight);
mediametrics_setInt32(mMetricsHandle, kCodecRotation, mRotationDegrees);
@@ -1566,6 +1578,23 @@
if (format->findInt32("priority", &priority)) {
mediametrics_setInt32(mMetricsHandle, kCodecPriority, priority);
}
+ int32_t colorStandard = -1;
+ if (format->findInt32(KEY_COLOR_STANDARD, &colorStandard)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecConfigColorStandard, colorStandard);
+ }
+ int32_t colorRange = -1;
+ if (format->findInt32(KEY_COLOR_RANGE, &colorRange)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecConfigColorRange, colorRange);
+ }
+ int32_t colorTransfer = -1;
+ if (format->findInt32(KEY_COLOR_TRANSFER, &colorTransfer)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecConfigColorTransfer, colorTransfer);
+ }
+ HDRStaticInfo info;
+ if (ColorUtils::getHDRStaticInfoFromFormat(format, &info)
+ && ColorUtils::isHDRStaticInfoValid(&info)) {
+ mHDRMetadataFlags |= kFlagHDRStaticInfo;
+ }
}
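The new metrics block records whichever KEY_COLOR_* values the app put on the configure format. A minimal sketch of the producer side, with std::map standing in for sp&lt;AMessage&gt;; the key strings match the patch, but the numeric values are illustrative assumptions:

```cpp
#include <cstdint>
#include <map>
#include <string>

int main() {
    std::map<std::string, int32_t> format;  // stand-in for sp<AMessage>
    format["color-standard"] = 6;           // e.g. BT.2020 (assumed value)
    format["color-range"]    = 1;           // e.g. full range (assumed value)
    format["color-transfer"] = 6;           // e.g. ST 2084 / PQ (assumed value)
    // configure() would then copy each key that is present into the
    // android.media.mediacodec.config-color-* mediametrics fields.
    return 0;
}
```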
// Prevent possible integer overflow in downstream code.
@@ -3032,10 +3061,8 @@
case STOPPING:
{
if (mFlags & kFlagSawMediaServerDie) {
- bool postPendingReplies = true;
if (mState == RELEASING && !mReplyID) {
ALOGD("Releasing asynchronously, so nothing to reply here.");
- postPendingReplies = false;
}
// MediaServer died, there definitely won't
// be a shutdown complete notification after
@@ -3048,8 +3075,11 @@
if (mState == RELEASING) {
mComponentName.clear();
}
- if (postPendingReplies) {
+ if (mReplyID) {
postPendingRepliesAndDeferredMessages(origin + ":dead");
+ } else {
+ ALOGD("no pending replies: %s:dead following %s",
+ origin.c_str(), mLastReplyOrigin.c_str());
}
sendErrorResponse = false;
} else if (!mReplyID) {
@@ -4516,6 +4546,9 @@
HDRStaticInfo info;
if (ColorUtils::getHDRStaticInfoFromFormat(mOutputFormat, &info)) {
setNativeWindowHdrMetadata(mSurface.get(), &info);
+ if (ColorUtils::isHDRStaticInfoValid(&info)) {
+ mHDRMetadataFlags |= kFlagHDRStaticInfo;
+ }
}
}
@@ -4524,6 +4557,7 @@
&& hdr10PlusInfo != nullptr && hdr10PlusInfo->size() > 0) {
native_window_set_buffers_hdr10_plus_metadata(mSurface.get(),
hdr10PlusInfo->size(), hdr10PlusInfo->data());
+ mHDRMetadataFlags |= kFlagHDR10PlusInfo;
}
if (mime.startsWithIgnoreCase("video/")) {
@@ -4568,6 +4602,21 @@
mCrypto->notifyResolution(width, height);
}
}
+
+ if (mMetricsHandle != 0) {
+ int32_t colorStandard = -1;
+ if (format->findInt32(KEY_COLOR_STANDARD, &colorStandard)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecParsedColorStandard, colorStandard);
+ }
+ int32_t colorRange = -1;
+ if (format->findInt32(KEY_COLOR_RANGE, &colorRange)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecParsedColorRange, colorRange);
+ }
+ int32_t colorTransfer = -1;
+ if (format->findInt32(KEY_COLOR_TRANSFER, &colorTransfer)) {
+ mediametrics_setInt32(mMetricsHandle, kCodecParsedColorTransfer, colorTransfer);
+ }
+ }
}
void MediaCodec::extractCSD(const sp<AMessage> &format) {
diff --git a/media/libstagefright/TEST_MAPPING b/media/libstagefright/TEST_MAPPING
index 53181cc..22885c9 100644
--- a/media/libstagefright/TEST_MAPPING
+++ b/media/libstagefright/TEST_MAPPING
@@ -13,7 +13,7 @@
"presubmit-large": [
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaMiscTestCases",
"options": [
{
"include-annotation": "android.platform.test.annotations.Presubmit"
@@ -42,6 +42,17 @@
]
},
{
+ "name": "CtsMediaDecoderTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaEncoderTestCases",
"options": [
{
@@ -53,6 +64,17 @@
]
},
{
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-annotation": "android.platform.test.annotations.Presubmit"
+ },
+ {
+ "exclude-annotation": "android.platform.test.annotations.RequiresDevice"
+ }
+ ]
+ },
+ {
"name": "CtsMediaPlayerTestCases",
"options": [
{
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index a6df5bb..4b6470a 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -28,9 +28,6 @@
#include "include/HevcUtils.h"
#include <cutils/properties.h>
-#include <media/openmax/OMX_Audio.h>
-#include <media/openmax/OMX_Video.h>
-#include <media/openmax/OMX_VideoExt.h>
#include <media/stagefright/CodecBase.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
@@ -57,6 +54,14 @@
#define AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS \
"mpegh-compatible-sets"
+namespace {
+ // TODO: this should possibly be handled in an else
+ constexpr static int32_t AACObjectNull = 0;
+
+ // TODO: decide if we should just not transmit the level in this case
+ constexpr static int32_t DolbyVisionLevelUnknown = 0;
+}
+
namespace android {
static status_t copyNALUToABuffer(sp<ABuffer> *buffer, const uint8_t *ptr, size_t length) {
@@ -156,21 +161,22 @@
audioObjectType >>= 11;
}
- const static ALookup<uint16_t, OMX_AUDIO_AACPROFILETYPE> profiles {
- { 1, OMX_AUDIO_AACObjectMain },
- { 2, OMX_AUDIO_AACObjectLC },
- { 3, OMX_AUDIO_AACObjectSSR },
- { 4, OMX_AUDIO_AACObjectLTP },
- { 5, OMX_AUDIO_AACObjectHE },
- { 6, OMX_AUDIO_AACObjectScalable },
- { 17, OMX_AUDIO_AACObjectERLC },
- { 23, OMX_AUDIO_AACObjectLD },
- { 29, OMX_AUDIO_AACObjectHE_PS },
- { 39, OMX_AUDIO_AACObjectELD },
- { 42, OMX_AUDIO_AACObjectXHE },
+
+ const static ALookup<uint16_t, int32_t> profiles {
+ { 1, AACObjectMain },
+ { 2, AACObjectLC },
+ { 3, AACObjectSSR },
+ { 4, AACObjectLTP },
+ { 5, AACObjectHE },
+ { 6, AACObjectScalable },
+ { 17, AACObjectERLC },
+ { 23, AACObjectLD },
+ { 29, AACObjectHE_PS },
+ { 39, AACObjectELD },
+ { 42, AACObjectXHE },
};
- OMX_AUDIO_AACPROFILETYPE profile;
+ int32_t profile;
if (profiles.map(audioObjectType, &profile)) {
format->setInt32("profile", profile);
}
@@ -184,59 +190,92 @@
const uint8_t constraints = ptr[2];
const uint8_t level = ptr[3];
- const static ALookup<uint8_t, OMX_VIDEO_AVCLEVELTYPE> levels {
- { 9, OMX_VIDEO_AVCLevel1b }, // technically, 9 is only used for High+ profiles
- { 10, OMX_VIDEO_AVCLevel1 },
- { 11, OMX_VIDEO_AVCLevel11 }, // prefer level 1.1 for the value 11
- { 11, OMX_VIDEO_AVCLevel1b },
- { 12, OMX_VIDEO_AVCLevel12 },
- { 13, OMX_VIDEO_AVCLevel13 },
- { 20, OMX_VIDEO_AVCLevel2 },
- { 21, OMX_VIDEO_AVCLevel21 },
- { 22, OMX_VIDEO_AVCLevel22 },
- { 30, OMX_VIDEO_AVCLevel3 },
- { 31, OMX_VIDEO_AVCLevel31 },
- { 32, OMX_VIDEO_AVCLevel32 },
- { 40, OMX_VIDEO_AVCLevel4 },
- { 41, OMX_VIDEO_AVCLevel41 },
- { 42, OMX_VIDEO_AVCLevel42 },
- { 50, OMX_VIDEO_AVCLevel5 },
- { 51, OMX_VIDEO_AVCLevel51 },
- { 52, OMX_VIDEO_AVCLevel52 },
- { 60, OMX_VIDEO_AVCLevel6 },
- { 61, OMX_VIDEO_AVCLevel61 },
- { 62, OMX_VIDEO_AVCLevel62 },
+ const static ALookup<uint8_t, int32_t> levels {
+ { 9, AVCLevel1b }, // technically, 9 is only used for High+ profiles
+ { 10, AVCLevel1 },
+ { 11, AVCLevel11 }, // prefer level 1.1 for the value 11
+ { 11, AVCLevel1b },
+ { 12, AVCLevel12 },
+ { 13, AVCLevel13 },
+ { 20, AVCLevel2 },
+ { 21, AVCLevel21 },
+ { 22, AVCLevel22 },
+ { 30, AVCLevel3 },
+ { 31, AVCLevel31 },
+ { 32, AVCLevel32 },
+ { 40, AVCLevel4 },
+ { 41, AVCLevel41 },
+ { 42, AVCLevel42 },
+ { 50, AVCLevel5 },
+ { 51, AVCLevel51 },
+ { 52, AVCLevel52 },
+ { 60, AVCLevel6 },
+ { 61, AVCLevel61 },
+ { 62, AVCLevel62 },
};
- const static ALookup<uint8_t, OMX_VIDEO_AVCPROFILETYPE> profiles {
- { 66, OMX_VIDEO_AVCProfileBaseline },
- { 77, OMX_VIDEO_AVCProfileMain },
- { 88, OMX_VIDEO_AVCProfileExtended },
- { 100, OMX_VIDEO_AVCProfileHigh },
- { 110, OMX_VIDEO_AVCProfileHigh10 },
- { 122, OMX_VIDEO_AVCProfileHigh422 },
- { 244, OMX_VIDEO_AVCProfileHigh444 },
+ const static ALookup<uint8_t, int32_t> profiles {
+ { 66, AVCProfileBaseline },
+ { 77, AVCProfileMain },
+ { 88, AVCProfileExtended },
+ { 100, AVCProfileHigh },
+ { 110, AVCProfileHigh10 },
+ { 122, AVCProfileHigh422 },
+ { 244, AVCProfileHigh444 },
};
// set profile & level if they are recognized
- OMX_VIDEO_AVCPROFILETYPE codecProfile;
- OMX_VIDEO_AVCLEVELTYPE codecLevel;
+ int32_t codecProfile;
+ int32_t codecLevel;
if (profiles.map(profile, &codecProfile)) {
if (profile == 66 && (constraints & 0x40)) {
- codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedBaseline;
+ codecProfile = AVCProfileConstrainedBaseline;
} else if (profile == 100 && (constraints & 0x0C) == 0x0C) {
- codecProfile = (OMX_VIDEO_AVCPROFILETYPE)OMX_VIDEO_AVCProfileConstrainedHigh;
+ codecProfile = AVCProfileConstrainedHigh;
}
format->setInt32("profile", codecProfile);
if (levels.map(level, &codecLevel)) {
// for 9 && 11 decide level based on profile and constraint_set3 flag
if (level == 11 && (profile == 66 || profile == 77 || profile == 88)) {
- codecLevel = (constraints & 0x10) ? OMX_VIDEO_AVCLevel1b : OMX_VIDEO_AVCLevel11;
+ codecLevel = (constraints & 0x10) ? AVCLevel1b : AVCLevel11;
}
format->setInt32("level", codecLevel);
}
}
}
+static const ALookup<uint8_t, int32_t>& getDolbyVisionProfileTable() {
+ static const ALookup<uint8_t, int32_t> profileTable = {
+ {1, DolbyVisionProfileDvavPen},
+ {3, DolbyVisionProfileDvheDen},
+ {4, DolbyVisionProfileDvheDtr},
+ {5, DolbyVisionProfileDvheStn},
+ {6, DolbyVisionProfileDvheDth},
+ {7, DolbyVisionProfileDvheDtb},
+ {8, DolbyVisionProfileDvheSt},
+ {9, DolbyVisionProfileDvavSe},
+ {10, DolbyVisionProfileDvav110},
+ };
+ return profileTable;
+}
+
+static const ALookup<uint8_t, int32_t>& getDolbyVisionLevelsTable() {
+ static const ALookup<uint8_t, int32_t> levelsTable = {
+ {0, DolbyVisionLevelUnknown},
+ {1, DolbyVisionLevelHd24},
+ {2, DolbyVisionLevelHd30},
+ {3, DolbyVisionLevelFhd24},
+ {4, DolbyVisionLevelFhd30},
+ {5, DolbyVisionLevelFhd60},
+ {6, DolbyVisionLevelUhd24},
+ {7, DolbyVisionLevelUhd30},
+ {8, DolbyVisionLevelUhd48},
+ {9, DolbyVisionLevelUhd60},
+ {10, DolbyVisionLevelUhd120},
+ {11, DolbyVisionLevel8k30},
+ {12, DolbyVisionLevel8k60},
+ };
+ return levelsTable;
+}
static void parseDolbyVisionProfileLevelFromDvcc(const uint8_t *ptr, size_t size, sp<AMessage> &format) {
// dv_major.dv_minor Should be 1.0 or 2.1
if (size != 24 || ((ptr[0] != 1 || ptr[1] != 0) && (ptr[0] != 2 || ptr[1] != 1))) {
@@ -256,41 +295,20 @@
// All Dolby Profiles will have profile and level info in MediaFormat
// Profile 8 and 9 will have bl_compatibility_id too.
- const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONPROFILETYPE> profiles{
- {1, OMX_VIDEO_DolbyVisionProfileDvavPen},
- {3, OMX_VIDEO_DolbyVisionProfileDvheDen},
- {4, OMX_VIDEO_DolbyVisionProfileDvheDtr},
- {5, OMX_VIDEO_DolbyVisionProfileDvheStn},
- {6, OMX_VIDEO_DolbyVisionProfileDvheDth},
- {7, OMX_VIDEO_DolbyVisionProfileDvheDtb},
- {8, OMX_VIDEO_DolbyVisionProfileDvheSt},
- {9, OMX_VIDEO_DolbyVisionProfileDvavSe},
- {10, OMX_VIDEO_DolbyVisionProfileDvav110},
- };
+ const ALookup<uint8_t, int32_t> &profiles = getDolbyVisionProfileTable();
+ const ALookup<uint8_t, int32_t> &levels = getDolbyVisionLevelsTable();
- const static ALookup<uint8_t, OMX_VIDEO_DOLBYVISIONLEVELTYPE> levels{
- {0, OMX_VIDEO_DolbyVisionLevelUnknown},
- {1, OMX_VIDEO_DolbyVisionLevelHd24},
- {2, OMX_VIDEO_DolbyVisionLevelHd30},
- {3, OMX_VIDEO_DolbyVisionLevelFhd24},
- {4, OMX_VIDEO_DolbyVisionLevelFhd30},
- {5, OMX_VIDEO_DolbyVisionLevelFhd60},
- {6, OMX_VIDEO_DolbyVisionLevelUhd24},
- {7, OMX_VIDEO_DolbyVisionLevelUhd30},
- {8, OMX_VIDEO_DolbyVisionLevelUhd48},
- {9, OMX_VIDEO_DolbyVisionLevelUhd60},
- };
// set rpuAssoc
if (rpu_present_flag && el_present_flag && !bl_present_flag) {
format->setInt32("rpuAssoc", 1);
}
// set profile & level if they are recognized
- OMX_VIDEO_DOLBYVISIONPROFILETYPE codecProfile;
- OMX_VIDEO_DOLBYVISIONLEVELTYPE codecLevel;
+ int32_t codecProfile;
+ int32_t codecLevel;
if (profiles.map(profile, &codecProfile)) {
format->setInt32("profile", codecProfile);
- if (codecProfile == OMX_VIDEO_DolbyVisionProfileDvheSt ||
- codecProfile == OMX_VIDEO_DolbyVisionProfileDvavSe) {
+ if (codecProfile == DolbyVisionProfileDvheSt ||
+ codecProfile == DolbyVisionProfileDvavSe) {
format->setInt32("bl_compatibility_id", bl_compatibility_id);
}
if (levels.map(level, &codecLevel)) {
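This change leans on both directions of ALookup: map() goes key to value, rlookup() goes value back to key. A minimal standalone stand-in showing that contract (the real class is in media/stagefright/foundation/ALookup.h; this is not its implementation):

```cpp
#include <utility>
#include <vector>

template <typename A, typename B>
struct Lookup {
    std::vector<std::pair<A, B>> table;
    bool map(const A &a, B *b) const {      // A -> B, e.g. profiles.map(9, &p)
        for (const auto &e : table)
            if (e.first == a) { *b = e.second; return true; }
        return false;
    }
    bool rlookup(const B &b, A *a) const {  // B -> A, e.g. profiles.rlookup(p, &raw)
        for (const auto &e : table)
            if (e.second == b) { *a = e.first; return true; }
        return false;
    }
};
```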
@@ -307,32 +325,32 @@
const uint8_t profile = ptr[6];
const uint8_t level = ptr[5];
- const static ALookup<uint8_t, OMX_VIDEO_H263PROFILETYPE> profiles {
- { 0, OMX_VIDEO_H263ProfileBaseline },
- { 1, OMX_VIDEO_H263ProfileH320Coding },
- { 2, OMX_VIDEO_H263ProfileBackwardCompatible },
- { 3, OMX_VIDEO_H263ProfileISWV2 },
- { 4, OMX_VIDEO_H263ProfileISWV3 },
- { 5, OMX_VIDEO_H263ProfileHighCompression },
- { 6, OMX_VIDEO_H263ProfileInternet },
- { 7, OMX_VIDEO_H263ProfileInterlace },
- { 8, OMX_VIDEO_H263ProfileHighLatency },
+ const static ALookup<uint8_t, int32_t> profiles {
+ { 0, H263ProfileBaseline },
+ { 1, H263ProfileH320Coding },
+ { 2, H263ProfileBackwardCompatible },
+ { 3, H263ProfileISWV2 },
+ { 4, H263ProfileISWV3 },
+ { 5, H263ProfileHighCompression },
+ { 6, H263ProfileInternet },
+ { 7, H263ProfileInterlace },
+ { 8, H263ProfileHighLatency },
};
- const static ALookup<uint8_t, OMX_VIDEO_H263LEVELTYPE> levels {
- { 10, OMX_VIDEO_H263Level10 },
- { 20, OMX_VIDEO_H263Level20 },
- { 30, OMX_VIDEO_H263Level30 },
- { 40, OMX_VIDEO_H263Level40 },
- { 45, OMX_VIDEO_H263Level45 },
- { 50, OMX_VIDEO_H263Level50 },
- { 60, OMX_VIDEO_H263Level60 },
- { 70, OMX_VIDEO_H263Level70 },
+ const static ALookup<uint8_t, int32_t> levels {
+ { 10, H263Level10 },
+ { 20, H263Level20 },
+ { 30, H263Level30 },
+ { 40, H263Level40 },
+ { 45, H263Level45 },
+ { 50, H263Level50 },
+ { 60, H263Level60 },
+ { 70, H263Level70 },
};
// set profile & level if they are recognized
- OMX_VIDEO_H263PROFILETYPE codecProfile;
- OMX_VIDEO_H263LEVELTYPE codecLevel;
+ int32_t codecProfile;
+ int32_t codecLevel;
if (profiles.map(profile, &codecProfile)) {
format->setInt32("profile", codecProfile);
if (levels.map(level, &codecLevel)) {
@@ -350,59 +368,59 @@
const uint8_t tier = (ptr[1] & 0x20) >> 5;
const uint8_t level = ptr[12];
- const static ALookup<std::pair<uint8_t, uint8_t>, OMX_VIDEO_HEVCLEVELTYPE> levels {
- { { 0, 30 }, OMX_VIDEO_HEVCMainTierLevel1 },
- { { 0, 60 }, OMX_VIDEO_HEVCMainTierLevel2 },
- { { 0, 63 }, OMX_VIDEO_HEVCMainTierLevel21 },
- { { 0, 90 }, OMX_VIDEO_HEVCMainTierLevel3 },
- { { 0, 93 }, OMX_VIDEO_HEVCMainTierLevel31 },
- { { 0, 120 }, OMX_VIDEO_HEVCMainTierLevel4 },
- { { 0, 123 }, OMX_VIDEO_HEVCMainTierLevel41 },
- { { 0, 150 }, OMX_VIDEO_HEVCMainTierLevel5 },
- { { 0, 153 }, OMX_VIDEO_HEVCMainTierLevel51 },
- { { 0, 156 }, OMX_VIDEO_HEVCMainTierLevel52 },
- { { 0, 180 }, OMX_VIDEO_HEVCMainTierLevel6 },
- { { 0, 183 }, OMX_VIDEO_HEVCMainTierLevel61 },
- { { 0, 186 }, OMX_VIDEO_HEVCMainTierLevel62 },
- { { 1, 30 }, OMX_VIDEO_HEVCHighTierLevel1 },
- { { 1, 60 }, OMX_VIDEO_HEVCHighTierLevel2 },
- { { 1, 63 }, OMX_VIDEO_HEVCHighTierLevel21 },
- { { 1, 90 }, OMX_VIDEO_HEVCHighTierLevel3 },
- { { 1, 93 }, OMX_VIDEO_HEVCHighTierLevel31 },
- { { 1, 120 }, OMX_VIDEO_HEVCHighTierLevel4 },
- { { 1, 123 }, OMX_VIDEO_HEVCHighTierLevel41 },
- { { 1, 150 }, OMX_VIDEO_HEVCHighTierLevel5 },
- { { 1, 153 }, OMX_VIDEO_HEVCHighTierLevel51 },
- { { 1, 156 }, OMX_VIDEO_HEVCHighTierLevel52 },
- { { 1, 180 }, OMX_VIDEO_HEVCHighTierLevel6 },
- { { 1, 183 }, OMX_VIDEO_HEVCHighTierLevel61 },
- { { 1, 186 }, OMX_VIDEO_HEVCHighTierLevel62 },
+ const static ALookup<std::pair<uint8_t, uint8_t>, int32_t> levels {
+ { { 0, 30 }, HEVCMainTierLevel1 },
+ { { 0, 60 }, HEVCMainTierLevel2 },
+ { { 0, 63 }, HEVCMainTierLevel21 },
+ { { 0, 90 }, HEVCMainTierLevel3 },
+ { { 0, 93 }, HEVCMainTierLevel31 },
+ { { 0, 120 }, HEVCMainTierLevel4 },
+ { { 0, 123 }, HEVCMainTierLevel41 },
+ { { 0, 150 }, HEVCMainTierLevel5 },
+ { { 0, 153 }, HEVCMainTierLevel51 },
+ { { 0, 156 }, HEVCMainTierLevel52 },
+ { { 0, 180 }, HEVCMainTierLevel6 },
+ { { 0, 183 }, HEVCMainTierLevel61 },
+ { { 0, 186 }, HEVCMainTierLevel62 },
+ { { 1, 30 }, HEVCHighTierLevel1 },
+ { { 1, 60 }, HEVCHighTierLevel2 },
+ { { 1, 63 }, HEVCHighTierLevel21 },
+ { { 1, 90 }, HEVCHighTierLevel3 },
+ { { 1, 93 }, HEVCHighTierLevel31 },
+ { { 1, 120 }, HEVCHighTierLevel4 },
+ { { 1, 123 }, HEVCHighTierLevel41 },
+ { { 1, 150 }, HEVCHighTierLevel5 },
+ { { 1, 153 }, HEVCHighTierLevel51 },
+ { { 1, 156 }, HEVCHighTierLevel52 },
+ { { 1, 180 }, HEVCHighTierLevel6 },
+ { { 1, 183 }, HEVCHighTierLevel61 },
+ { { 1, 186 }, HEVCHighTierLevel62 },
};
- const static ALookup<uint8_t, OMX_VIDEO_HEVCPROFILETYPE> profiles {
- { 1, OMX_VIDEO_HEVCProfileMain },
- { 2, OMX_VIDEO_HEVCProfileMain10 },
+ const static ALookup<uint8_t, int32_t> profiles {
+ { 1, HEVCProfileMain },
+ { 2, HEVCProfileMain10 },
// use Main for Main Still Picture decoding
- { 3, OMX_VIDEO_HEVCProfileMain },
+ { 3, HEVCProfileMain },
};
// set profile & level if they are recognized
- OMX_VIDEO_HEVCPROFILETYPE codecProfile;
- OMX_VIDEO_HEVCLEVELTYPE codecLevel;
+ int32_t codecProfile;
+ int32_t codecLevel;
if (!profiles.map(profile, &codecProfile)) {
if (ptr[2] & 0x40 /* general compatibility flag 1 */) {
// Note that this case covers Main Still Picture too
- codecProfile = OMX_VIDEO_HEVCProfileMain;
+ codecProfile = HEVCProfileMain;
} else if (ptr[2] & 0x20 /* general compatibility flag 2 */) {
- codecProfile = OMX_VIDEO_HEVCProfileMain10;
+ codecProfile = HEVCProfileMain10;
} else {
return;
}
}
// bump to HDR profile
- if (isHdr(format) && codecProfile == OMX_VIDEO_HEVCProfileMain10) {
- codecProfile = OMX_VIDEO_HEVCProfileMain10HDR10;
+ if (isHdr(format) && codecProfile == HEVCProfileMain10) {
+ codecProfile = HEVCProfileMain10HDR10;
}
format->setInt32("profile", codecProfile);
@@ -422,36 +440,36 @@
}
const uint8_t indication = ((seq[4] & 0xF) << 4) | ((seq[5] & 0xF0) >> 4);
- const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles {
- { 0x50, OMX_VIDEO_MPEG2ProfileSimple },
- { 0x40, OMX_VIDEO_MPEG2ProfileMain },
- { 0x30, OMX_VIDEO_MPEG2ProfileSNR },
- { 0x20, OMX_VIDEO_MPEG2ProfileSpatial },
- { 0x10, OMX_VIDEO_MPEG2ProfileHigh },
+ const static ALookup<uint8_t, int32_t> profiles {
+ { 0x50, MPEG2ProfileSimple },
+ { 0x40, MPEG2ProfileMain },
+ { 0x30, MPEG2ProfileSNR },
+ { 0x20, MPEG2ProfileSpatial },
+ { 0x10, MPEG2ProfileHigh },
};
- const static ALookup<uint8_t, OMX_VIDEO_MPEG2LEVELTYPE> levels {
- { 0x0A, OMX_VIDEO_MPEG2LevelLL },
- { 0x08, OMX_VIDEO_MPEG2LevelML },
- { 0x06, OMX_VIDEO_MPEG2LevelH14 },
- { 0x04, OMX_VIDEO_MPEG2LevelHL },
- { 0x02, OMX_VIDEO_MPEG2LevelHP },
+ const static ALookup<uint8_t, int32_t> levels {
+ { 0x0A, MPEG2LevelLL },
+ { 0x08, MPEG2LevelML },
+ { 0x06, MPEG2LevelH14 },
+ { 0x04, MPEG2LevelHL },
+ { 0x02, MPEG2LevelHP },
};
const static ALookup<uint8_t,
- std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE>> escapes {
+ std::pair<int32_t, int32_t>> escapes {
/* unsupported
- { 0x8E, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelLL } },
- { 0x8D, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelML } },
- { 0x8B, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelH14 } },
- { 0x8A, { XXX_MPEG2ProfileMultiView, OMX_VIDEO_MPEG2LevelHL } }, */
- { 0x85, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelML } },
- { 0x82, { OMX_VIDEO_MPEG2Profile422, OMX_VIDEO_MPEG2LevelHL } },
+ { 0x8E, { XXX_MPEG2ProfileMultiView, MPEG2LevelLL } },
+ { 0x8D, { XXX_MPEG2ProfileMultiView, MPEG2LevelML } },
+ { 0x8B, { XXX_MPEG2ProfileMultiView, MPEG2LevelH14 } },
+ { 0x8A, { XXX_MPEG2ProfileMultiView, MPEG2LevelHL } }, */
+ { 0x85, { MPEG2Profile422, MPEG2LevelML } },
+ { 0x82, { MPEG2Profile422, MPEG2LevelHL } },
};
- OMX_VIDEO_MPEG2PROFILETYPE profile;
- OMX_VIDEO_MPEG2LEVELTYPE level;
- std::pair<OMX_VIDEO_MPEG2PROFILETYPE, OMX_VIDEO_MPEG2LEVELTYPE> profileLevel;
+ int32_t profile;
+ int32_t level;
+ std::pair<int32_t, int32_t> profileLevel;
if (escapes.map(indication, &profileLevel)) {
format->setInt32("profile", profileLevel.first);
format->setInt32("level", profileLevel.second);
@@ -468,16 +486,16 @@
// esds seems to only contain the profile for MPEG-2
uint8_t objType;
if (esds.getObjectTypeIndication(&objType) == OK) {
- const static ALookup<uint8_t, OMX_VIDEO_MPEG2PROFILETYPE> profiles{
- { 0x60, OMX_VIDEO_MPEG2ProfileSimple },
- { 0x61, OMX_VIDEO_MPEG2ProfileMain },
- { 0x62, OMX_VIDEO_MPEG2ProfileSNR },
- { 0x63, OMX_VIDEO_MPEG2ProfileSpatial },
- { 0x64, OMX_VIDEO_MPEG2ProfileHigh },
- { 0x65, OMX_VIDEO_MPEG2Profile422 },
+ const static ALookup<uint8_t, int32_t> profiles{
+ { 0x60, MPEG2ProfileSimple },
+ { 0x61, MPEG2ProfileMain },
+ { 0x62, MPEG2ProfileSNR },
+ { 0x63, MPEG2ProfileSpatial },
+ { 0x64, MPEG2ProfileHigh },
+ { 0x65, MPEG2Profile422 },
};
- OMX_VIDEO_MPEG2PROFILETYPE profile;
+ int32_t profile;
if (profiles.map(objType, &profile)) {
format->setInt32("profile", profile);
}
@@ -492,82 +510,82 @@
const uint8_t indication = seq[4];
const static ALookup<uint8_t,
- std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE>> table {
- { 0b00000001, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level1 } },
- { 0b00000010, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level2 } },
- { 0b00000011, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level3 } },
- { 0b00000100, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level4a } },
- { 0b00000101, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level5 } },
- { 0b00000110, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level6 } },
- { 0b00001000, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0 } },
- { 0b00001001, { OMX_VIDEO_MPEG4ProfileSimple, OMX_VIDEO_MPEG4Level0b } },
- { 0b00010000, { OMX_VIDEO_MPEG4ProfileSimpleScalable, OMX_VIDEO_MPEG4Level0 } },
- { 0b00010001, { OMX_VIDEO_MPEG4ProfileSimpleScalable, OMX_VIDEO_MPEG4Level1 } },
- { 0b00010010, { OMX_VIDEO_MPEG4ProfileSimpleScalable, OMX_VIDEO_MPEG4Level2 } },
+ std::pair<int32_t, int32_t>> table {
+ { 0b00000001, { MPEG4ProfileSimple, MPEG4Level1 } },
+ { 0b00000010, { MPEG4ProfileSimple, MPEG4Level2 } },
+ { 0b00000011, { MPEG4ProfileSimple, MPEG4Level3 } },
+ { 0b00000100, { MPEG4ProfileSimple, MPEG4Level4a } },
+ { 0b00000101, { MPEG4ProfileSimple, MPEG4Level5 } },
+ { 0b00000110, { MPEG4ProfileSimple, MPEG4Level6 } },
+ { 0b00001000, { MPEG4ProfileSimple, MPEG4Level0 } },
+ { 0b00001001, { MPEG4ProfileSimple, MPEG4Level0b } },
+ { 0b00010000, { MPEG4ProfileSimpleScalable, MPEG4Level0 } },
+ { 0b00010001, { MPEG4ProfileSimpleScalable, MPEG4Level1 } },
+ { 0b00010010, { MPEG4ProfileSimpleScalable, MPEG4Level2 } },
/* unsupported
- { 0b00011101, { XXX_MPEG4ProfileSimpleScalableER, OMX_VIDEO_MPEG4Level0 } },
- { 0b00011110, { XXX_MPEG4ProfileSimpleScalableER, OMX_VIDEO_MPEG4Level1 } },
- { 0b00011111, { XXX_MPEG4ProfileSimpleScalableER, OMX_VIDEO_MPEG4Level2 } }, */
- { 0b00100001, { OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level1 } },
- { 0b00100010, { OMX_VIDEO_MPEG4ProfileCore, OMX_VIDEO_MPEG4Level2 } },
- { 0b00110010, { OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level2 } },
- { 0b00110011, { OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level3 } },
- { 0b00110100, { OMX_VIDEO_MPEG4ProfileMain, OMX_VIDEO_MPEG4Level4 } },
+ { 0b00011101, { XXX_MPEG4ProfileSimpleScalableER, MPEG4Level0 } },
+ { 0b00011110, { XXX_MPEG4ProfileSimpleScalableER, MPEG4Level1 } },
+ { 0b00011111, { XXX_MPEG4ProfileSimpleScalableER, MPEG4Level2 } }, */
+ { 0b00100001, { MPEG4ProfileCore, MPEG4Level1 } },
+ { 0b00100010, { MPEG4ProfileCore, MPEG4Level2 } },
+ { 0b00110010, { MPEG4ProfileMain, MPEG4Level2 } },
+ { 0b00110011, { MPEG4ProfileMain, MPEG4Level3 } },
+ { 0b00110100, { MPEG4ProfileMain, MPEG4Level4 } },
/* deprecated
- { 0b01000010, { OMX_VIDEO_MPEG4ProfileNbit, OMX_VIDEO_MPEG4Level2 } }, */
- { 0b01010001, { OMX_VIDEO_MPEG4ProfileScalableTexture, OMX_VIDEO_MPEG4Level1 } },
- { 0b01100001, { OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level1 } },
- { 0b01100010, { OMX_VIDEO_MPEG4ProfileSimpleFace, OMX_VIDEO_MPEG4Level2 } },
- { 0b01100011, { OMX_VIDEO_MPEG4ProfileSimpleFBA, OMX_VIDEO_MPEG4Level1 } },
- { 0b01100100, { OMX_VIDEO_MPEG4ProfileSimpleFBA, OMX_VIDEO_MPEG4Level2 } },
- { 0b01110001, { OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level1 } },
- { 0b01110010, { OMX_VIDEO_MPEG4ProfileBasicAnimated, OMX_VIDEO_MPEG4Level2 } },
- { 0b10000001, { OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level1 } },
- { 0b10000010, { OMX_VIDEO_MPEG4ProfileHybrid, OMX_VIDEO_MPEG4Level2 } },
- { 0b10010001, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level1 } },
- { 0b10010010, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level2 } },
- { 0b10010011, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level3 } },
- { 0b10010100, { OMX_VIDEO_MPEG4ProfileAdvancedRealTime, OMX_VIDEO_MPEG4Level4 } },
- { 0b10100001, { OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level1 } },
- { 0b10100010, { OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level2 } },
- { 0b10100011, { OMX_VIDEO_MPEG4ProfileCoreScalable, OMX_VIDEO_MPEG4Level3 } },
- { 0b10110001, { OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level1 } },
- { 0b10110010, { OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level2 } },
- { 0b10110011, { OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level3 } },
- { 0b10110100, { OMX_VIDEO_MPEG4ProfileAdvancedCoding, OMX_VIDEO_MPEG4Level4 } },
- { 0b11000001, { OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level1 } },
- { 0b11000010, { OMX_VIDEO_MPEG4ProfileAdvancedCore, OMX_VIDEO_MPEG4Level2 } },
- { 0b11010001, { OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level1 } },
- { 0b11010010, { OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level2 } },
- { 0b11010011, { OMX_VIDEO_MPEG4ProfileAdvancedScalable, OMX_VIDEO_MPEG4Level3 } },
+ { 0b01000010, { MPEG4ProfileNbit, MPEG4Level2 } }, */
+ { 0b01010001, { MPEG4ProfileScalableTexture, MPEG4Level1 } },
+ { 0b01100001, { MPEG4ProfileSimpleFace, MPEG4Level1 } },
+ { 0b01100010, { MPEG4ProfileSimpleFace, MPEG4Level2 } },
+ { 0b01100011, { MPEG4ProfileSimpleFBA, MPEG4Level1 } },
+ { 0b01100100, { MPEG4ProfileSimpleFBA, MPEG4Level2 } },
+ { 0b01110001, { MPEG4ProfileBasicAnimated, MPEG4Level1 } },
+ { 0b01110010, { MPEG4ProfileBasicAnimated, MPEG4Level2 } },
+ { 0b10000001, { MPEG4ProfileHybrid, MPEG4Level1 } },
+ { 0b10000010, { MPEG4ProfileHybrid, MPEG4Level2 } },
+ { 0b10010001, { MPEG4ProfileAdvancedRealTime, MPEG4Level1 } },
+ { 0b10010010, { MPEG4ProfileAdvancedRealTime, MPEG4Level2 } },
+ { 0b10010011, { MPEG4ProfileAdvancedRealTime, MPEG4Level3 } },
+ { 0b10010100, { MPEG4ProfileAdvancedRealTime, MPEG4Level4 } },
+ { 0b10100001, { MPEG4ProfileCoreScalable, MPEG4Level1 } },
+ { 0b10100010, { MPEG4ProfileCoreScalable, MPEG4Level2 } },
+ { 0b10100011, { MPEG4ProfileCoreScalable, MPEG4Level3 } },
+ { 0b10110001, { MPEG4ProfileAdvancedCoding, MPEG4Level1 } },
+ { 0b10110010, { MPEG4ProfileAdvancedCoding, MPEG4Level2 } },
+ { 0b10110011, { MPEG4ProfileAdvancedCoding, MPEG4Level3 } },
+ { 0b10110100, { MPEG4ProfileAdvancedCoding, MPEG4Level4 } },
+ { 0b11000001, { MPEG4ProfileAdvancedCore, MPEG4Level1 } },
+ { 0b11000010, { MPEG4ProfileAdvancedCore, MPEG4Level2 } },
+ { 0b11010001, { MPEG4ProfileAdvancedScalable, MPEG4Level1 } },
+ { 0b11010010, { MPEG4ProfileAdvancedScalable, MPEG4Level2 } },
+ { 0b11010011, { MPEG4ProfileAdvancedScalable, MPEG4Level3 } },
/* unsupported
- { 0b11100001, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level1 } },
- { 0b11100010, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level2 } },
- { 0b11100011, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level3 } },
- { 0b11100100, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level4 } },
- { 0b11100101, { XXX_MPEG4ProfileCoreStudio, OMX_VIDEO_MPEG4Level1 } },
- { 0b11100110, { XXX_MPEG4ProfileCoreStudio, OMX_VIDEO_MPEG4Level2 } },
- { 0b11100111, { XXX_MPEG4ProfileCoreStudio, OMX_VIDEO_MPEG4Level3 } },
- { 0b11101000, { XXX_MPEG4ProfileCoreStudio, OMX_VIDEO_MPEG4Level4 } },
- { 0b11101011, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level5 } },
- { 0b11101100, { XXX_MPEG4ProfileSimpleStudio, OMX_VIDEO_MPEG4Level6 } }, */
- { 0b11110000, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level0 } },
- { 0b11110001, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level1 } },
- { 0b11110010, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level2 } },
- { 0b11110011, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level3 } },
- { 0b11110100, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level4 } },
- { 0b11110101, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level5 } },
- { 0b11110111, { OMX_VIDEO_MPEG4ProfileAdvancedSimple, OMX_VIDEO_MPEG4Level3b } },
+ { 0b11100001, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level1 } },
+ { 0b11100010, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level2 } },
+ { 0b11100011, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level3 } },
+ { 0b11100100, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level4 } },
+ { 0b11100101, { XXX_MPEG4ProfileCoreStudio, MPEG4Level1 } },
+ { 0b11100110, { XXX_MPEG4ProfileCoreStudio, MPEG4Level2 } },
+ { 0b11100111, { XXX_MPEG4ProfileCoreStudio, MPEG4Level3 } },
+ { 0b11101000, { XXX_MPEG4ProfileCoreStudio, MPEG4Level4 } },
+ { 0b11101011, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level5 } },
+ { 0b11101100, { XXX_MPEG4ProfileSimpleStudio, MPEG4Level6 } }, */
+ { 0b11110000, { MPEG4ProfileAdvancedSimple, MPEG4Level0 } },
+ { 0b11110001, { MPEG4ProfileAdvancedSimple, MPEG4Level1 } },
+ { 0b11110010, { MPEG4ProfileAdvancedSimple, MPEG4Level2 } },
+ { 0b11110011, { MPEG4ProfileAdvancedSimple, MPEG4Level3 } },
+ { 0b11110100, { MPEG4ProfileAdvancedSimple, MPEG4Level4 } },
+ { 0b11110101, { MPEG4ProfileAdvancedSimple, MPEG4Level5 } },
+ { 0b11110111, { MPEG4ProfileAdvancedSimple, MPEG4Level3b } },
/* deprecated
- { 0b11111000, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level0 } },
- { 0b11111001, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level1 } },
- { 0b11111010, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level2 } },
- { 0b11111011, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level3 } },
- { 0b11111100, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level4 } },
- { 0b11111101, { XXX_MPEG4ProfileFineGranularityScalable, OMX_VIDEO_MPEG4Level5 } }, */
+ { 0b11111000, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level0 } },
+ { 0b11111001, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level1 } },
+ { 0b11111010, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level2 } },
+ { 0b11111011, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level3 } },
+ { 0b11111100, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level4 } },
+ { 0b11111101, { XXX_MPEG4ProfileFineGranularityScalable, MPEG4Level5 } }, */
};
- std::pair<OMX_VIDEO_MPEG4PROFILETYPE, OMX_VIDEO_MPEG4LEVELTYPE> profileLevel;
+ std::pair<int32_t, int32_t> profileLevel;
if (table.map(indication, &profileLevel)) {
format->setInt32("profile", profileLevel.first);
format->setInt32("level", profileLevel.second);
@@ -590,19 +608,19 @@
switch (id) {
case 1 /* profileId */:
if (length >= 1) {
- const static ALookup<uint8_t, OMX_VIDEO_VP9PROFILETYPE> profiles {
- { 0, OMX_VIDEO_VP9Profile0 },
- { 1, OMX_VIDEO_VP9Profile1 },
- { 2, OMX_VIDEO_VP9Profile2 },
- { 3, OMX_VIDEO_VP9Profile3 },
+ const static ALookup<uint8_t, int32_t> profiles {
+ { 0, VP9Profile0 },
+ { 1, VP9Profile1 },
+ { 2, VP9Profile2 },
+ { 3, VP9Profile3 },
};
- const static ALookup<OMX_VIDEO_VP9PROFILETYPE, OMX_VIDEO_VP9PROFILETYPE> toHdr {
- { OMX_VIDEO_VP9Profile2, OMX_VIDEO_VP9Profile2HDR },
- { OMX_VIDEO_VP9Profile3, OMX_VIDEO_VP9Profile3HDR },
+ const static ALookup<int32_t, int32_t> toHdr {
+ { VP9Profile2, VP9Profile2HDR },
+ { VP9Profile3, VP9Profile3HDR },
};
- OMX_VIDEO_VP9PROFILETYPE profile;
+ int32_t profile;
if (profiles.map(data[0], &profile)) {
// convert to HDR profile
if (isHdr(format)) {
@@ -615,24 +633,24 @@
break;
case 2 /* levelId */:
if (length >= 1) {
- const static ALookup<uint8_t, OMX_VIDEO_VP9LEVELTYPE> levels {
- { 10, OMX_VIDEO_VP9Level1 },
- { 11, OMX_VIDEO_VP9Level11 },
- { 20, OMX_VIDEO_VP9Level2 },
- { 21, OMX_VIDEO_VP9Level21 },
- { 30, OMX_VIDEO_VP9Level3 },
- { 31, OMX_VIDEO_VP9Level31 },
- { 40, OMX_VIDEO_VP9Level4 },
- { 41, OMX_VIDEO_VP9Level41 },
- { 50, OMX_VIDEO_VP9Level5 },
- { 51, OMX_VIDEO_VP9Level51 },
- { 52, OMX_VIDEO_VP9Level52 },
- { 60, OMX_VIDEO_VP9Level6 },
- { 61, OMX_VIDEO_VP9Level61 },
- { 62, OMX_VIDEO_VP9Level62 },
+ const static ALookup<uint8_t, int32_t> levels {
+ { 10, VP9Level1 },
+ { 11, VP9Level11 },
+ { 20, VP9Level2 },
+ { 21, VP9Level21 },
+ { 30, VP9Level3 },
+ { 31, VP9Level31 },
+ { 40, VP9Level4 },
+ { 41, VP9Level41 },
+ { 50, VP9Level5 },
+ { 51, VP9Level51 },
+ { 52, VP9Level52 },
+ { 60, VP9Level6 },
+ { 61, VP9Level61 },
+ { 62, VP9Level62 },
};
- OMX_VIDEO_VP9LEVELTYPE level;
+ int32_t level;
if (levels.map(data[0], &level)) {
format->setInt32("level", level);
}
@@ -1504,10 +1522,21 @@
msg->setBuffer("csd-0", buffer);
}
- if (meta->findData(kKeyDVCC, &type, &data, &size)) {
+ if (meta->findData(kKeyDVCC, &type, &data, &size)
+ || meta->findData(kKeyDVVC, &type, &data, &size)
+ || meta->findData(kKeyDVWC, &type, &data, &size)) {
const uint8_t *ptr = (const uint8_t *)data;
ALOGV("DV: calling parseDolbyVisionProfileLevelFromDvcc with data size %zu", size);
parseDolbyVisionProfileLevelFromDvcc(ptr, size, msg);
+ sp<ABuffer> buffer = new (std::nothrow) ABuffer(size);
+ if (buffer.get() == nullptr || buffer->base() == nullptr) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer->data(), data, size);
+
+ buffer->meta()->setInt32("csd", true);
+ buffer->meta()->setInt64("timeUs", 0);
+ msg->setBuffer("csd-2", buffer);
}
*format = msg;
@@ -2009,30 +2038,147 @@
mime == MEDIA_MIMETYPE_IMAGE_AVIF) {
meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
} else if (mime == MEDIA_MIMETYPE_VIDEO_DOLBY_VISION) {
- if (msg->findBuffer("csd-2", &csd2)) {
- //dvcc should be 24
- if (csd2->size() == 24) {
- meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
- uint8_t *dvcc = csd2->data();
- const uint8_t profile = dvcc[2] >> 1;
- if (profile > 1 && profile < 9) {
- std::vector<uint8_t> hvcc(csd0size + 1024);
- size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
- meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
- } else if (DolbyVisionProfileDvav110 == profile) {
- meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
- } else {
- sp<ABuffer> csd1;
- if (msg->findBuffer("csd-1", &csd1)) {
- std::vector<char> avcc(csd0size + csd1->size() + 1024);
- size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
- meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
- }
- }
+ int32_t profile = -1;
+ uint8_t blCompatibilityId = 0xff;  // 0xff = not yet determined
+ int32_t level = 0;
+ uint8_t profileVal = 0xff;
+ uint8_t profileVal1 = 0xff;
+ uint8_t profileVal2 = 0xff;
+ constexpr size_t dvccSize = 24;
+
+ const ALookup<uint8_t, int32_t> &profiles =
+ getDolbyVisionProfileTable();
+ const ALookup<uint8_t, int32_t> &levels =
+ getDolbyVisionLevelsTable();
+
+ if (!msg->findBuffer("csd-2", &csd2)) {
+ // MP4 extractors are expected to generate a csd-2 buffer,
+ // but some encoders might not generate one; in that case
+ // we populate the track metadata dv(cc|vc|wc) from the
+ // 'profile' and 'level' info, as specified by the
+ // Dolby Vision ISOBMFF spec.
+
+ if (!msg->findInt32("profile", &profile)) {
+ ALOGE("Dolby Vision profile not found");
+ return BAD_VALUE;
}
+ msg->findInt32("level", &level);
+
+ if (profile == DolbyVisionProfileDvheSt) {
+ if (!profiles.rlookup(DolbyVisionProfileDvheSt, &profileVal)) { // dvhe.08
+ ALOGE("Dolby Vision profile lookup error");
+ return BAD_VALUE;
+ }
+ blCompatibilityId = 4;
+ } else if (profile == DolbyVisionProfileDvavSe) {
+ if (!profiles.rlookup(DolbyVisionProfileDvavSe, &profileVal)) { // dvav.09
+ ALOGE("Dolby Vision profile lookup error");
+ return BAD_VALUE;
+ }
+ blCompatibilityId = 2;
+ } else {
+ ALOGE("Dolby Vision profile look up error");
+ return BAD_VALUE;
+ }
+
+ profile = (int32_t) profileVal;
+
+ uint8_t level_val = 0;
+ if (!levels.map(level, &level_val)) {
+ ALOGE("Dolby Vision level lookup error");
+ return BAD_VALUE;
+ }
+
+ std::vector<uint8_t> dvcc(dvccSize);
+
+ dvcc[0] = 1; // major version
+ dvcc[1] = 0; // minor version
+ dvcc[2] = (uint8_t)((profile & 0x7f) << 1); // dolby vision profile
+ dvcc[2] = (uint8_t)((dvcc[2] | (uint8_t)((level_val >> 5) & 0x1)) & 0xff);
+ dvcc[3] = (uint8_t)((level_val & 0x1f) << 3); // dolby vision level
+ dvcc[3] = (uint8_t)(dvcc[3] | (1 << 2)); // rpu_present_flag
+ dvcc[3] = (uint8_t)(dvcc[3] | (1)); // bl_present_flag
+ dvcc[4] = (uint8_t)(blCompatibilityId << 4); // bl_compatibility id
+
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvheDtb, &profileVal1);
+ if (profile > (int32_t) profileVal) {
+ meta->setData(kKeyDVWC, kTypeDVWC, dvcc.data(), dvccSize);
+ } else if (profile > (int32_t) profileVal1) {
+ meta->setData(kKeyDVVC, kTypeDVVC, dvcc.data(), dvccSize);
+ } else {
+ meta->setData(kKeyDVCC, kTypeDVCC, dvcc.data(), dvccSize);
+ }
+
} else {
- ALOGE("We need csd-2!!. %s", msg->debugString().c_str());
- return BAD_VALUE;
+ // we have csd-2, just use that to populate dvcc
+ if (csd2->size() == dvccSize) {
+ uint8_t *dvcc = csd2->data();
+ profile = dvcc[2] >> 1;
+
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvheDtb, &profileVal1);
+ if (profile > (int32_t) profileVal) {
+ meta->setData(kKeyDVWC, kTypeDVWC, csd2->data(), csd2->size());
+ } else if (profile > (int32_t) profileVal1) {
+ meta->setData(kKeyDVVC, kTypeDVVC, csd2->data(), csd2->size());
+ } else {
+ meta->setData(kKeyDVCC, kTypeDVCC, csd2->data(), csd2->size());
+ }
+
+ } else {
+ ALOGE("Convert MessageToMetadata csd-2 is present but not valid");
+ return BAD_VALUE;
+ }
+ }
+ profiles.rlookup(DolbyVisionProfileDvavPen, &profileVal);
+ profiles.rlookup(DolbyVisionProfileDvavSe, &profileVal1);
+ profiles.rlookup(DolbyVisionProfileDvav110, &profileVal2);
+ if ((profile > (int32_t) profileVal) && (profile < (int32_t) profileVal1)) {
+ std::vector<uint8_t> hvcc(csd0size + 1024);
+ size_t outsize = reassembleHVCC(csd0, hvcc.data(), hvcc.size(), 4);
+ meta->setData(kKeyHVCC, kTypeHVCC, hvcc.data(), outsize);
+ } else if (profile == (int32_t) profileVal2) {
+ meta->setData(kKeyAV1C, 0, csd0->data(), csd0->size());
+ } else {
+ sp<ABuffer> csd1;
+ if (msg->findBuffer("csd-1", &csd1)) {
+ std::vector<char> avcc(csd0size + csd1->size() + 1024);
+ size_t outsize = reassembleAVCC(csd0, csd1, avcc.data());
+ meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+ } else {
+ // for dolby vision avc, csd0 also holds csd1
+ size_t i = 0;
+ int csd0realsize = 0;
+ do {
+ i = findNextNalStartCode(csd0->data() + i,
+ csd0->size() - i) - csd0->data();
+ if (i > 0) {
+ csd0realsize = i;
+ break;
+ }
+ i += 4;
+ } while (i < csd0->size());
+ // buffer0 -> csd0
+ sp<ABuffer> buffer0 = new (std::nothrow) ABuffer(csd0realsize);
+ if (buffer0.get() == NULL || buffer0->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer0->data(), csd0->data(), csd0realsize);
+ // buffer1 -> csd1
+ sp<ABuffer> buffer1 = new (std::nothrow)
+ ABuffer(csd0->size() - csd0realsize);
+ if (buffer1.get() == NULL || buffer1->base() == NULL) {
+ return NO_MEMORY;
+ }
+ memcpy(buffer1->data(), csd0->data()+csd0realsize,
+ csd0->size() - csd0realsize);
+
+ std::vector<char> avcc(csd0->size() + 1024);
+ size_t outsize = reassembleAVCC(buffer0, buffer1, avcc.data());
+ meta->setData(kKeyAVCC, kTypeAVCC, avcc.data(), outsize);
+ }
}
} else if (mime == MEDIA_MIMETYPE_VIDEO_VP9) {
meta->setData(kKeyVp9CodecPrivate, 0, csd0->data(), csd0->size());
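The dvcC reconstruction above packs profile and level across bytes 2 and 3: profile in byte 2 bits 7..1, the level's top bit in byte 2 bit 0, and the level's low 5 bits in byte 3 bits 7..3 ahead of the rpu/bl flags. A standalone round-trip check of that packing (field values hypothetical):

```cpp
#include <cassert>
#include <cstdint>

int main() {
    uint8_t profile = 8, level = 5, blCompatibilityId = 4;
    uint8_t dvcc2 = (uint8_t)(((profile & 0x7f) << 1) | ((level >> 5) & 0x1));
    uint8_t dvcc3 = (uint8_t)(((level & 0x1f) << 3) | (1 << 2) | 1);  // rpu|bl flags
    uint8_t dvcc4 = (uint8_t)(blCompatibilityId << 4);
    // Decoding recovers the same fields, as the parsing side does.
    assert(((dvcc2 >> 1) & 0x7f) == profile);
    assert((uint8_t)(((dvcc2 & 0x1) << 5) | (dvcc3 >> 3)) == level);
    assert((dvcc4 >> 4) == blCompatibilityId);
    return 0;
}
```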
@@ -2173,29 +2319,29 @@
}
struct aac_format_conv_t {
- OMX_AUDIO_AACPROFILETYPE eAacProfileType;
+ int32_t eAacProfileType;
audio_format_t format;
};
static const struct aac_format_conv_t profileLookup[] = {
- { OMX_AUDIO_AACObjectMain, AUDIO_FORMAT_AAC_MAIN},
- { OMX_AUDIO_AACObjectLC, AUDIO_FORMAT_AAC_LC},
- { OMX_AUDIO_AACObjectSSR, AUDIO_FORMAT_AAC_SSR},
- { OMX_AUDIO_AACObjectLTP, AUDIO_FORMAT_AAC_LTP},
- { OMX_AUDIO_AACObjectHE, AUDIO_FORMAT_AAC_HE_V1},
- { OMX_AUDIO_AACObjectScalable, AUDIO_FORMAT_AAC_SCALABLE},
- { OMX_AUDIO_AACObjectERLC, AUDIO_FORMAT_AAC_ERLC},
- { OMX_AUDIO_AACObjectLD, AUDIO_FORMAT_AAC_LD},
- { OMX_AUDIO_AACObjectHE_PS, AUDIO_FORMAT_AAC_HE_V2},
- { OMX_AUDIO_AACObjectELD, AUDIO_FORMAT_AAC_ELD},
- { OMX_AUDIO_AACObjectXHE, AUDIO_FORMAT_AAC_XHE},
- { OMX_AUDIO_AACObjectNull, AUDIO_FORMAT_AAC},
+ { AACObjectMain, AUDIO_FORMAT_AAC_MAIN},
+ { AACObjectLC, AUDIO_FORMAT_AAC_LC},
+ { AACObjectSSR, AUDIO_FORMAT_AAC_SSR},
+ { AACObjectLTP, AUDIO_FORMAT_AAC_LTP},
+ { AACObjectHE, AUDIO_FORMAT_AAC_HE_V1},
+ { AACObjectScalable, AUDIO_FORMAT_AAC_SCALABLE},
+ { AACObjectERLC, AUDIO_FORMAT_AAC_ERLC},
+ { AACObjectLD, AUDIO_FORMAT_AAC_LD},
+ { AACObjectHE_PS, AUDIO_FORMAT_AAC_HE_V2},
+ { AACObjectELD, AUDIO_FORMAT_AAC_ELD},
+ { AACObjectXHE, AUDIO_FORMAT_AAC_XHE},
+ { AACObjectNull, AUDIO_FORMAT_AAC},
};
void mapAACProfileToAudioFormat( audio_format_t& format, uint64_t eAacProfile)
{
-const struct aac_format_conv_t* p = &profileLookup[0];
- while (p->eAacProfileType != OMX_AUDIO_AACObjectNull) {
+ const struct aac_format_conv_t* p = &profileLookup[0];
+ while (p->eAacProfileType != AACObjectNull) {
if (eAacProfile == p->eAacProfileType) {
format = p->format;
return;
@@ -2235,7 +2381,7 @@
// Offloading depends on audio DSP capabilities.
int32_t aacaot = -1;
if (meta->findInt32(kKeyAACAOT, &aacaot)) {
- mapAACProfileToAudioFormat(info->format,(OMX_AUDIO_AACPROFILETYPE) aacaot);
+ mapAACProfileToAudioFormat(info->format, aacaot);
}
int32_t srate = -1;
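mapAACProfileToAudioFormat() walks the conversion table until it hits the AACObjectNull sentinel. The same sentinel-terminated walk as a standalone sketch; the enums are local stand-ins, not the real AAC or audio_format_t constants:

```cpp
#include <cstdint>

enum : int32_t { kAacNull = 0, kAacLC = 2, kAacHE = 5 };  // stand-in object types
enum Format : int { kFmtAac, kFmtAacLC, kFmtAacHEv1 };    // stand-in formats

struct Conv { int32_t profile; Format format; };
static const Conv kTable[] = {
    { kAacLC,   kFmtAacLC   },
    { kAacHE,   kFmtAacHEv1 },
    { kAacNull, kFmtAac     },  // sentinel row terminates the walk
};

Format mapProfile(int32_t profile) {
    const Conv *p = kTable;
    while (p->profile != kAacNull) {
        if (profile == p->profile) return p->format;
        ++p;
    }
    return kFmtAac;             // default when the profile is unrecognized
}
```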
diff --git a/media/libstagefright/colorconversion/ColorConverter.cpp b/media/libstagefright/colorconversion/ColorConverter.cpp
index c7dc415..6004cf8 100644
--- a/media/libstagefright/colorconversion/ColorConverter.cpp
+++ b/media/libstagefright/colorconversion/ColorConverter.cpp
@@ -23,6 +23,7 @@
#include <media/stagefright/foundation/ALooper.h>
#include <media/stagefright/foundation/ColorUtils.h>
#include <media/stagefright/ColorConverter.h>
+#include <media/stagefright/MediaCodecConstants.h>
#include <media/stagefright/MediaErrors.h>
#include "libyuv/convert_from.h"
@@ -51,13 +52,17 @@
static bool isRGB(OMX_COLOR_FORMATTYPE colorFormat) {
return colorFormat == OMX_COLOR_Format16bitRGB565
|| colorFormat == OMX_COLOR_Format32BitRGBA8888
- || colorFormat == OMX_COLOR_Format32bitBGRA8888;
+ || colorFormat == OMX_COLOR_Format32bitBGRA8888
+ || colorFormat == COLOR_Format32bitABGR2101010;
}
bool ColorConverter::ColorSpace::isBt709() {
return (mStandard == ColorUtils::kColorStandardBT709);
}
+bool ColorConverter::ColorSpace::isBt2020() {
+ return (mStandard == ColorUtils::kColorStandardBT2020);
+}
bool ColorConverter::ColorSpace::isJpeg() {
return ((mStandard == ColorUtils::kColorStandardBT601_625)
@@ -70,16 +75,19 @@
: mSrcFormat(from),
mDstFormat(to),
mSrcColorSpace({0, 0, 0}),
- mClip(NULL) {
+ mClip(NULL),
+ mClip10Bit(NULL) {
}
ColorConverter::~ColorConverter() {
delete[] mClip;
mClip = NULL;
+ delete[] mClip10Bit;
+ mClip10Bit = NULL;
}
bool ColorConverter::isValid() const {
- switch (mSrcFormat) {
+ switch ((int32_t)mSrcFormat) {
case OMX_COLOR_FormatYUV420Planar16:
if (mDstFormat == OMX_COLOR_FormatYUV444Y410) {
return true;
@@ -102,6 +110,8 @@
#else
return mDstFormat == OMX_COLOR_Format16bitRGB565;
#endif
+ case COLOR_FormatYUVP010:
+ return mDstFormat == COLOR_Format32bitABGR2101010;
default:
return false;
@@ -143,9 +153,10 @@
mCropTop(cropTop),
mCropRight(cropRight),
mCropBottom(cropBottom) {
- switch(mColorFormat) {
+ switch((int32_t)mColorFormat) {
case OMX_COLOR_Format16bitRGB565:
case OMX_COLOR_FormatYUV420Planar16:
+ case COLOR_FormatYUVP010:
case OMX_COLOR_FormatCbYCrY:
mBpp = 2;
mStride = 2 * mWidth;
@@ -153,6 +164,7 @@
case OMX_COLOR_Format32bitBGRA8888:
case OMX_COLOR_Format32BitRGBA8888:
+ case COLOR_Format32bitABGR2101010:
case OMX_COLOR_FormatYUV444Y410:
mBpp = 4;
mStride = 4 * mWidth;
@@ -213,7 +225,7 @@
status_t err;
- switch (mSrcFormat) {
+ switch ((int32_t)mSrcFormat) {
case OMX_COLOR_FormatYUV420Planar:
#ifdef USE_LIBYUV
err = convertYUV420PlanarUseLibYUV(src, dst);
@@ -235,6 +247,19 @@
break;
}
+ case COLOR_FormatYUVP010:
+ {
+#if PERF_PROFILING
+ int64_t startTimeUs = ALooper::GetNowUs();
+#endif
+ err = convertYUVP010(src, dst);
+#if PERF_PROFILING
+ int64_t endTimeUs = ALooper::GetNowUs();
+ ALOGD("convertYUVP010 took %lld us", (long long) (endTimeUs - startTimeUs));
+#endif
+ break;
+ }
+
case OMX_COLOR_FormatCbYCrY:
err = convertCbYCrY(src, dst);
break;
@@ -439,23 +464,23 @@
}
std::function<void (void *, bool, signed, signed, signed, signed, signed, signed)>
-getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, uint8_t *kAdjustedClip) {
- switch (dstFormat) {
+getWriteToDst(OMX_COLOR_FORMATTYPE dstFormat, void *kAdjustedClip) {
+ switch ((int)dstFormat) {
case OMX_COLOR_Format16bitRGB565:
{
return [kAdjustedClip](void *dst_ptr, bool uncropped,
signed r1, signed g1, signed b1,
signed r2, signed g2, signed b2) {
uint32_t rgb1 =
- ((kAdjustedClip[r1] >> 3) << 11)
- | ((kAdjustedClip[g1] >> 2) << 5)
- | (kAdjustedClip[b1] >> 3);
+ ((((uint8_t *)kAdjustedClip)[r1] >> 3) << 11)
+ | ((((uint8_t *)kAdjustedClip)[g1] >> 2) << 5)
+ | (((uint8_t *)kAdjustedClip)[b1] >> 3);
if (uncropped) {
uint32_t rgb2 =
- ((kAdjustedClip[r2] >> 3) << 11)
- | ((kAdjustedClip[g2] >> 2) << 5)
- | (kAdjustedClip[b2] >> 3);
+ ((((uint8_t *)kAdjustedClip)[r2] >> 3) << 11)
+ | ((((uint8_t *)kAdjustedClip)[g2] >> 2) << 5)
+ | (((uint8_t *)kAdjustedClip)[b2] >> 3);
*(uint32_t *)dst_ptr = (rgb2 << 16) | rgb1;
} else {
@@ -469,16 +494,16 @@
signed r1, signed g1, signed b1,
signed r2, signed g2, signed b2) {
((uint32_t *)dst_ptr)[0] =
- (kAdjustedClip[r1])
- | (kAdjustedClip[g1] << 8)
- | (kAdjustedClip[b1] << 16)
+ (((uint8_t *)kAdjustedClip)[r1])
+ | (((uint8_t *)kAdjustedClip)[g1] << 8)
+ | (((uint8_t *)kAdjustedClip)[b1] << 16)
| (0xFF << 24);
if (uncropped) {
((uint32_t *)dst_ptr)[1] =
- (kAdjustedClip[r2])
- | (kAdjustedClip[g2] << 8)
- | (kAdjustedClip[b2] << 16)
+ (((uint8_t *)kAdjustedClip)[r2])
+ | (((uint8_t *)kAdjustedClip)[g2] << 8)
+ | (((uint8_t *)kAdjustedClip)[b2] << 16)
| (0xFF << 24);
}
};
@@ -489,20 +514,41 @@
signed r1, signed g1, signed b1,
signed r2, signed g2, signed b2) {
((uint32_t *)dst_ptr)[0] =
- (kAdjustedClip[b1])
- | (kAdjustedClip[g1] << 8)
- | (kAdjustedClip[r1] << 16)
+ (((uint8_t *)kAdjustedClip)[b1])
+ | (((uint8_t *)kAdjustedClip)[g1] << 8)
+ | (((uint8_t *)kAdjustedClip)[r1] << 16)
| (0xFF << 24);
if (uncropped) {
((uint32_t *)dst_ptr)[1] =
- (kAdjustedClip[b2])
- | (kAdjustedClip[g2] << 8)
- | (kAdjustedClip[r2] << 16)
+ (((uint8_t *)kAdjustedClip)[b2])
+ | (((uint8_t *)kAdjustedClip)[g2] << 8)
+ | (((uint8_t *)kAdjustedClip)[r2] << 16)
| (0xFF << 24);
}
};
}
+ case COLOR_Format32bitABGR2101010:
+ {
+ return [kAdjustedClip](void *dst_ptr, bool uncropped,
+ signed r1, signed g1, signed b1,
+ signed r2, signed g2, signed b2) {
+ ((uint32_t *)dst_ptr)[0] =
+ (((uint16_t *)kAdjustedClip)[r1])
+ | (((uint16_t *)kAdjustedClip)[g1] << 10)
+ | (((uint16_t *)kAdjustedClip)[b1] << 20)
+ | (3 << 30);
+
+ if (uncropped) {
+ ((uint32_t *)dst_ptr)[1] =
+ (((uint16_t *)kAdjustedClip)[r2])
+ | (((uint16_t *)kAdjustedClip)[g2] << 10)
+ | (((uint16_t *)kAdjustedClip)[b2] << 20)
+ | (3 << 30);
+ }
+ };
+ }
+
default:
TRESPASS();
}
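
For reference, the new ABGR2101010 writer above packs three clipped 10-bit components and a constant 2-bit opaque alpha into one 32-bit word. A minimal standalone sketch of the same bit layout (the function name is illustrative, not part of the patch):

    #include <cstdint>

    // RGBA_1010102 layout: bits 0-9 = R, 10-19 = G, 20-29 = B, 30-31 = alpha.
    // Inputs must already be clipped to 0..1023, as the mClip10Bit table ensures.
    static inline uint32_t packABGR2101010(uint16_t r, uint16_t g, uint16_t b) {
        return (uint32_t)r | ((uint32_t)g << 10) | ((uint32_t)b << 20) | (3u << 30);
    }

The 3 << 30 term sets the 2-bit alpha field to its maximum value, i.e. fully opaque.
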
@@ -514,7 +560,7 @@
uint8_t *kAdjustedClip = initClip();
auto readFromSrc = getReadFromSrc(mSrcFormat);
- auto writeToDst = getWriteToDst(mDstFormat, kAdjustedClip);
+ auto writeToDst = getWriteToDst(mDstFormat, (void *)kAdjustedClip);
uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
@@ -591,34 +637,116 @@
return convertYUV420Planar(src, dst);
}
-/*
- * Pack 10-bit YUV into RGBA_1010102.
- *
- * Media sends 10-bit YUV in a RGBA_1010102 format buffer. SF will handle
- * the conversion to RGB using RenderEngine fallback.
- *
- * We do not perform a YUV->RGB conversion here, however the conversion with
- * BT2020 to Full range is below for reference:
- *
- * B = 1.168 *(Y - 64) + 2.148 *(U - 512)
- * G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
- * R = 1.168 *(Y - 64) + 1.683 *(V - 512)
- *
- * B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
- * G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
- * R = .................... + 1723/1024 *(V - 512)
- *
- * min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
- * min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
- * min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
- *
- * max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
- * max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
- * max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
- *
- * clip range -1175 .. 2218
- *
- */
+status_t ColorConverter::convertYUVP010(
+ const BitmapParams &src, const BitmapParams &dst) {
+ if (mDstFormat == COLOR_Format32bitABGR2101010) {
+ return convertYUVP010ToRGBA1010102(src, dst);
+ }
+
+ return ERROR_UNSUPPORTED;
+}
+
+status_t ColorConverter::convertYUVP010ToRGBA1010102(
+ const BitmapParams &src, const BitmapParams &dst) {
+ uint16_t *kAdjustedClip10bit = initClip10Bit();
+
+ // Note: getReadFromSrc() is not needed here; the P010 reads below are done inline.
+ auto writeToDst = getWriteToDst(mDstFormat, (void *)kAdjustedClip10bit);
+
+ uint8_t *dst_ptr = (uint8_t *)dst.mBits
+ + dst.mCropTop * dst.mStride + dst.mCropLeft * dst.mBpp;
+
+ uint16_t *src_y = (uint16_t *)((uint8_t *)src.mBits
+ + src.mCropTop * src.mStride + src.mCropLeft * src.mBpp);
+
+ uint16_t *src_uv = (uint16_t *)((uint8_t *)src.mBits
+ + src.mStride * src.mHeight
+ + (src.mCropTop / 2) * src.mStride + src.mCropLeft * src.mBpp);
+
+ // BT.2020 Limited Range conversion
+
+ // B = 1.168 *(Y - 64) + 2.148 *(U - 512)
+ // G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
+ // R = 1.168 *(Y - 64) + 1.683 *(V - 512)
+
+ // B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
+ // G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
+ // R = .................... + 1723/1024 *(V - 512)
+
+ // min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
+ // min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
+ // min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
+
+ // max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
+ // max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
+ // max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
+
+ // clip range -1175 .. 2218
+
+ // BT.709 Limited Range conversion
+
+ // B = 1.164 * (Y - 64) + 2.018 * (U - 512)
+ // G = 1.164 * (Y - 64) - 0.813 * (V - 512) - 0.391 * (U - 512)
+ // R = 1.164 * (Y - 64) + 1.596 * (V - 512)
+
+ // B = 1192/1024 * (Y - 64) + 2068/1024 * (U - 512)
+ // G = .................... - 832/1024 * (V - 512) - 400/1024 * (U - 512)
+ // R = .................... + 1636/1024 * (V - 512)
+
+ // min_B = (1192 * (- 64) + 2068 * (- 512)) / 1024 = -1108
+
+ // max_B = (1192 * (1023 - 64) + 2068 * (1023 - 512)) / 1024 = 2148
+
+ // clip range -1108 .. 2148
+
+ signed mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
+ if (!mSrcColorSpace.isBt2020()) {
+ mY = 1192;
+ mU_B = 2068;
+ mV_G = -832;
+ mV_R = 1636;
+ mU_G = -400;
+ }
+ for (size_t y = 0; y < src.cropHeight(); ++y) {
+ for (size_t x = 0; x < src.cropWidth(); x += 2) {
+ signed y1, y2, u, v;
+ y1 = (src_y[x] >> 6) - 64;
+ y2 = (src_y[x + 1] >> 6) - 64;
+ u = int(src_uv[x] >> 6) - 512;
+ v = int(src_uv[x + 1] >> 6) - 512;
+
+ signed u_b = u * mU_B;
+ signed u_g = u * mU_G;
+ signed v_g = v * mV_G;
+ signed v_r = v * mV_R;
+
+ signed tmp1 = y1 * mY;
+ signed b1 = (tmp1 + u_b) / 1024;
+ signed g1 = (tmp1 + v_g + u_g) / 1024;
+ signed r1 = (tmp1 + v_r) / 1024;
+
+ signed tmp2 = y2 * mY;
+ signed b2 = (tmp2 + u_b) / 1024;
+ signed g2 = (tmp2 + v_g + u_g) / 1024;
+ signed r2 = (tmp2 + v_r) / 1024;
+
+ bool uncropped = x + 1 < src.cropWidth();
+
+ writeToDst(dst_ptr + x * dst.mBpp, uncropped, r1, g1, b1, r2, g2, b2);
+ }
+
+ src_y += src.mStride / 2;
+
+ if (y & 1) {
+ src_uv += src.mStride / 2;
+ }
+
+ dst_ptr += dst.mStride;
+ }
+
+ return OK;
+}
+
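
The constants above are the limited-range YUV-to-RGB matrices scaled by 1024 so the per-pixel math stays in integers (1.168 is approximately 1196/1024, 1.683 approximately 1723/1024, and so on), and P010 stores each 10-bit sample in the upper bits of a uint16_t, which is why every read shifts right by 6 first. A quick single-pixel sanity check under those assumptions:

    #include <cassert>
    #include <cstdint>

    int main() {
        uint16_t raw_y = 512 << 6;   // P010: 10-bit value 512 stored in the top bits
        int y = (raw_y >> 6) - 64;   // remove the limited-range luma bias -> 448
        int v = 0;                   // neutral chroma, i.e. (512 - 512)
        // BT.2020: R = 1.168*Y' + 1.683*V'  ->  (1196*Y' + 1723*V') / 1024
        int r = (1196 * y + 1723 * v) / 1024;
        assert(r == 523);            // 1196 * 448 / 1024, a mid-grey as expected
        return 0;
    }
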
#if !USE_NEON_Y410
@@ -1033,4 +1161,19 @@
return &mClip[-kClipMin];
}
+uint16_t *ColorConverter::initClip10Bit() {
+ static const signed kClipMin = -1176;
+ static const signed kClipMax = 2219;
+
+ if (mClip10Bit == NULL) {
+ mClip10Bit = new uint16_t[kClipMax - kClipMin + 1];
+
+ for (signed i = kClipMin; i <= kClipMax; ++i) {
+ mClip10Bit[i - kClipMin] = (i < 0) ? 0 : (i > 1023) ? 1023 : (uint16_t)i;
+ }
+ }
+
+ return &mClip10Bit[-kClipMin];
+}
+
} // namespace android
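
initClip10Bit() uses the same offset-pointer idiom as the existing 8-bit initClip(): the table spans the full pre-clip range (-1176..2219) and the returned pointer is shifted by -kClipMin, so getWriteToDst() can index it directly with raw, possibly negative matrix outputs and get a branch-free clamp to 0..1023. A minimal sketch of the idiom (values mirror the function above):

    #include <cstdint>

    static uint16_t sTable[2219 + 1176 + 1];

    static uint16_t *makeClip10() {
        for (int i = -1176; i <= 2219; ++i) {
            sTable[i + 1176] = (i < 0) ? 0 : (i > 1023) ? 1023 : (uint16_t)i;
        }
        return &sTable[1176];   // clip[-1176] .. clip[2219] are all valid indices
    }

With clip = makeClip10(), clip[-500] yields 0 and clip[1500] yields 1023.
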
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index a4e3425..53ca4e7 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -91,11 +91,11 @@
<MediaCodec name="c2.android.mpeg4.decoder" type="video/mp4v-es">
<Alias name="OMX.google.mpeg4.decoder" />
<!-- profiles and levels: ProfileSimple : Level3 -->
- <Limit name="size" min="2x2" max="352x288" />
+ <Limit name="size" min="2x2" max="1920x1920" />
<Limit name="alignment" value="2x2" />
- <Limit name="block-size" value="16x16" />
- <Limit name="blocks-per-second" range="12-11880" />
- <Limit name="bitrate" range="1-384000" />
+ <Limit name="block-count" range="1-14400" />
+ <Limit name="blocks-per-second" range="1-432000" />
+ <Limit name="bitrate" range="1-40000000" />
<Feature name="adaptive-playback" />
</MediaCodec>
<MediaCodec name="c2.android.h263.decoder" type="video/3gpp">
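
The raised mpeg4 decoder limits are internally consistent, assuming the default 16x16 block size that the removed block-size line made explicit:

    (1920 / 16) * (1920 / 16) = 120 * 120 = 14400 blocks   (the block-count ceiling)
    14400 blocks * 30 fps     = 432000                     (the blocks-per-second ceiling)

so the new caps correspond to 1920x1920 at 30 frames per second.
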
diff --git a/media/libstagefright/foundation/ColorUtils.cpp b/media/libstagefright/foundation/ColorUtils.cpp
index fa722b5..6dc8157 100644
--- a/media/libstagefright/foundation/ColorUtils.cpp
+++ b/media/libstagefright/foundation/ColorUtils.cpp
@@ -590,9 +590,10 @@
uint32_t gfxRange = range;
uint32_t gfxStandard = standard;
uint32_t gfxTransfer = transfer;
- // TRICKY: use & to ensure all three mappings are completed
- if (!(sGfxRanges.map(range, &gfxRange) & sGfxStandards.map(standard, &gfxStandard)
- & sGfxTransfers.map(transfer, &gfxTransfer))) {
+ bool mappedRange = sGfxRanges.map(range, &gfxRange);
+ bool mappedStandard = sGfxStandards.map(standard, &gfxStandard);
+ bool mappedTransfer = sGfxTransfers.map(transfer, &gfxTransfer);
+ if (!(mappedRange && mappedStandard && mappedTransfer)) {
ALOGW("could not safely map platform color aspects (R:%u(%s) S:%u(%s) T:%u(%s) to "
"graphics dataspace (R:%u S:%u T:%u)",
range, asString(range), standard, asString(standard), transfer, asString(transfer),
@@ -626,9 +627,10 @@
CU::ColorRange cuRange = CU::kColorRangeUnspecified;
CU::ColorStandard cuStandard = CU::kColorStandardUnspecified;
CU::ColorTransfer cuTransfer = CU::kColorTransferUnspecified;
- // TRICKY: use & to ensure all three mappings are completed
- if (!(sGfxRanges.map(gfxRange, &cuRange) & sGfxStandards.map(gfxStandard, &cuStandard)
- & sGfxTransfers.map(gfxTransfer, &cuTransfer))) {
+ bool mappedRange = sGfxRanges.map(gfxRange, &cuRange);
+ bool mappedStandard = sGfxStandards.map(gfxStandard, &cuStandard);
+ bool mappedTransfer = sGfxTransfers.map(gfxTransfer, &cuTransfer);
+ if (!(mappedRange && mappedStandard && mappedTransfer)) {
ALOGW("could not safely map graphics dataspace (R:%u S:%u T:%u) to "
"platform color aspects (R:%u(%s) S:%u(%s) T:%u(%s)",
gfxRange, gfxStandard, gfxTransfer,
@@ -781,5 +783,14 @@
return true;
}
+// static
+bool ColorUtils::isHDRStaticInfoValid(HDRStaticInfo *info) {
+ if (info->sType1.mMaxDisplayLuminance > 0.0f
+ && info->sType1.mMinDisplayLuminance > 0.0f) return true;
+ if (info->sType1.mMaxContentLightLevel > 0.0f
+ && info->sType1.mMaxFrameAverageLightLevel > 0.0f) return true;
+ return false;
+}
+
} // namespace android
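
The new helper accepts static info as valid when either the mastering-display luminance pair or the content light level pair is populated. A hedged usage sketch (the setup values are illustrative only):

    HDRStaticInfo info;
    memset(&info, 0, sizeof(info));
    info.sType1.mMaxContentLightLevel = 1000;      // MaxCLL
    info.sType1.mMaxFrameAverageLightLevel = 120;  // MaxFALL
    // true: the CLL/FALL pair is set even though display luminance is not
    bool valid = ColorUtils::isHDRStaticInfoValid(&info);
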
diff --git a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
index a2b6c4f..72c8074 100644
--- a/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
+++ b/media/libstagefright/foundation/include/media/stagefright/foundation/ColorUtils.h
@@ -193,6 +193,9 @@
static void setHDRStaticInfoIntoAMediaFormat(const HDRStaticInfo &info, AMediaFormat *format);
// (internal) used by the setHDRStaticInfoInfo* routines
static void fillHdrStaticInfoBuffer( const HDRStaticInfo &info, uint8_t *data);
+
+ // determine whether HDR static info is valid
+ static bool isHDRStaticInfoValid(HDRStaticInfo *info);
};
inline static const char *asString(android::ColorUtils::ColorStandard i, const char *def = "??") {
diff --git a/media/libstagefright/id3/TEST_MAPPING b/media/libstagefright/id3/TEST_MAPPING
index d82d26e..6106908 100644
--- a/media/libstagefright/id3/TEST_MAPPING
+++ b/media/libstagefright/id3/TEST_MAPPING
@@ -9,14 +9,15 @@
"presubmit-large": [
// this doesn't seem to run any tests.
- // but: cts-tradefed run -m CtsMediaTestCases -t android.media.cts.MediaMetadataRetrieverTest
+ // but: cts-tradefed run -m CtsMediaMiscTestCases -t \
+ // android.media.misc.cts.MediaMetadataRetrieverTest
// does run the 32 and 64 bit tests, but not the instant tests
// but all I know is that with 'atest', it's not running
{
- "name": "CtsMediaTestCases",
+ "name": "CtsMediaMiscTestCases",
"options": [
{
- "include-filter": "android.media.cts.MediaMetadataRetrieverTest"
+ "include-filter": "android.media.misc.cts.MediaMetadataRetrieverTest"
}
]
}
diff --git a/media/libstagefright/include/media/stagefright/ColorConverter.h b/media/libstagefright/include/media/stagefright/ColorConverter.h
index 75b0d8e..1d86a22 100644
--- a/media/libstagefright/include/media/stagefright/ColorConverter.h
+++ b/media/libstagefright/include/media/stagefright/ColorConverter.h
@@ -54,6 +54,7 @@
uint32_t mTransfer;
bool isBt709();
+ bool isBt2020();
bool isJpeg();
};
@@ -78,8 +79,10 @@
OMX_COLOR_FORMATTYPE mSrcFormat, mDstFormat;
ColorSpace mSrcColorSpace;
uint8_t *mClip;
+ uint16_t *mClip10Bit;
uint8_t *initClip();
+ uint16_t *initClip10Bit();
status_t convertCbYCrY(
const BitmapParams &src, const BitmapParams &dst);
@@ -111,6 +114,12 @@
status_t convertTIYUV420PackedSemiPlanar(
const BitmapParams &src, const BitmapParams &dst);
+ status_t convertYUVP010(
+ const BitmapParams &src, const BitmapParams &dst);
+
+ status_t convertYUVP010ToRGBA1010102(
+ const BitmapParams &src, const BitmapParams &dst);
+
ColorConverter(const ColorConverter &);
ColorConverter &operator=(const ColorConverter &);
};
diff --git a/media/libstagefright/include/media/stagefright/MediaCodec.h b/media/libstagefright/include/media/stagefright/MediaCodec.h
index d372140..ce3b0d0 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodec.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodec.h
@@ -445,6 +445,12 @@
int32_t mRotationDegrees;
int32_t mAllowFrameDroppingBySurface;
+ uint32_t mHDRMetadataFlags; /* bitmask of kFlagHDR* */
+ enum {
+ kFlagHDRStaticInfo = 1 << 0,
+ kFlagHDR10PlusInfo = 1 << 1,
+ };
+
// initial create parameters
AString mInitName;
diff --git a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
index 3a01925..9040e8b 100644
--- a/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
+++ b/media/libstagefright/include/media/stagefright/MediaCodecConstants.h
@@ -540,6 +540,9 @@
constexpr int32_t DolbyVisionLevelUhd30 = 0x40;
constexpr int32_t DolbyVisionLevelUhd48 = 0x80;
constexpr int32_t DolbyVisionLevelUhd60 = 0x100;
+constexpr int32_t DolbyVisionLevelUhd120 = 0x200;
+constexpr int32_t DolbyVisionLevel8k30 = 0x400;
+constexpr int32_t DolbyVisionLevel8k60 = 0x800;
inline static const char *asString_DolbyVisionLevel(int32_t i, const char *def = "??") {
switch (i) {
@@ -552,6 +555,9 @@
case DolbyVisionLevelUhd30: return "Uhd30";
case DolbyVisionLevelUhd48: return "Uhd48";
case DolbyVisionLevelUhd60: return "Uhd60";
+ case DolbyVisionLevelUhd120: return "Uhd120";
+ case DolbyVisionLevel8k30: return "8k30";
+ case DolbyVisionLevel8k60: return "8k60";
default: return def;
}
}
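
Note that the Dolby Vision level constants are one-hot bit values (0x100, 0x200, 0x400, 0x800, ...), so a set of supported levels can be represented as a single mask; whether a given caller does so is an assumption here, but the encoding makes tests like this possible:

    int32_t supported = DolbyVisionLevelUhd60 | DolbyVisionLevelUhd120 | DolbyVisionLevel8k30;
    bool can8k60 = (supported & DolbyVisionLevel8k60) != 0;   // false for this mask
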
@@ -586,9 +592,11 @@
constexpr int32_t COLOR_Format24bitBGR888 = 12;
constexpr int32_t COLOR_Format24bitRGB888 = 11;
constexpr int32_t COLOR_Format25bitARGB1888 = 14;
+constexpr int32_t COLOR_Format32bitABGR2101010 = 0x7F00AAA2;
constexpr int32_t COLOR_Format32bitABGR8888 = 0x7F00A000;
constexpr int32_t COLOR_Format32bitARGB8888 = 16;
constexpr int32_t COLOR_Format32bitBGRA8888 = 15;
+constexpr int32_t COLOR_Format64bitABGRFloat = 0x7F000F16;
constexpr int32_t COLOR_Format8bitRGB332 = 2;
constexpr int32_t COLOR_FormatCbYCrY = 27;
constexpr int32_t COLOR_FormatCrYCbY = 28;
@@ -642,9 +650,11 @@
case COLOR_Format24bitBGR888: return "24bitBGR888";
case COLOR_Format24bitRGB888: return "24bitRGB888";
case COLOR_Format25bitARGB1888: return "25bitARGB1888";
+ case COLOR_Format32bitABGR2101010: return "32bitABGR2101010";
case COLOR_Format32bitABGR8888: return "32bitABGR8888";
case COLOR_Format32bitARGB8888: return "32bitARGB8888";
case COLOR_Format32bitBGRA8888: return "32bitBGRA8888";
+ case COLOR_Format64bitABGRFloat: return "64bitABGRFloat";
case COLOR_Format8bitRGB332: return "8bitRGB332";
case COLOR_FormatCbYCrY: return "CbYCrY";
case COLOR_FormatCrYCbY: return "CrYCbY";
@@ -677,6 +687,7 @@
case COLOR_FormatYUV422SemiPlanar: return "YUV422SemiPlanar";
case COLOR_FormatYUV444Flexible: return "YUV444Flexible";
case COLOR_FormatYUV444Interleaved: return "YUV444Interleaved";
+ case COLOR_FormatYUVP010: return "YUVP010";
case COLOR_QCOM_FormatYUV420SemiPlanar: return "QCOM_YUV420SemiPlanar";
case COLOR_TI_FormatYUV420PackedSemiPlanar: return "TI_YUV420PackedSemiPlanar";
default: return def;
@@ -684,6 +695,7 @@
}
constexpr char FEATURE_AdaptivePlayback[] = "adaptive-playback";
+constexpr char FEATURE_EncodingStatistics[] = "encoding-statistics";
constexpr char FEATURE_IntraRefresh[] = "intra-refresh";
constexpr char FEATURE_PartialFrame[] = "partial-frame";
constexpr char FEATURE_QpBounds[] = "qp-bounds";
@@ -737,6 +749,14 @@
constexpr int32_t COLOR_TRANSFER_SDR_VIDEO = 3;
constexpr int32_t COLOR_TRANSFER_ST2084 = 6;
+constexpr int32_t PICTURE_TYPE_I = 1;
+constexpr int32_t PICTURE_TYPE_P = 2;
+constexpr int32_t PICTURE_TYPE_B = 3;
+constexpr int32_t PICTURE_TYPE_UNKNOWN = 0;
+
+constexpr int32_t VIDEO_ENCODING_STATISTICS_LEVEL_1 = 1;
+constexpr int32_t VIDEO_ENCODING_STATISTICS_LEVEL_NONE = 0;
+
constexpr char KEY_AAC_DRC_ALBUM_MODE[] = "aac-drc-album-mode";
constexpr char KEY_AAC_DRC_ATTENUATION_FACTOR[] = "aac-drc-cut-level";
constexpr char KEY_AAC_DRC_BOOST_FACTOR[] = "aac-drc-boost-level";
@@ -795,6 +815,7 @@
constexpr char KEY_OPERATING_RATE[] = "operating-rate";
constexpr char KEY_OUTPUT_REORDER_DEPTH[] = "output-reorder-depth";
constexpr char KEY_PCM_ENCODING[] = "pcm-encoding";
+constexpr char KEY_PICTURE_TYPE[] = "picture-type";
constexpr char KEY_PIXEL_ASPECT_RATIO_HEIGHT[] = "sar-height";
constexpr char KEY_PIXEL_ASPECT_RATIO_WIDTH[] = "sar-width";
constexpr char KEY_PREPEND_HEADER_TO_SYNC_FRAMES[] = "prepend-sps-pps-to-idr-frames";
@@ -811,6 +832,8 @@
constexpr char KEY_TILE_HEIGHT[] = "tile-height";
constexpr char KEY_TILE_WIDTH[] = "tile-width";
constexpr char KEY_TRACK_ID[] = "track-id";
+constexpr char KEY_VIDEO_ENCODING_STATISTICS_LEVEL[] = "video-encoding-statistics-level";
+constexpr char KEY_VIDEO_QP_AVERAGE[] = "video-qp-average";
constexpr char KEY_VIDEO_QP_B_MAX[] = "video-qp-b-max";
constexpr char KEY_VIDEO_QP_B_MIN[] = "video-qp-b-min";
constexpr char KEY_VIDEO_QP_I_MAX[] = "video-qp-i-max";
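
These additions pair up: an encoder configured with KEY_VIDEO_ENCODING_STATISTICS_LEVEL set to VIDEO_ENCODING_STATISTICS_LEVEL_1 is expected to report KEY_PICTURE_TYPE and KEY_VIDEO_QP_AVERAGE per frame. A hedged sketch using AMessage-style format access (the plumbing around it is assumed, not shown in this patch):

    inputFormat->setInt32(KEY_VIDEO_ENCODING_STATISTICS_LEVEL,
                          VIDEO_ENCODING_STATISTICS_LEVEL_1);
    // ... encode frames ...
    int32_t qp, pictureType;
    if (outputFormat->findInt32(KEY_VIDEO_QP_AVERAGE, &qp)
            && outputFormat->findInt32(KEY_PICTURE_TYPE, &pictureType)) {
        // per-frame stats: average QP plus PICTURE_TYPE_I/P/B/UNKNOWN
    }
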
diff --git a/media/libstagefright/include/media/stagefright/MetaDataBase.h b/media/libstagefright/include/media/stagefright/MetaDataBase.h
index c80012e..88c1f3f 100644
--- a/media/libstagefright/include/media/stagefright/MetaDataBase.h
+++ b/media/libstagefright/include/media/stagefright/MetaDataBase.h
@@ -60,6 +60,8 @@
kKeyAVCC = 'avcc', // raw data
kKeyHVCC = 'hvcc', // raw data
kKeyDVCC = 'dvcc', // raw data
+ kKeyDVVC = 'dvvc', // raw data
+ kKeyDVWC = 'dvwc', // raw data
kKeyAV1C = 'av1c', // raw data
kKeyThumbnailHVCC = 'thvc', // raw data
kKeyThumbnailAV1C = 'tav1', // raw data
@@ -283,6 +285,8 @@
kTypeHVCC = 'hvcc',
kTypeAV1C = 'av1c',
kTypeDVCC = 'dvcc',
+ kTypeDVVC = 'dvvc',
+ kTypeDVWC = 'dvwc',
kTypeD263 = 'd263',
kTypeHCOS = 'hcos',
};
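
The new kKeyDVVC/kKeyDVWC ('dvvc', 'dvwc') entries follow the file's FourCC convention: each key is a multi-character literal whose value packs the four ASCII bytes into an int32. The exact value is implementation-defined in C++, but under the GCC/Clang behavior this codebase relies on:

    uint32_t dvvc = 'dvvc';   // ('d' << 24) | ('v' << 16) | ('v' << 8) | 'c' = 0x64767663

'dvvc' and 'dvwc' correspond to the dvvC and dvwC Dolby Vision configuration boxes, complementing the existing 'dvcc'.
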
diff --git a/media/libstagefright/rtsp/AAVCAssembler.cpp b/media/libstagefright/rtsp/AAVCAssembler.cpp
index 3f4d662..30cdbc9 100644
--- a/media/libstagefright/rtsp/AAVCAssembler.cpp
+++ b/media/libstagefright/rtsp/AAVCAssembler.cpp
@@ -295,6 +295,10 @@
}
void AAVCAssembler::checkSpsUpdated(const sp<ABuffer> &buffer) {
+ if (buffer->size() == 0) {
+ android_errorWriteLog(0x534e4554, "204077881");
+ return;
+ }
const uint8_t *data = buffer->data();
unsigned nalType = data[0] & 0x1f;
if (nalType == 0x7) {
diff --git a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
index 7bd33c1..847d324 100644
--- a/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
+++ b/media/libstagefright/rtsp/AMPEG4ElementaryAssembler.cpp
@@ -204,7 +204,7 @@
};
bool AMPEG4ElementaryAssembler::initCheck() {
- if(mSizeLength == 0 || mIndexLength == 0 || mIndexDeltaLength == 0) {
+ if(mIsGeneric && (mSizeLength == 0 || mIndexLength == 0 || mIndexDeltaLength == 0)) {
android_errorWriteLog(0x534e4554, "124777537");
return false;
}
diff --git a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
index ac1e9b1..a8e64b6 100644
--- a/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
+++ b/media/libstagefright/tests/mediacodec/MediaCodecTest.cpp
@@ -393,3 +393,51 @@
std::this_thread::sleep_for(std::chrono::milliseconds(100));
looper->stop();
}
+
+TEST(MediaCodecTest, DeadWhileStoppingError) {
+ // Test scenario:
+ //
+ // 1) Client thread calls stop(); MediaCodec looper thread calls
+ // initiateShutdown(); shutdown is being handled at the component thread.
+ // 2) An error occurs while handling initiateShutdown().
+ // 3) MediaCodec looper thread handles the error.
+ // 4) Codec service dies after the error is handled
+ // 5) MediaCodec looper thread handles the death.
+
+ static const AString kCodecName{"test.codec"};
+ static const AString kCodecOwner{"nobody"};
+ static const AString kMediaType{"video/x-test"};
+
+ sp<MockCodec> mockCodec;
+ std::function<sp<CodecBase>(const AString &name, const char *owner)> getCodecBase =
+ [&mockCodec](const AString &, const char *) {
+ mockCodec = new MockCodec([](const std::shared_ptr<MockBufferChannel> &) {
+ // No mock setup, as we don't expect any buffer operations
+ // in this scenario.
+ });
+ ON_CALL(*mockCodec, initiateAllocateComponent(_))
+ .WillByDefault([mockCodec](const sp<AMessage> &) {
+ mockCodec->callback()->onComponentAllocated(kCodecName.c_str());
+ });
+ ON_CALL(*mockCodec, initiateShutdown(_))
+ .WillByDefault([mockCodec](bool) {
+ // 2)
+ mockCodec->callback()->onError(UNKNOWN_ERROR, ACTION_CODE_FATAL);
+ // 4)
+ mockCodec->callback()->onError(DEAD_OBJECT, ACTION_CODE_FATAL);
+ // Codec service has died, no callback.
+ });
+ return mockCodec;
+ };
+
+ sp<ALooper> looper{new ALooper};
+ sp<MediaCodec> codec = SetupMediaCodec(
+ kCodecOwner, kCodecName, kMediaType, looper, getCodecBase);
+ ASSERT_NE(nullptr, codec) << "Codec must not be null";
+ ASSERT_NE(nullptr, mockCodec) << "MockCodec must not be null";
+
+ codec->stop();
+ // sleep here so that the looper thread can handle the error
+ std::this_thread::sleep_for(std::chrono::milliseconds(100));
+ looper->stop();
+}
diff --git a/media/libstagefright/timedtext/test/Android.bp b/media/libstagefright/timedtext/test/Android.bp
index 0b632bf..60669f9 100644
--- a/media/libstagefright/timedtext/test/Android.bp
+++ b/media/libstagefright/timedtext/test/Android.bp
@@ -36,7 +36,6 @@
static_libs: [
"libstagefright_timedtext",
- "libstagefright_foundation",
],
include_dirs: [
@@ -47,6 +46,7 @@
"liblog",
"libmedia",
"libbinder",
+ "libstagefright_foundation",
],
cflags: [
diff --git a/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp b/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
index ee7af70..b97f347 100644
--- a/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
+++ b/media/libstagefright/writer_fuzzers/WriterFuzzerBase.cpp
@@ -174,10 +174,13 @@
params.sampleRate = 16000;
} else {
params.sampleRate = max(1, params.sampleRate);
+ params.channelCount = max(0, params.channelCount);
}
format->setInt32("channel-count", params.channelCount);
format->setInt32("sample-rate", params.sampleRate);
} else if (!strncmp(params.mime, "video/", 6)) {
+ params.width = max(1, params.width);
+ params.height = max(1, params.height);
format->setInt32("width", params.width);
format->setInt32("height", params.height);
}
diff --git a/media/libwatchdog/Android.bp b/media/libwatchdog/Android.bp
index 411c206..5506a73 100644
--- a/media/libwatchdog/Android.bp
+++ b/media/libwatchdog/Android.bp
@@ -39,7 +39,7 @@
darwin: {
enabled: false,
},
- linux_glibc: {
+ glibc: {
cflags: [
"-Dsigev_notify_thread_id=_sigev_un._tid",
],
diff --git a/media/mediaserver/Android.bp b/media/mediaserver/Android.bp
index e25658f..d03746d 100644
--- a/media/mediaserver/Android.bp
+++ b/media/mediaserver/Android.bp
@@ -33,7 +33,7 @@
shared_libs: [
"android.hardware.media.omx@1.0",
- "libandroidicu",
+ "libicu",
"libfmq",
"libbinder",
"libhidlbase",
diff --git a/media/ndk/Android.bp b/media/ndk/Android.bp
index 8d527e9..94e5d1f 100644
--- a/media/ndk/Android.bp
+++ b/media/ndk/Android.bp
@@ -177,10 +177,6 @@
"NdkMediaDataSourceCallbacks.cpp",
],
- include_dirs: [
- "frameworks/av/media/libstagefright/include",
- "frameworks/av/media/ndk/include",
- ],
export_include_dirs: [
"include",
@@ -193,6 +189,7 @@
],
header_libs: [
+ "libstagefright_headers",
"libmedia_headers",
],
@@ -223,6 +220,7 @@
"libcutils",
"android.hardware.graphics.bufferqueue@1.0",
],
+
header_libs: [
"libstagefright_foundation_headers",
],
@@ -230,9 +228,6 @@
cflags: [
"-D__ANDROID_VNDK__",
],
- include_dirs: [
- "frameworks/av/media/ndk/",
- ],
}
cc_library_static {
diff --git a/media/ndk/NdkMediaCodec.cpp b/media/ndk/NdkMediaCodec.cpp
index 0e2de4e..227459a 100644
--- a/media/ndk/NdkMediaCodec.cpp
+++ b/media/ndk/NdkMediaCodec.cpp
@@ -158,8 +158,7 @@
}
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
- if (mCodec->mAsyncCallbackUserData != NULL
- || mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
+ if (mCodec->mAsyncCallback.onAsyncInputAvailable != NULL) {
mCodec->mAsyncCallback.onAsyncInputAvailable(
mCodec,
mCodec->mAsyncCallbackUserData,
@@ -205,8 +204,7 @@
(uint32_t)flags};
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
- if (mCodec->mAsyncCallbackUserData != NULL
- || mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
+ if (mCodec->mAsyncCallback.onAsyncOutputAvailable != NULL) {
mCodec->mAsyncCallback.onAsyncOutputAvailable(
mCodec,
mCodec->mAsyncCallbackUserData,
@@ -234,8 +232,7 @@
AMediaFormat *aMediaFormat = AMediaFormat_fromMsg(&copy);
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
- if (mCodec->mAsyncCallbackUserData != NULL
- || mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
+ if (mCodec->mAsyncCallback.onAsyncFormatChanged != NULL) {
mCodec->mAsyncCallback.onAsyncFormatChanged(
mCodec,
mCodec->mAsyncCallbackUserData,
@@ -263,8 +260,7 @@
err, actionCode, detail.c_str());
Mutex::Autolock _l(mCodec->mAsyncCallbackLock);
- if (mCodec->mAsyncCallbackUserData != NULL
- || mCodec->mAsyncCallback.onAsyncError != NULL) {
+ if (mCodec->mAsyncCallback.onAsyncError != NULL) {
mCodec->mAsyncCallback.onAsyncError(
mCodec,
mCodec->mAsyncCallbackUserData,
@@ -474,16 +470,20 @@
AMediaCodec *mData,
AMediaCodecOnAsyncNotifyCallback callback,
void *userdata) {
- if (mData->mAsyncNotify == NULL && userdata != NULL) {
- mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
- status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
- if (err != OK) {
- ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
- return translate_error(err);
- }
- }
Mutex::Autolock _l(mData->mAsyncCallbackLock);
+
+ if (mData->mAsyncNotify == NULL) {
+ mData->mAsyncNotify = new AMessage(kWhatAsyncNotify, mData->mHandler);
+ }
+
+ // always call, codec may have been reset/re-configured since last call.
+ status_t err = mData->mCodec->setCallback(mData->mAsyncNotify);
+ if (err != OK) {
+ ALOGE("setAsyncNotifyCallback: err(%d), failed to set async callback", err);
+ return translate_error(err);
+ }
+
mData->mAsyncCallback = callback;
mData->mAsyncCallbackUserData = userdata;
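
Because setCallback() is now re-issued on every AMediaCodec_setAsyncNotifyCallback() call, clients can safely re-register after a stop/configure cycle. A minimal NDK usage sketch (handler functions elided; names are illustrative):

    AMediaCodecOnAsyncNotifyCallback cb = {
        .onAsyncInputAvailable = onInput,
        .onAsyncOutputAvailable = onOutput,
        .onAsyncFormatChanged = onFormat,
        .onAsyncError = onError,
    };
    AMediaCodec_setAsyncNotifyCallback(codec, cb, /*userdata=*/&state);
    // ... later, after AMediaCodec_stop() + AMediaCodec_configure():
    AMediaCodec_setAsyncNotifyCallback(codec, cb, &state);   // re-attaches the callback
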
diff --git a/media/ndk/NdkMediaFormat.cpp b/media/ndk/NdkMediaFormat.cpp
index 51f6c78..923453a 100644
--- a/media/ndk/NdkMediaFormat.cpp
+++ b/media/ndk/NdkMediaFormat.cpp
@@ -354,8 +354,14 @@
EXPORT const char* AMEDIAFORMAT_KEY_MIME = "mime";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG_USER_DATA = "mpeg-user-data";
EXPORT const char* AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER = "mpeg2-stream-header";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS = "mpegh-compatible-sets";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION =
+ "mpegh-profile-level-indication";
+EXPORT const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT =
+ "mpegh-reference-channel-layout";
EXPORT const char* AMEDIAFORMAT_KEY_OPERATING_RATE = "operating-rate";
EXPORT const char* AMEDIAFORMAT_KEY_PCM_ENCODING = "pcm-encoding";
+EXPORT const char* AMEDIAFORMAT_KEY_PICTURE_TYPE = "picture-type";
EXPORT const char* AMEDIAFORMAT_KEY_PRIORITY = "priority";
EXPORT const char* AMEDIAFORMAT_KEY_PROFILE = "profile";
EXPORT const char* AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN = "pcm-big-endian";
@@ -389,6 +395,9 @@
EXPORT const char* AMEDIAFORMAT_KEY_TRACK_ID = "track-id";
EXPORT const char* AMEDIAFORMAT_KEY_TRACK_INDEX = "track-index";
EXPORT const char* AMEDIAFORMAT_KEY_VALID_SAMPLES = "valid-samples";
+EXPORT const char* AMEDIAFORMAT_KEY_VIDEO_ENCODING_STATISTICS_LEVEL =
+ "video-encoding-statistics-level";
+EXPORT const char* AMEDIAFORMAT_KEY_VIDEO_QP_AVERAGE = "video-qp-average";
EXPORT const char* AMEDIAFORMAT_VIDEO_QP_B_MAX = "video-qp-b-max";
EXPORT const char* AMEDIAFORMAT_VIDEO_QP_B_MIN = "video-qp-b-min";
EXPORT const char* AMEDIAFORMAT_VIDEO_QP_I_MAX = "video-qp-i-max";
diff --git a/media/ndk/include/media/NdkMediaFormat.h b/media/ndk/include/media/NdkMediaFormat.h
index fbd855d..2195657 100644
--- a/media/ndk/include/media/NdkMediaFormat.h
+++ b/media/ndk/include/media/NdkMediaFormat.h
@@ -311,6 +311,10 @@
extern const char* AMEDIAFORMAT_KEY_LAST_SAMPLE_INDEX_IN_CHUNK __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_KEY_SAMPLE_TIME_BEFORE_APPEND __INTRODUCED_IN(31);
+extern const char* AMEDIAFORMAT_KEY_PICTURE_TYPE __INTRODUCED_IN(33);
+extern const char* AMEDIAFORMAT_KEY_VIDEO_ENCODING_STATISTICS_LEVEL __INTRODUCED_IN(33);
+extern const char* AMEDIAFORMAT_KEY_VIDEO_QP_AVERAGE __INTRODUCED_IN(33);
+
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_B_MIN __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_I_MAX __INTRODUCED_IN(31);
@@ -320,6 +324,34 @@
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MAX __INTRODUCED_IN(31);
extern const char* AMEDIAFORMAT_VIDEO_QP_P_MIN __INTRODUCED_IN(31);
+/**
+ * MPEG-H audio profile and level compatibility.
+ *
+ * See FDAmd_2 of ISO_IEC_23008-3:2019 MHAProfileAndLevelCompatibilitySetBox.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio profile level indication.
+ *
+ * See ISO_IEC_23008-3:2019 MHADecoderConfigurationRecord mpegh3daProfileLevelIndication.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION __INTRODUCED_IN(32);
+
+/**
+ * MPEG-H audio reference channel layout.
+ *
+ * See ISO_IEC_23008-3:2019 MHADecoderConfigurationRecord referenceChannelLayout
+ * and ISO_IEC_23001-8 ChannelConfiguration value.
+ *
+ * Available since API level 32.
+ */
+extern const char* AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT __INTRODUCED_IN(32);
+
__END_DECLS
#endif // _NDK_MEDIA_FORMAT_H
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index 7e9e57e..6f275c7 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -126,6 +126,9 @@
AMEDIAFORMAT_KEY_MIME; # var introduced=21
AMEDIAFORMAT_KEY_MPEG_USER_DATA; # var introduced=28
AMEDIAFORMAT_KEY_MPEG2_STREAM_HEADER; # var introduced=29
+ AMEDIAFORMAT_KEY_MPEGH_COMPATIBLE_SETS; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_PROFILE_LEVEL_INDICATION; # var introduced=32
+ AMEDIAFORMAT_KEY_MPEGH_REFERENCE_CHANNEL_LAYOUT; # var introduced=32
AMEDIAFORMAT_KEY_OPERATING_RATE; # var introduced=28
AMEDIAFORMAT_KEY_PCM_BIG_ENDIAN; # var introduced=29
AMEDIAFORMAT_KEY_PCM_ENCODING; # var introduced=28
diff --git a/media/utils/Android.bp b/media/utils/Android.bp
index e3b837e..88b822d 100644
--- a/media/utils/Android.bp
+++ b/media/utils/Android.bp
@@ -82,6 +82,36 @@
export_include_dirs: ["include"],
}
+cc_library {
+ name: "libmediautils_vendor",
+ vendor_available: true, // required for platform/hardware/interfaces
+ srcs: [
+ "MemoryLeakTrackUtil.cpp",
+ ],
+
+ cflags: [
+ "-Wall",
+ "-Wextra",
+ "-Werror",
+ ],
+ shared_libs: [
+ "liblog",
+ "libutils",
+ ],
+
+ static_libs: [
+ "libc_malloc_debug_backtrace",
+ ],
+
+ header_libs: [
+ "bionic_libc_platform_headers",
+ ],
+
+ local_include_dirs: ["include"],
+ export_include_dirs: ["include"],
+}
+
cc_library_headers {
name: "libmediautils_headers",
vendor_available: true, // required for platform/hardware/interfaces
diff --git a/services/Android.mk b/services/Android.mk
new file mode 100644
index 0000000..c86a226
--- /dev/null
+++ b/services/Android.mk
@@ -0,0 +1 @@
+$(eval $(call declare-1p-copy-files,frameworks/av/services/audiopolicy,))
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 6cdb3cd..51f39a6 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -64,6 +64,7 @@
#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
#include <system/audio_effects/effect_hapticgenerator.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <audio_utils/primitives.h>
@@ -335,12 +336,30 @@
return NO_ERROR;
}
-// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
-const media::AudioVibratorInfo* AudioFlinger::getDefaultVibratorInfo_l() {
- if (mAudioVibratorInfos.empty()) {
- return nullptr;
+status_t AudioFlinger::setDeviceConnectedState(const struct audio_port_v7 *port, bool connected) {
+ status_t final_result = NO_INIT;
+ Mutex::Autolock _l(mLock);
+ AutoMutex lock(mHardwareLock);
+ mHardwareStatus = AUDIO_HW_SET_CONNECTED_STATE;
+ for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
+ sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
+ status_t result = dev->setConnectedState(port, connected);
+ // Same logic as with setParameter: it's a success if at least one
+ // HAL module accepts the update.
+ if (final_result != NO_ERROR) {
+ final_result = result;
+ }
}
- return &mAudioVibratorInfos.front();
+ mHardwareStatus = AUDIO_HW_IDLE;
+ return final_result;
+}
+
+// getDefaultVibratorInfo_l must be called with AudioFlinger lock held.
+std::optional<media::AudioVibratorInfo> AudioFlinger::getDefaultVibratorInfo_l() {
+ if (mAudioVibratorInfos.empty()) {
+ return {};
+ }
+ return mAudioVibratorInfos.front();
}
AudioFlinger::~AudioFlinger()
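
Returning std::optional<media::AudioVibratorInfo> by value, instead of a pointer into mAudioVibratorInfos, means callers no longer hold a reference that could dangle once the AudioFlinger lock is released. Callers adapt roughly like this (a sketch; the const-reference setVibratorInfo() is the Effects.cpp change further down):

    std::optional<media::AudioVibratorInfo> info = getDefaultVibratorInfo_l();
    if (info.has_value()) {
        effect->setVibratorInfo(*info);
    }
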
@@ -695,7 +714,7 @@
// dump all hardware devs
for (size_t i = 0; i < mAudioHwDevs.size(); i++) {
sp<DeviceHalInterface> dev = mAudioHwDevs.valueAt(i)->hwDevice();
- dev->dump(fd);
+ dev->dump(fd, args);
}
mPatchPanel.dump(fd);
@@ -2456,6 +2475,10 @@
ThreadBase *thread = (ThreadBase *)mRecordThreads.valueAt(i).get();
thread->systemReady();
}
+ for (size_t i = 0; i < mMmapThreads.size(); i++) {
+ ThreadBase *thread = (ThreadBase *)mMmapThreads.valueAt(i).get();
+ thread->systemReady();
+ }
return NO_ERROR;
}
@@ -2502,7 +2525,8 @@
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig __unused,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags)
@@ -2530,16 +2554,16 @@
// Check only for Normal Mixing mode
if (kEnableExtendedPrecision) {
// Specify format (uncomment one below to choose)
- //config->format = AUDIO_FORMAT_PCM_FLOAT;
- //config->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
- //config->format = AUDIO_FORMAT_PCM_32_BIT;
- //config->format = AUDIO_FORMAT_PCM_8_24_BIT;
- // ALOGV("openOutput_l() upgrading format to %#08x", config->format);
+ //halConfig->format = AUDIO_FORMAT_PCM_FLOAT;
+ //halConfig->format = AUDIO_FORMAT_PCM_24_BIT_PACKED;
+ //halConfig->format = AUDIO_FORMAT_PCM_32_BIT;
+ //halConfig->format = AUDIO_FORMAT_PCM_8_24_BIT;
+ // ALOGV("openOutput_l() upgrading format to %#08x", halConfig->format);
}
if (kEnableExtendedChannels) {
// Specify channel mask (uncomment one below to choose)
- //config->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
- //config->channel_mask = audio_channel_mask_from_representation_and_bits(
+ //halConfig->channel_mask = audio_channel_out_mask_from_count(4); // for USB 4ch
+ //halConfig->channel_mask = audio_channel_mask_from_representation_and_bits(
// AUDIO_CHANNEL_REPRESENTATION_INDEX, (1 << 4) - 1); // another 4ch example
}
}
@@ -2550,7 +2574,7 @@
*output,
deviceType,
flags,
- config,
+ halConfig,
address.string());
mHardwareStatus = AUDIO_HW_IDLE;
@@ -2565,13 +2589,25 @@
return thread;
} else {
sp<PlaybackThread> thread;
- if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ if (flags == (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST
+ | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+#ifdef MULTICHANNEL_EFFECT_CHAIN
+ thread = new SpatializerThread(this, outputStream, *output,
+ mSystemReady, mixerConfig);
+ ALOGD("openOutput_l() created spatializer output: ID %d thread %p",
+ *output, thread.get());
+#else
+ ALOGE("openOutput_l() cannot create spatializer thread "
+ "without #define MULTICHANNEL_EFFECT_CHAIN");
+#endif
+ } else if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created offload output: ID %d thread %p",
*output, thread.get());
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT)
- || !isValidPcmSinkFormat(config->format)
- || !isValidPcmSinkChannelMask(config->channel_mask)) {
+ || !isValidPcmSinkFormat(halConfig->format)
+ || !isValidPcmSinkChannelMask(halConfig->channel_mask)) {
thread = new DirectOutputThread(this, outputStream, *output, mSystemReady);
ALOGV("openOutput_l() created direct output: ID %d thread %p",
*output, thread.get());
@@ -2598,8 +2634,10 @@
{
audio_module_handle_t module = VALUE_OR_RETURN_STATUS(
aidl2legacy_int32_t_audio_module_handle_t(request.module));
- audio_config_t config = VALUE_OR_RETURN_STATUS(
- aidl2legacy_AudioConfig_audio_config_t(request.config));
+ audio_config_t halConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(request.halConfig));
+ audio_config_base_t mixerConfig = VALUE_OR_RETURN_STATUS(
+ aidl2legacy_AudioConfigBase_audio_config_base_t(request.mixerConfig));
sp<DeviceDescriptorBase> device = VALUE_OR_RETURN_STATUS(
aidl2legacy_DeviceDescriptorBase(request.device));
audio_output_flags_t flags = VALUE_OR_RETURN_STATUS(
@@ -2612,9 +2650,9 @@
"Channels %#x, flags %#x",
this, module,
device->toString().c_str(),
- config.sample_rate,
- config.format,
- config.channel_mask,
+ halConfig.sample_rate,
+ halConfig.format,
+ halConfig.channel_mask,
flags);
audio_devices_t deviceType = device->type();
@@ -2626,7 +2664,8 @@
Mutex::Autolock _l(mLock);
- sp<ThreadBase> thread = openOutput_l(module, &output, &config, deviceType, address, flags);
+ sp<ThreadBase> thread = openOutput_l(module, &output, &halConfig,
+ &mixerConfig, deviceType, address, flags);
if (thread != 0) {
if ((flags & AUDIO_OUTPUT_FLAG_MMAP_NOIRQ) == 0) {
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
@@ -2651,7 +2690,8 @@
mmapThread->ioConfigChanged(AUDIO_OUTPUT_OPENED);
}
response->output = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_io_handle_t_int32_t(output));
- response->config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(config));
+ response->config =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(halConfig));
response->latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<int32_t>(latencyMs));
response->flags = VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
@@ -3730,6 +3770,15 @@
goto Exit;
}
+ // Only audio policy service can create a spatializer effect
+ if ((memcmp(&descOut.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0) &&
+ (callingUid != AID_AUDIOSERVER || currentPid != getpid())) {
+ ALOGW("%s: attempt to create a spatializer effect from uid/pid %d/%d",
+ __func__, callingUid, currentPid);
+ lStatus = PERMISSION_DENIED;
+ goto Exit;
+ }
+
if (io == AUDIO_IO_HANDLE_NONE && sessionId == AUDIO_SESSION_OUTPUT_MIX) {
// if the output returned by getOutputForEffect() is removed before we lock the
// mutex below, the call to checkPlaybackThread_l(io) below will detect it
@@ -3745,7 +3794,7 @@
ALOGV("%s device type %#x address %s", __func__, device.mType, device.getAddress());
handle = mDeviceEffectManager.createEffect_l(
&descOut, device, client, effectClient, mPatchPanel.patches_l(),
- &enabledOut, &lStatus, probe);
+ &enabledOut, &lStatus, probe, request.notifyFramesProcessed);
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
// remove local strong reference to Client with mClientLock held
Mutex::Autolock _cl(mClientLock);
@@ -3798,7 +3847,8 @@
io = mPlaybackThreads.keyAt(0);
}
ALOGV("createEffect() got io %d for effect %s", io, descOut.name);
- } else if (checkPlaybackThread_l(io) != nullptr) {
+ } else if (checkPlaybackThread_l(io) != nullptr
+ && sessionId != AUDIO_SESSION_OUTPUT_STAGE) {
// allow only one effect chain per sessionId on mPlaybackThreads.
for (size_t i = 0; i < mPlaybackThreads.size(); i++) {
const audio_io_handle_t checkIo = mPlaybackThreads.keyAt(i);
@@ -3864,7 +3914,8 @@
}
}
handle = thread->createEffect_l(client, effectClient, priority, sessionId,
- &descOut, &enabledOut, &lStatus, pinned, probe);
+ &descOut, &enabledOut, &lStatus, pinned, probe,
+ request.notifyFramesProcessed);
if (lStatus != NO_ERROR && lStatus != ALREADY_EXISTS) {
// remove local strong reference to Client with mClientLock held
Mutex::Autolock _cl(mClientLock);
@@ -4179,6 +4230,8 @@
case TransactionCode::LIST_AUDIO_PATCHES:
case TransactionCode::SET_AUDIO_PORT_CONFIG:
case TransactionCode::SET_RECORD_SILENCED:
+ case TransactionCode::AUDIO_POLICY_READY:
+ case TransactionCode::SET_DEVICE_CONNECTED_STATE:
ALOGW("%s: transaction %d received from PID %d",
__func__, code, IPCThreadState::self()->getCallingPid());
// return status only for non void methods
diff --git a/services/audioflinger/AudioFlinger.h b/services/audioflinger/AudioFlinger.h
index fff61f8..d2317e8 100644
--- a/services/audioflinger/AudioFlinger.h
+++ b/services/audioflinger/AudioFlinger.h
@@ -269,6 +269,9 @@
/* Indicate JAVA services are ready (scheduling, power management ...) */
virtual status_t systemReady();
+ virtual status_t audioPolicyReady() { mAudioPolicyReady.store(true); return NO_ERROR; }
+ bool isAudioPolicyReady() const { return mAudioPolicyReady.load(); }
+
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones);
@@ -279,6 +282,8 @@
virtual status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs);
+ virtual status_t setDeviceConnectedState(const struct audio_port_v7 *port, bool connected);
+
status_t onTransactWrapper(TransactionCode code, const Parcel& data, uint32_t flags,
const std::function<status_t()>& delegate) override;
@@ -309,7 +314,7 @@
void updateDownStreamPatches_l(const struct audio_patch *patch,
const std::set<audio_io_handle_t> streams);
- const media::AudioVibratorInfo* getDefaultVibratorInfo_l();
+ std::optional<media::AudioVibratorInfo> getDefaultVibratorInfo_l();
private:
// FIXME The 400 is temporarily too high until a leak of writers in media.log is fixed.
@@ -735,7 +740,8 @@
const String8& outputDeviceAddress);
sp<ThreadBase> openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
audio_devices_t deviceType,
const String8& address,
audio_output_flags_t flags);
@@ -900,6 +906,7 @@
AUDIO_HW_SET_MASTER_MUTE, // set_master_mute
AUDIO_HW_GET_MASTER_MUTE, // get_master_mute
AUDIO_HW_GET_MICROPHONES, // getMicrophones
+ AUDIO_HW_SET_CONNECTED_STATE, // setConnectedState
};
mutable hardware_call_state mHardwareStatus; // for dump only
@@ -986,6 +993,7 @@
DeviceEffectManager mDeviceEffectManager;
bool mSystemReady;
+ std::atomic_bool mAudioPolicyReady{};
mediautils::UidInfo mUidInfo;
diff --git a/services/audioflinger/DeviceEffectManager.cpp b/services/audioflinger/DeviceEffectManager.cpp
index cecd52b..53ac5cb 100644
--- a/services/audioflinger/DeviceEffectManager.cpp
+++ b/services/audioflinger/DeviceEffectManager.cpp
@@ -77,7 +77,8 @@
const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
int *enabled,
status_t *status,
- bool probe) {
+ bool probe,
+ bool notifyFramesProcessed) {
sp<DeviceEffectProxy> effect;
sp<EffectHandle> handle;
status_t lStatus;
@@ -95,10 +96,12 @@
effect = iter->second;
} else {
effect = new DeviceEffectProxy(device, mMyCallback,
- descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT));
+ descriptor, mAudioFlinger.nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT),
+ notifyFramesProcessed);
}
// create effect handle and connect it to effect module
- handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/);
+ handle = new EffectHandle(effect, client, effectClient, 0 /*priority*/,
+ notifyFramesProcessed);
lStatus = handle->initCheck();
if (lStatus == NO_ERROR) {
lStatus = effect->addHandle(handle.get());
diff --git a/services/audioflinger/DeviceEffectManager.h b/services/audioflinger/DeviceEffectManager.h
index a05f5fe..d2faa70 100644
--- a/services/audioflinger/DeviceEffectManager.h
+++ b/services/audioflinger/DeviceEffectManager.h
@@ -37,7 +37,8 @@
const std::map<audio_patch_handle_t, PatchPanel::Patch>& patches,
int *enabled,
status_t *status,
- bool probe);
+ bool probe,
+ bool notifyFramesProcessed);
void createAudioPatch(audio_patch_handle_t handle, const PatchPanel::Patch& patch);
void releaseAudioPatch(audio_patch_handle_t handle);
@@ -161,10 +162,16 @@
bool isOffload() const override { return false; }
bool isOffloadOrDirect() const override { return false; }
bool isOffloadOrMmap() const override { return false; }
+ bool isSpatializer() const override { return false; }
uint32_t sampleRate() const override { return 0; }
- audio_channel_mask_t channelMask() const override { return AUDIO_CHANNEL_NONE; }
- uint32_t channelCount() const override { return 0; }
+ audio_channel_mask_t inChannelMask(int id __unused) const override {
+ return AUDIO_CHANNEL_NONE;
+ }
+ uint32_t inChannelCount(int id __unused) const override { return 0; }
+ audio_channel_mask_t outChannelMask() const override { return AUDIO_CHANNEL_NONE; }
+ uint32_t outChannelCount() const override { return 0; }
+
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -190,6 +197,10 @@
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManager.audioFlinger().isAudioPolicyReady();
+ }
+
int newEffectId() { return mManager.audioFlinger().nextUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT); }
status_t addEffectToHal(audio_port_handle_t deviceId,
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index b267d88..8d04edb 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -24,9 +24,11 @@
#include "Configuration.h"
#include <utils/Log.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
#include <system/audio_effects/effect_dynamicsprocessing.h>
#include <system/audio_effects/effect_hapticgenerator.h>
#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <system/audio_effects/effect_visualizer.h>
#include <audio_utils/channels.h>
#include <audio_utils/primitives.h>
@@ -242,6 +244,12 @@
{
Mutex::Autolock _l(mLock);
+
+ if ((isInternal_l() && !mPolicyRegistered)
+ || !getCallback()->isAudioPolicyReady()) {
+ return NO_ERROR;
+ }
+
// register effect when first handle is attached and unregister when last handle is removed
if (mPolicyRegistered != mHandles.size() > 0) {
doRegister = true;
@@ -642,6 +650,13 @@
mState = IDLE;
}
break;
+ case ACTIVE:
+ for (size_t i = 0; i < mHandles.size(); i++) {
+ if (!mHandles[i]->disconnected()) {
+ mHandles[i]->framesProcessed(mConfig.inputCfg.buffer.frameCount);
+ }
+ }
+ break;
default: //IDLE , ACTIVE, DESTROYED
break;
}
@@ -875,9 +890,9 @@
// similar to output EFFECT_FLAG_TYPE_INSERT/REPLACE,
// in which case input channel masks should be used here.
callback = getCallback();
- channelMask = callback->channelMask();
+ channelMask = callback->inChannelMask(mId);
mConfig.inputCfg.channels = channelMask;
- mConfig.outputCfg.channels = channelMask;
+ mConfig.outputCfg.channels = callback->outChannelMask();
if ((mDescriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
if (mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_MONO) {
@@ -940,11 +955,7 @@
// Auxiliary effect:
// accumulates in output buffer: input buffer != output buffer
// Therefore: accumulate <=> input buffer != output buffer
- if (mConfig.inputCfg.buffer.raw != mConfig.outputCfg.buffer.raw) {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_ACCUMULATE;
- } else {
- mConfig.outputCfg.accessMode = EFFECT_BUFFER_ACCESS_WRITE;
- }
+ mConfig.outputCfg.accessMode = requiredEffectBufferAccessMode();
mConfig.inputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.inputCfg.buffer.frameCount = callback->frameCount();
@@ -1600,7 +1611,7 @@
return status;
}
-status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo)
+status_t AudioFlinger::EffectModule::setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo)
{
if (mStatus != NO_ERROR) {
return mStatus;
@@ -1610,15 +1621,17 @@
return INVALID_OPERATION;
}
+ const size_t paramCount = 3;
std::vector<uint8_t> request(
- sizeof(effect_param_t) + sizeof(int32_t) + 2 * sizeof(float));
+ sizeof(effect_param_t) + sizeof(int32_t) + paramCount * sizeof(float));
effect_param_t *param = (effect_param_t*) request.data();
param->psize = sizeof(int32_t);
- param->vsize = 2 * sizeof(float);
+ param->vsize = paramCount * sizeof(float);
*(int32_t*)param->data = HG_PARAM_VIBRATOR_INFO;
float* vibratorInfoPtr = reinterpret_cast<float*>(param->data + sizeof(int32_t));
- vibratorInfoPtr[0] = vibratorInfo->resonantFrequency;
- vibratorInfoPtr[1] = vibratorInfo->qFactor;
+ vibratorInfoPtr[0] = vibratorInfo.resonantFrequency;
+ vibratorInfoPtr[1] = vibratorInfo.qFactor;
+ vibratorInfoPtr[2] = vibratorInfo.maxAmplitude;
std::vector<uint8_t> response;
status_t status = command(EFFECT_CMD_SET_PARAM, request, sizeof(int32_t), &response);
if (status == NO_ERROR) {
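
The request blob follows the standard effect_param_t wire layout: the header, then psize bytes of parameter id, then vsize bytes of value, which with the new third float (maxAmplitude) looks like:

    offset 0   : effect_param_t header (status, psize, vsize)
    data[0..3] : int32 HG_PARAM_VIBRATOR_INFO
    data[4..15]: float resonantFrequency, float qFactor, float maxAmplitude
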
@@ -1708,10 +1721,11 @@
AudioFlinger::EffectHandle::EffectHandle(const sp<EffectBase>& effect,
const sp<AudioFlinger::Client>& client,
const sp<media::IEffectClient>& effectClient,
- int32_t priority)
+ int32_t priority, bool notifyFramesProcessed)
: BnEffect(),
mEffect(effect), mEffectClient(effectClient), mClient(client), mCblk(NULL),
- mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false)
+ mPriority(priority), mHasControl(false), mEnabled(false), mDisconnected(false),
+ mNotifyFramesProcessed(notifyFramesProcessed)
{
ALOGV("constructor %p client %p", this, client.get());
@@ -2020,6 +2034,13 @@
}
}
+void AudioFlinger::EffectHandle::framesProcessed(int32_t frames) const
+{
+ if (mEffectClient != 0 && mNotifyFramesProcessed) {
+ mEffectClient->framesProcessed(frames);
+ }
+}
+
void AudioFlinger::EffectHandle::dumpToBuffer(char* buffer, size_t size)
{
bool locked = mCblk != NULL && AudioFlinger::dumpTryLock(mCblk->lock);
@@ -2048,11 +2069,11 @@
mNewLeftVolume(UINT_MAX), mNewRightVolume(UINT_MAX),
mEffectCallback(new EffectCallback(wp<EffectChain>(this), thread))
{
- mStrategy = AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
sp<ThreadBase> p = thread.promote();
if (p == nullptr) {
return;
}
+ mStrategy = p->getStrategyForStream(AUDIO_STREAM_MUSIC);
mMaxTailBuffers = ((kProcessTailDurationMs * p->sampleRate()) / 1000) /
p->frameCount();
}
@@ -2125,8 +2146,8 @@
if (mInBuffer == NULL) {
return;
}
- const size_t frameSize =
- audio_bytes_per_sample(EFFECT_BUFFER_FORMAT) * mEffectCallback->channelCount();
+ const size_t frameSize = audio_bytes_per_sample(EFFECT_BUFFER_FORMAT)
+ * mEffectCallback->inChannelCount(mEffects[0]->id());
memset(mInBuffer->audioBuffer()->raw, 0, mEffectCallback->frameCount() * frameSize);
mInBuffer->commit();
@@ -2212,11 +2233,9 @@
// addEffect_l() must be called with ThreadBase::mLock and EffectChain::mLock held
status_t AudioFlinger::EffectChain::addEffect_ll(const sp<EffectModule>& effect)
{
- effect_descriptor_t desc = effect->desc();
- uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
-
effect->setCallback(mEffectCallback);
+ effect_descriptor_t desc = effect->desc();
if ((desc.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY) {
// Auxiliary effects are inserted at the beginning of mEffects vector as
// they are processed first and accumulated in chain input buffer
@@ -2236,97 +2255,139 @@
numSamples * sizeof(int32_t), &halBuffer);
#endif
if (result != OK) return result;
+
+ effect->configure();
+
effect->setInBuffer(halBuffer);
// auxiliary effects output samples to chain input buffer for further processing
// by insert effects
effect->setOutBuffer(mInBuffer);
} else {
- // Insert effects are inserted at the end of mEffects vector as they are processed
- // after track and auxiliary effects.
- // Insert effect order as a function of indicated preference:
- // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
- // another effect is present
- // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
- // last effect claiming first position
- // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
- // first effect claiming last position
- // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
- // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
- // already present
-
- size_t size = mEffects.size();
- size_t idx_insert = size;
- ssize_t idx_insert_first = -1;
- ssize_t idx_insert_last = -1;
-
- for (size_t i = 0; i < size; i++) {
- effect_descriptor_t d = mEffects[i]->desc();
- uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
- uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
- if (iMode == EFFECT_FLAG_TYPE_INSERT) {
- // check invalid effect chaining combinations
- if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
- iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
- ALOGW("addEffect_l() could not insert effect %s: exclusive conflict with %s",
- desc.name, d.name);
- return INVALID_OPERATION;
- }
- // remember position of first insert effect and by default
- // select this as insert position for new effect
- if (idx_insert == size) {
- idx_insert = i;
- }
- // remember position of last insert effect claiming
- // first position
- if (iPref == EFFECT_FLAG_INSERT_FIRST) {
- idx_insert_first = i;
- }
- // remember position of first insert effect claiming
- // last position
- if (iPref == EFFECT_FLAG_INSERT_LAST &&
- idx_insert_last == -1) {
- idx_insert_last = i;
- }
- }
+ ssize_t idx_insert = getInsertIndex(desc);
+ if (idx_insert < 0) {
+ return INVALID_OPERATION;
}
- // modify idx_insert from first position if needed
- if (insertPref == EFFECT_FLAG_INSERT_LAST) {
- if (idx_insert_last != -1) {
- idx_insert = idx_insert_last;
- } else {
- idx_insert = size;
- }
- } else {
- if (idx_insert_first != -1) {
- idx_insert = idx_insert_first + 1;
- }
- }
-
- // always read samples from chain input buffer
- effect->setInBuffer(mInBuffer);
-
- // if last effect in the chain, output samples to chain
- // output buffer, otherwise to chain input buffer
- if (idx_insert == size) {
- if (idx_insert != 0) {
- mEffects[idx_insert-1]->setOutBuffer(mInBuffer);
- mEffects[idx_insert-1]->configure();
- }
- effect->setOutBuffer(mOutBuffer);
- } else {
- effect->setOutBuffer(mInBuffer);
- }
+ size_t previousSize = mEffects.size();
mEffects.insertAt(effect, idx_insert);
- ALOGV("addEffect_l() effect %p, added in chain %p at rank %zu", effect.get(), this,
- idx_insert);
+ effect->configure();
+
+ // - By default:
+ // All effects read samples from chain input buffer.
+ // The last effect in the chain, writes samples to chain output buffer,
+ // otherwise to chain input buffer
+ // - In the OUTPUT_STAGE chain of a spatializer mixer thread:
+ // The spatializer effect (first effect) reads samples from the input buffer
+ // and writes samples to the output buffer.
+ // All other effects read and writes samples to the output buffer
+ if (mEffectCallback->isSpatializer()
+ && mSessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+ effect->setOutBuffer(mOutBuffer);
+ if (idx_insert == 0) {
+ if (previousSize != 0) {
+ mEffects[1]->configure();
+ mEffects[1]->setInBuffer(mOutBuffer);
+ mEffects[1]->updateAccessMode(); // reconfig if needed.
+ }
+ effect->setInBuffer(mInBuffer);
+ } else {
+ effect->setInBuffer(mOutBuffer);
+ }
+ } else {
+ effect->setInBuffer(mInBuffer);
+ if (idx_insert == previousSize) {
+ if (idx_insert != 0) {
+                mEffects[idx_insert - 1]->configure();
+                mEffects[idx_insert - 1]->setOutBuffer(mInBuffer);
+                mEffects[idx_insert - 1]->updateAccessMode(); // reconfig if needed.
+ }
+ effect->setOutBuffer(mOutBuffer);
+ } else {
+ effect->setOutBuffer(mInBuffer);
+ }
+ }
+ ALOGV("%s effect %p, added in chain %p at rank %zu",
+ __func__, effect.get(), this, idx_insert);
}
effect->configure();
return NO_ERROR;
}
+ssize_t AudioFlinger::EffectChain::getInsertIndex(const effect_descriptor_t& desc) {
+ // Insert effects are inserted at the end of mEffects vector as they are processed
+ // after track and auxiliary effects.
+ // Insert effect order as a function of indicated preference:
+ // if EFFECT_FLAG_INSERT_EXCLUSIVE, insert in first position or reject if
+ // another effect is present
+ // else if EFFECT_FLAG_INSERT_FIRST, insert in first position or after the
+ // last effect claiming first position
+ // else if EFFECT_FLAG_INSERT_LAST, insert in last position or before the
+ // first effect claiming last position
+ // else if EFFECT_FLAG_INSERT_ANY insert after first or before last
+ // Reject insertion if an effect with EFFECT_FLAG_INSERT_EXCLUSIVE is
+ // already present
+ // Spatializer or Downmixer effects are inserted in first position because
+ // they adapt the channel count for all other effects in the chain
+ if ((memcmp(&desc.type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0)
+ || (memcmp(&desc.type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0)) {
+ return 0;
+ }
+
+ size_t size = mEffects.size();
+ uint32_t insertPref = desc.flags & EFFECT_FLAG_INSERT_MASK;
+    ssize_t idx_insert = size;
+    ssize_t idx_insert_first = -1;
+    ssize_t idx_insert_last = -1;
+
+ for (size_t i = 0; i < size; i++) {
+ effect_descriptor_t d = mEffects[i]->desc();
+ uint32_t iMode = d.flags & EFFECT_FLAG_TYPE_MASK;
+ uint32_t iPref = d.flags & EFFECT_FLAG_INSERT_MASK;
+ if (iMode == EFFECT_FLAG_TYPE_INSERT) {
+ // check invalid effect chaining combinations
+ if (insertPref == EFFECT_FLAG_INSERT_EXCLUSIVE ||
+ iPref == EFFECT_FLAG_INSERT_EXCLUSIVE) {
+ ALOGW("%s could not insert effect %s: exclusive conflict with %s",
+ __func__, desc.name, d.name);
+ return -1;
+ }
+ // remember position of first insert effect and by default
+ // select this as insert position for new effect
+ if (idx_insert == size) {
+ idx_insert = i;
+ }
+ // remember position of last insert effect claiming
+ // first position
+ if (iPref == EFFECT_FLAG_INSERT_FIRST) {
+ idx_insert_first = i;
+ }
+ // remember position of first insert effect claiming
+ // last position
+ if (iPref == EFFECT_FLAG_INSERT_LAST &&
+ idx_insert_last == -1) {
+ idx_insert_last = i;
+ }
+ }
+ }
+
+ // modify idx_insert from first position if needed
+ if (insertPref == EFFECT_FLAG_INSERT_LAST) {
+ if (idx_insert_last != -1) {
+ idx_insert = idx_insert_last;
+ } else {
+ idx_insert = size;
+ }
+ } else {
+ if (idx_insert_first != -1) {
+ idx_insert = idx_insert_first + 1;
+ }
+ }
+ return idx_insert;
+}
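// The preference rules above can be exercised in isolation. The following is a
// minimal, self-contained sketch (hypothetical names; it assumes every element
// of the chain is an insert-type effect) and is not part of this patch:

#include <sys/types.h>
#include <cstddef>
#include <vector>

enum Pref { ANY, FIRST, LAST, EXCLUSIVE };

// Mirrors getInsertIndex(): returns the position at which a new effect with
// preference 'pref' would be inserted, or -1 on an exclusive conflict.
ssize_t insertIndex(const std::vector<Pref>& chain, Pref pref) {
    const size_t size = chain.size();
    size_t idx = size;                      // default: append at the end
    ssize_t first = -1;                     // last effect claiming first position
    ssize_t last = -1;                      // first effect claiming last position
    for (size_t i = 0; i < size; i++) {
        if (pref == EXCLUSIVE || chain[i] == EXCLUSIVE) return -1;
        if (idx == size) idx = i;           // position of the first insert effect
        if (chain[i] == FIRST) first = i;
        if (chain[i] == LAST && last == -1) last = i;
    }
    if (pref == LAST) return last != -1 ? last : (ssize_t)size;
    return first != -1 ? first + 1 : (ssize_t)idx;
}

// For example, insertIndex({FIRST, ANY, LAST}, ANY) == 1: the new effect lands
// right after the FIRST claimer and before the effect claiming last position.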
+
// removeEffect_l() must be called with ThreadBase::mLock held
size_t AudioFlinger::EffectChain::removeEffect_l(const sp<EffectModule>& effect,
bool release)
@@ -2350,14 +2411,23 @@
if (type != EFFECT_FLAG_TYPE_AUXILIARY) {
if (i == size - 1 && i != 0) {
- mEffects[i - 1]->setOutBuffer(mOutBuffer);
mEffects[i - 1]->configure();
+ mEffects[i - 1]->setOutBuffer(mOutBuffer);
+                    mEffects[i - 1]->updateAccessMode(); // reconfig if needed.
}
}
mEffects.removeAt(i);
+
+ // make sure the input buffer configuration for the new first effect in the chain
+ // is updated if needed (can switch from HAL channel mask to mixer channel mask)
+ if (i == 0 && size > 1) {
+ mEffects[0]->configure();
+ mEffects[0]->setInBuffer(mInBuffer);
+        mEffects[0]->updateAccessMode(); // reconfig if needed.
+ }
+
ALOGV("removeEffect_l() effect %p, removed from chain %p at rank %zu", effect.get(),
this, i);
-
break;
}
}
@@ -2901,27 +2971,26 @@
}
bool AudioFlinger::EffectChain::EffectCallback::isOffload() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
- return false;
- }
- return t->type() == ThreadBase::OFFLOAD;
+ return mThreadType == ThreadBase::OFFLOAD;
}
bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrDirect() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
- return false;
- }
- return t->type() == ThreadBase::OFFLOAD || t->type() == ThreadBase::DIRECT;
+ return mThreadType == ThreadBase::OFFLOAD || mThreadType == ThreadBase::DIRECT;
}
bool AudioFlinger::EffectChain::EffectCallback::isOffloadOrMmap() const {
- sp<ThreadBase> t = thread().promote();
- if (t == nullptr) {
+ switch (mThreadType) {
+ case ThreadBase::OFFLOAD:
+ case ThreadBase::MMAP_PLAYBACK:
+ case ThreadBase::MMAP_CAPTURE:
+ return true;
+ default:
return false;
}
- return t->isOffloadOrMmap();
+}
+
+bool AudioFlinger::EffectChain::EffectCallback::isSpatializer() const {
+ return mThreadType == ThreadBase::SPATIALIZER;
}
uint32_t AudioFlinger::EffectChain::EffectCallback::sampleRate() const {
@@ -2932,20 +3001,68 @@
return t->sampleRate();
}
-audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::inChannelMask(int id) const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
return AUDIO_CHANNEL_NONE;
}
- return t->channelMask();
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+
+ if (mThreadType == ThreadBase::SPATIALIZER) {
+ if (c->sessionId() == AUDIO_SESSION_OUTPUT_STAGE) {
+ if (c->isFirstEffect(id)) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else if (!audio_is_global_session(c->sessionId())) {
+ if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
}
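// Input mask selection on a SPATIALIZER thread (summary of the logic above):
//   OUTPUT_STAGE chain, first effect (the spatializer) -> mixer channel mask
//   OUTPUT_STAGE chain, subsequent effects             -> HAL channel mask
//   spatialized player session                         -> mixer channel mask
//   any other session                                  -> HAL channel mask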
-uint32_t AudioFlinger::EffectChain::EffectCallback::channelCount() const {
+uint32_t AudioFlinger::EffectChain::EffectCallback::inChannelCount(int id) const {
+ return audio_channel_count_from_out_mask(inChannelMask(id));
+}
+
+audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::outChannelMask() const {
sp<ThreadBase> t = thread().promote();
if (t == nullptr) {
- return 0;
+ return AUDIO_CHANNEL_NONE;
}
- return t->channelCount();
+ sp<EffectChain> c = chain().promote();
+ if (c == nullptr) {
+ return AUDIO_CHANNEL_NONE;
+ }
+
+ if (mThreadType == ThreadBase::SPATIALIZER) {
+ if (!audio_is_global_session(c->sessionId())) {
+ if ((t->hasAudioSession_l(c->sessionId()) & ThreadBase::SPATIALIZED_SESSION) != 0) {
+ return t->mixerChannelMask();
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+ } else {
+ return t->channelMask();
+ }
+}
+
+uint32_t AudioFlinger::EffectChain::EffectCallback::outChannelCount() const {
+ return audio_channel_count_from_out_mask(outChannelMask());
}
audio_channel_mask_t AudioFlinger::EffectChain::EffectCallback::hapticChannelMask() const {
@@ -3143,7 +3260,8 @@
} else {
mHalEffect->setDevices({mDevice});
}
- *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/);
+ *handle = new EffectHandle(mHalEffect, nullptr, nullptr, 0 /*priority*/,
+ mNotifyFramesProcessed);
status = (*handle)->initCheck();
if (status == OK) {
status = mHalEffect->addHandle((*handle).get());
@@ -3169,7 +3287,8 @@
int enabled;
*handle = thread->createEffect_l(nullptr, nullptr, 0, AUDIO_SESSION_DEVICE,
const_cast<effect_descriptor_t *>(&mDescriptor),
- &enabled, &status, false, false /*probe*/);
+ &enabled, &status, false, false /*probe*/,
+ mNotifyFramesProcessed);
ALOGV("%s thread->createEffect_l status %d", __func__, status);
} else {
status = BAD_VALUE;
@@ -3364,7 +3483,8 @@
return proxy->sampleRate();
}
-audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelMask() const {
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelMask(
+ int id __unused) const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return AUDIO_CHANNEL_OUT_STEREO;
@@ -3372,7 +3492,23 @@
return proxy->channelMask();
}
-uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::channelCount() const {
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::inChannelCount(int id __unused) const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return 2;
+ }
+ return proxy->channelCount();
+}
+
+audio_channel_mask_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelMask() const {
+ sp<DeviceEffectProxy> proxy = mProxy.promote();
+ if (proxy == nullptr) {
+ return AUDIO_CHANNEL_OUT_STEREO;
+ }
+ return proxy->channelMask();
+}
+
+uint32_t AudioFlinger::DeviceEffectProxy::ProxyCallback::outChannelCount() const {
sp<DeviceEffectProxy> proxy = mProxy.promote();
if (proxy == nullptr) {
return 2;
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index a727e04..5ebf483 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -33,9 +33,12 @@
virtual bool isOffload() const = 0;
virtual bool isOffloadOrDirect() const = 0;
virtual bool isOffloadOrMmap() const = 0;
+ virtual bool isSpatializer() const = 0;
virtual uint32_t sampleRate() const = 0;
- virtual audio_channel_mask_t channelMask() const = 0;
- virtual uint32_t channelCount() const = 0;
+ virtual audio_channel_mask_t inChannelMask(int id) const = 0;
+ virtual uint32_t inChannelCount(int id) const = 0;
+ virtual audio_channel_mask_t outChannelMask() const = 0;
+ virtual uint32_t outChannelCount() const = 0;
virtual audio_channel_mask_t hapticChannelMask() const = 0;
virtual size_t frameCount() const = 0;
@@ -64,6 +67,8 @@
virtual void resetVolume() = 0;
virtual wp<EffectChain> chain() const = 0;
+
+ virtual bool isAudioPolicyReady() const = 0;
};
// EffectBase(EffectModule) and EffectChain classes both have their own mutex to protect
@@ -164,6 +169,16 @@
void dump(int fd, const Vector<String16>& args);
+protected:
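+    // Returns true when the effect is held only by internal (service-side)
+    // handles, i.e. no handle was created on behalf of a client process.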
+ bool isInternal_l() const {
+ for (auto handle : mHandles) {
+ if (handle->client() != nullptr) {
+ return false;
+ }
+ }
+ return true;
+ }
+
private:
friend class AudioFlinger; // for mHandles
bool mPinned = false;
@@ -240,6 +255,13 @@
return mOutBuffer != 0 ? reinterpret_cast<int16_t*>(mOutBuffer->ptr()) : NULL;
}
+ // Updates the access mode if it is out of date. May issue a new effect configure.
+ void updateAccessMode() {
+ if (requiredEffectBufferAccessMode() != mConfig.outputCfg.accessMode) {
+ configure();
+ }
+ }
+
status_t setDevices(const AudioDeviceTypeAddrVector &devices);
status_t setInputDevice(const AudioDeviceTypeAddr &device);
status_t setVolume(uint32_t *left, uint32_t *right, bool controller);
@@ -259,7 +281,7 @@
bool isHapticGenerator() const;
status_t setHapticIntensity(int id, int intensity);
- status_t setVibratorInfo(const media::AudioVibratorInfo* vibratorInfo);
+ status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
void dump(int fd, const Vector<String16>& args);
@@ -275,6 +297,11 @@
status_t stop_l();
status_t removeEffectFromHal_l();
status_t sendSetAudioDevicesCommand(const AudioDeviceTypeAddrVector &devices, uint32_t cmdCode);
+ effect_buffer_access_e requiredEffectBufferAccessMode() const {
+ return mConfig.inputCfg.buffer.raw == mConfig.outputCfg.buffer.raw
+ ? EFFECT_BUFFER_ACCESS_WRITE : EFFECT_BUFFER_ACCESS_ACCUMULATE;
+ }
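+    // Rationale (sketch): in-place processing (input buffer == output buffer)
+    // overwrites the buffer (ACCESS_WRITE); with distinct buffers the effect
+    // output is mixed into the destination (ACCESS_ACCUMULATE), which is why
+    // chain rewiring calls updateAccessMode() after changing effect buffers.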
+
effect_config_t mConfig; // input and output audio configuration
sp<EffectHalInterface> mEffectInterface; // Effect module HAL
@@ -327,7 +354,7 @@
EffectHandle(const sp<EffectBase>& effect,
const sp<AudioFlinger::Client>& client,
const sp<media::IEffectClient>& effectClient,
- int32_t priority);
+ int32_t priority, bool notifyFramesProcessed);
virtual ~EffectHandle();
virtual status_t initCheck();
@@ -342,6 +369,8 @@
android::binder::Status disconnect() override;
android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
+ sp<Client> client() const { return mClient; }
+
private:
void disconnect(bool unpinIfLast);
@@ -356,6 +385,8 @@
void setEnabled(bool enabled);
bool enabled() const { return mEnabled; }
+ void framesProcessed(int32_t frames) const;
+
// Getters
wp<EffectBase> effect() const { return mEffect; }
int id() const {
@@ -389,6 +420,8 @@
bool mEnabled; // cached enable state: needed when the effect is
// restored after being suspended
bool mDisconnected; // Set to true by disconnect()
+ const bool mNotifyFramesProcessed; // true if the client callback event
+ // EVENT_FRAMES_PROCESSED must be generated
};
// the EffectChain class represents a group of effects associated to one audio session.
@@ -511,6 +544,8 @@
sp<EffectCallbackInterface> effectCallback() const { return mEffectCallback; }
wp<ThreadBase> thread() const { return mEffectCallback->thread(); }
+ bool isFirstEffect(int id) const { return !mEffects.isEmpty() && id == mEffects[0]->id(); }
+
void dump(int fd, const Vector<String16>& args);
private:
@@ -530,6 +565,12 @@
: mChain(owner)
, mThread(thread)
, mAudioFlinger(*gAudioFlinger) {
+ sp<ThreadBase> base = thread.promote();
+ if (base != nullptr) {
+ mThreadType = base->type();
+ } else {
+            mThreadType = ThreadBase::MIXER; // ensure a consistent value.
+ }
}
status_t createEffectHal(const effect_uuid_t *pEffectUuid,
@@ -542,10 +583,13 @@
bool isOffload() const override;
bool isOffloadOrDirect() const override;
bool isOffloadOrMmap() const override;
+ bool isSpatializer() const override;
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override;
size_t frameCount() const override;
uint32_t latency() const override;
@@ -566,16 +610,22 @@
wp<EffectChain> chain() const override { return mChain; }
+ bool isAudioPolicyReady() const override {
+ return mAudioFlinger.isAudioPolicyReady();
+ }
+
wp<ThreadBase> thread() const { return mThread.load(); }
- void setThread(const wp<ThreadBase>& thread) {
+ void setThread(const sp<ThreadBase>& thread) {
mThread = thread;
+ mThreadType = thread->type();
}
private:
const wp<EffectChain> mChain;
mediautils::atomic_wp<ThreadBase> mThread;
AudioFlinger &mAudioFlinger; // implementation detail: outer instance always exists.
+ ThreadBase::type_t mThreadType;
};
friend class AudioFlinger; // for mThread, mEffects
@@ -612,6 +662,8 @@
void setVolumeForOutput_l(uint32_t left, uint32_t right);
+ ssize_t getInsertIndex(const effect_descriptor_t& desc);
+
mutable Mutex mLock; // mutex protecting effect list
Vector< sp<EffectModule> > mEffects; // list of effect modules
audio_session_t mSessionId; // audio session ID
@@ -643,11 +695,11 @@
public:
DeviceEffectProxy (const AudioDeviceTypeAddr& device,
const sp<DeviceEffectManagerCallback>& callback,
- effect_descriptor_t *desc, int id)
+ effect_descriptor_t *desc, int id, bool notifyFramesProcessed)
: EffectBase(callback, desc, id, AUDIO_SESSION_DEVICE, false),
mDevice(device), mManagerCallback(callback),
- mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this),
- callback)) {}
+ mMyCallback(new ProxyCallback(wp<DeviceEffectProxy>(this), callback)),
+ mNotifyFramesProcessed(notifyFramesProcessed) {}
status_t setEnabled(bool enabled, bool fromHandle) override;
sp<DeviceEffectProxy> asDeviceEffectProxy() override { return this; }
@@ -692,10 +744,13 @@
bool isOffload() const override { return false; }
bool isOffloadOrDirect() const override { return false; }
bool isOffloadOrMmap() const override { return false; }
+ bool isSpatializer() const override { return false; }
uint32_t sampleRate() const override;
- audio_channel_mask_t channelMask() const override;
- uint32_t channelCount() const override;
+ audio_channel_mask_t inChannelMask(int id) const override;
+ uint32_t inChannelCount(int id) const override;
+ audio_channel_mask_t outChannelMask() const override;
+ uint32_t outChannelCount() const override;
audio_channel_mask_t hapticChannelMask() const override { return AUDIO_CHANNEL_NONE; }
size_t frameCount() const override { return 0; }
uint32_t latency() const override { return 0; }
@@ -716,6 +771,10 @@
wp<EffectChain> chain() const override { return nullptr; }
+ bool isAudioPolicyReady() const override {
+ return mManagerCallback->isAudioPolicyReady();
+ }
+
int newEffectId();
private:
@@ -734,4 +793,5 @@
std::map<audio_patch_handle_t, sp<EffectHandle>> mEffectHandles; // protected by mProxyLock
sp<EffectModule> mHalEffect; // protected by mProxyLock
struct audio_port_config mDevicePort = { .id = AUDIO_PORT_HANDLE_NONE };
+ const bool mNotifyFramesProcessed;
};
diff --git a/services/audioflinger/FastMixer.cpp b/services/audioflinger/FastMixer.cpp
index 88d4eaf..26bd92d 100644
--- a/services/audioflinger/FastMixer.cpp
+++ b/services/audioflinger/FastMixer.cpp
@@ -89,6 +89,7 @@
// TODO: Add channel mask to NBAIO_Format.
// We assume that the channel mask must be a valid positional channel mask.
mSinkChannelMask = getChannelMaskFromCount(mSinkChannelCount);
+ mBalance.setChannelMask(mSinkChannelMask);
unsigned i;
for (i = 0; i < FastMixerState::sMaxFastTracks; ++i) {
@@ -204,6 +205,8 @@
(void *)(uintptr_t)fastTrack->mHapticPlaybackEnabled);
mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_INTENSITY,
(void *)(uintptr_t)fastTrack->mHapticIntensity);
+ mMixer->setParameter(index, AudioMixer::TRACK, AudioMixer::HAPTIC_MAX_AMPLITUDE,
+ (void *)(&(fastTrack->mHapticMaxAmplitude)));
mMixer->enable(index);
break;
diff --git a/services/audioflinger/FastMixerState.h b/services/audioflinger/FastMixerState.h
index 857d3de..ce3cc14 100644
--- a/services/audioflinger/FastMixerState.h
+++ b/services/audioflinger/FastMixerState.h
@@ -17,6 +17,8 @@
#ifndef ANDROID_AUDIO_FAST_MIXER_STATE_H
#define ANDROID_AUDIO_FAST_MIXER_STATE_H
+#include <math.h>
+
#include <audio_utils/minifloat.h>
#include <system/audio.h>
#include <media/AudioMixer.h>
@@ -51,6 +53,7 @@
int mGeneration; // increment when any field is assigned
bool mHapticPlaybackEnabled = false; // haptic playback is enabled or not
os::HapticScale mHapticIntensity = os::HapticScale::MUTE; // intensity of haptic data
+ float mHapticMaxAmplitude = NAN; // max amplitude allowed for haptic data
};
// Represents a single state of the fast mixer
diff --git a/services/audioflinger/OWNERS b/services/audioflinger/OWNERS
index 034d161..17d4c37 100644
--- a/services/audioflinger/OWNERS
+++ b/services/audioflinger/OWNERS
@@ -1,4 +1,4 @@
-gkasten@google.com
hunga@google.com
jmtrivi@google.com
mnaganov@google.com
+philburk@google.com
diff --git a/services/audioflinger/PatchPanel.cpp b/services/audioflinger/PatchPanel.cpp
index b82601a..45dd258 100644
--- a/services/audioflinger/PatchPanel.cpp
+++ b/services/audioflinger/PatchPanel.cpp
@@ -258,6 +258,7 @@
reinterpret_cast<PlaybackThread*>(thread.get()), false /*closeThread*/);
} else {
audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE;
if (patch->sinks[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
@@ -276,6 +277,7 @@
patch->sinks[0].ext.device.hw_module,
&output,
&config,
+ &mixerConfig,
outputDevice,
outputDeviceAddress,
flags);
diff --git a/services/audioflinger/PlaybackTracks.h b/services/audioflinger/PlaybackTracks.h
index 04b1c6d..aecd4d3 100644
--- a/services/audioflinger/PlaybackTracks.h
+++ b/services/audioflinger/PlaybackTracks.h
@@ -19,6 +19,8 @@
#error This header file should only be included from AudioFlinger.h
#endif
+#include <math.h>
+
// Checks and monitors OP_PLAY_AUDIO
class OpPlayAudioMonitor : public RefBase {
public:
@@ -161,6 +163,8 @@
}
/** Return at what intensity to play haptics, used in mixer. */
os::HapticScale getHapticIntensity() const { return mHapticIntensity; }
+ /** Return the maximum amplitude allowed for haptics data, used in mixer. */
+ float getHapticMaxAmplitude() const { return mHapticMaxAmplitude; }
/** Set intensity of haptic playback, should be set after querying vibrator service. */
void setHapticIntensity(os::HapticScale hapticIntensity) {
if (os::isValidHapticScale(hapticIntensity)) {
@@ -168,6 +172,12 @@
setHapticPlaybackEnabled(mHapticIntensity != os::HapticScale::MUTE);
}
}
+ /** Set maximum amplitude allowed for haptic data, should be set after querying
+ * vibrator service.
+ */
+ void setHapticMaxAmplitude(float maxAmplitude) {
+ mHapticMaxAmplitude = maxAmplitude;
+ }
sp<os::ExternalVibration> getExternalVibration() const { return mExternalVibration; }
void setTeePatches(TeePatches teePatches);
@@ -191,6 +201,7 @@
audio_output_flags_t getOutputFlags() const { return mFlags; }
float getSpeed() const { return mSpeed; }
+
protected:
// for numerous
friend class PlaybackThread;
@@ -288,6 +299,8 @@
bool mHapticPlaybackEnabled = false; // indicates haptic playback enabled or not
// intensity to play haptic data
os::HapticScale mHapticIntensity = os::HapticScale::MUTE;
+ // max amplitude allowed for haptic data
+ float mHapticMaxAmplitude = NAN;
class AudioVibrationController : public os::BnExternalVibrationController {
public:
explicit AudioVibrationController(Track* track) : mTrack(track) {}
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 746d875..09e4078 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -50,8 +50,10 @@
#include <audio_utils/format.h>
#include <audio_utils/minifloat.h>
#include <audio_utils/safe_math.h>
-#include <system/audio_effects/effect_ns.h>
#include <system/audio_effects/effect_aec.h>
+#include <system/audio_effects/effect_downmix.h>
+#include <system/audio_effects/effect_ns.h>
+#include <system/audio_effects/effect_spatializer.h>
#include <system/audio.h>
// NBAIO implementations
@@ -507,6 +509,8 @@
return "MMAP_PLAYBACK";
case MMAP_CAPTURE:
return "MMAP_CAPTURE";
+ case SPATIALIZER:
+ return "SPATIALIZER";
default:
return "unknown";
}
@@ -722,6 +726,19 @@
sendConfigEvent_l(configEvent);
}
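+// Posts a CheckOutputStageEffectsEvent asking the thread to re-evaluate its
+// output stage effects on the next loop iteration (handled via
+// CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS below).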
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent()
+{
+ Mutex::Autolock _l(mLock);
+ sendCheckOutputStageEffectsEvent_l();
+}
+
+void AudioFlinger::ThreadBase::sendCheckOutputStageEffectsEvent_l()
+{
+ sp<ConfigEvent> configEvent =
+ (ConfigEvent *)new CheckOutputStageEffectsEvent();
+ sendConfigEvent_l(configEvent);
+}
+
// post condition: mConfigEvents.isEmpty()
void AudioFlinger::ThreadBase::processConfigEvents_l()
{
@@ -784,6 +801,11 @@
(ResizeBufferConfigEventData *)event->mData.get();
resizeInputBuffer_l(data->mMaxSharedAudioHistoryMs);
} break;
+
+ case CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS: {
+ setCheckOutputStageEffects();
+ } break;
+
default:
ALOG_ASSERT(false, "processConfigEvents_l() unknown event type %d", event->mType);
break;
@@ -1008,6 +1030,8 @@
return String16("MmapPlayback");
case MMAP_CAPTURE:
return String16("MmapCapture");
+ case SPATIALIZER:
+ return String16("AudioSpatial");
default:
ALOG_ASSERT(false);
return String16("AudioUnknown");
@@ -1296,8 +1320,8 @@
{
// no preprocessing on playback threads
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC) {
- ALOGW("checkEffectCompatibility_l(): pre processing effect %s created on playback"
- " thread %s", desc->name, mThreadName);
+ ALOGW("%s: pre processing effect %s created on playback"
+ " thread %s", __func__, desc->name, mThreadName);
return BAD_VALUE;
}
@@ -1312,14 +1336,21 @@
return BAD_VALUE;
}
+ if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+ && mType != SPATIALIZER) {
+ ALOGW("%s: attempt to create a spatializer effect on a thread of type %d",
+ __func__, mType);
+ return BAD_VALUE;
+ }
+
switch (mType) {
case MIXER: {
#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
- ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d) on MIXER"
- " thread %s", desc->name, mChannelCount, mThreadName);
+ ALOGW("%s: effect %s for multichannel(%d) on MIXER thread %s",
+ __func__, desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
#endif
@@ -1333,15 +1364,15 @@
} else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
// only post processing on output stage session
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
- " on output stage session", desc->name);
+ ALOGW("%s: non post processing effect %s not allowed on output stage session",
+ __func__, desc->name);
return BAD_VALUE;
}
} else if (sessionId == AUDIO_SESSION_DEVICE) {
            // only post processing on device session
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): non post processing effect %s not allowed"
- " on device session", desc->name);
+ ALOGW("%s: non post processing effect %s not allowed on device session",
+ __func__, desc->name);
return BAD_VALUE;
}
} else {
@@ -1352,13 +1383,12 @@
}
if (flags & AUDIO_OUTPUT_FLAG_RAW) {
- ALOGW("checkEffectCompatibility_l(): effect %s on playback thread in raw mode",
- desc->name);
+ ALOGW("%s: effect %s on playback thread in raw mode", __func__, desc->name);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) == 0) {
- ALOGW("checkEffectCompatibility_l(): non HW effect %s on playback thread"
- " in fast mode", desc->name);
+ ALOGW("%s: non HW effect %s on playback thread in fast mode",
+ __func__, desc->name);
return BAD_VALUE;
}
}
@@ -1372,35 +1402,64 @@
case DIRECT:
// Reject any effect on Direct output threads for now, since the format of
// mSinkBuffer is not guaranteed to be compatible with effect processing (PCM 16 stereo).
- ALOGW("checkEffectCompatibility_l(): effect %s on DIRECT output thread %s",
- desc->name, mThreadName);
+ ALOGW("%s: effect %s on DIRECT output thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
case DUPLICATING:
#ifndef MULTICHANNEL_EFFECT_CHAIN
// Reject any effect on mixer multichannel sinks.
// TODO: fix both format and multichannel issues with effects.
if (mChannelCount != FCC_2) {
- ALOGW("checkEffectCompatibility_l(): effect %s for multichannel(%d)"
- " on DUPLICATING thread %s", desc->name, mChannelCount, mThreadName);
+ ALOGW("%s: effect %s for multichannel(%d) on DUPLICATING thread %s",
+ __func__, desc->name, mChannelCount, mThreadName);
return BAD_VALUE;
}
#endif
if (audio_is_global_session(sessionId)) {
- ALOGW("checkEffectCompatibility_l(): global effect %s on DUPLICATING"
- " thread %s", desc->name, mThreadName);
+ ALOGW("%s: global effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_POST_PROC) {
- ALOGW("checkEffectCompatibility_l(): post processing effect %s on"
- " DUPLICATING thread %s", desc->name, mThreadName);
+ ALOGW("%s: post processing effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
if ((desc->flags & EFFECT_FLAG_HW_ACC_TUNNEL) != 0) {
- ALOGW("checkEffectCompatibility_l(): HW tunneled effect %s on"
- " DUPLICATING thread %s", desc->name, mThreadName);
+ ALOGW("%s: HW tunneled effect %s on DUPLICATING thread %s",
+ __func__, desc->name, mThreadName);
return BAD_VALUE;
}
break;
+ case SPATIALIZER:
+        // Global effects (AUDIO_SESSION_OUTPUT_MIX) are not supported on a spatializer
+        // mixer as there is no common accumulation buffer for spatialized and
+        // non-spatialized tracks.
+ // Post processing effects (AUDIO_SESSION_OUTPUT_STAGE or AUDIO_SESSION_DEVICE)
+ // are supported and added after the spatializer.
+ if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
+ ALOGW("%s: global effect %s not supported on spatializer thread %s",
+ __func__, desc->name, mThreadName);
+ return BAD_VALUE;
+ } else if (sessionId == AUDIO_SESSION_OUTPUT_STAGE) {
+            // only post processing, downmixer or spatializer effects on output stage session
+ if (memcmp(&desc->type, FX_IID_SPATIALIZER, sizeof(effect_uuid_t)) == 0
+ || memcmp(&desc->type, EFFECT_UIID_DOWNMIX, sizeof(effect_uuid_t)) == 0) {
+ break;
+ }
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("%s: non post processing effect %s not allowed on output stage session",
+ __func__, desc->name);
+ return BAD_VALUE;
+ }
+ } else if (sessionId == AUDIO_SESSION_DEVICE) {
+            // only post processing on device session
+ if ((desc->flags & EFFECT_FLAG_TYPE_MASK) != EFFECT_FLAG_TYPE_POST_PROC) {
+ ALOGW("%s: non post processing effect %s not allowed on device session",
+ __func__, desc->name);
+ return BAD_VALUE;
+ }
+ }
+ break;
default:
LOG_ALWAYS_FATAL("checkEffectCompatibility_l(): wrong thread type %d", mType);
}
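// Summary of the SPATIALIZER thread policy above (illustrative):
//   AUDIO_SESSION_OUTPUT_MIX   -> rejected (no common accumulation buffer)
//   AUDIO_SESSION_OUTPUT_STAGE -> spatializer, downmixer or post-processing only
//   AUDIO_SESSION_DEVICE       -> post-processing only
//   player sessions            -> regular playback thread rules apply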
@@ -1418,7 +1477,8 @@
int *enabled,
status_t *status,
bool pinned,
- bool probe)
+ bool probe,
+ bool notifyFramesProcessed)
{
sp<EffectModule> effect;
sp<EffectHandle> handle;
@@ -1477,18 +1537,19 @@
if (effect->isHapticGenerator()) {
// TODO(b/184194057): Use the vibrator information from the vibrator that will be used
// for the HapticGenerator.
- const media::AudioVibratorInfo* defaultVibratorInfo =
- mAudioFlinger->getDefaultVibratorInfo_l();
- if (defaultVibratorInfo != nullptr) {
+ const std::optional<media::AudioVibratorInfo> defaultVibratorInfo =
+ std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ if (defaultVibratorInfo) {
// Only set the vibrator info when it is a valid one.
- effect->setVibratorInfo(defaultVibratorInfo);
+ effect->setVibratorInfo(*defaultVibratorInfo);
}
}
// create effect handle and connect it to effect module
- handle = new EffectHandle(effect, client, effectClient, priority);
+ handle = new EffectHandle(effect, client, effectClient, priority, notifyFramesProcessed);
lStatus = handle->initCheck();
if (lStatus == OK) {
lStatus = effect->addHandle(handle.get());
+ sendCheckOutputStageEffectsEvent_l();
}
if (enabled != NULL) {
*enabled = (int)effect->isEnabled();
@@ -1531,6 +1592,7 @@
if (remove) {
removeEffect_l(effect, true);
}
+ sendCheckOutputStageEffectsEvent_l();
}
if (remove) {
mAudioFlinger->updateOrphanEffectChains(effect);
@@ -1888,6 +1950,14 @@
item->selfrecord();
}
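+// Wraps AudioSystem::getStrategyForStream(), returning PRODUCT_STRATEGY_NONE
+// while audio policy is not ready, presumably to avoid querying the audio
+// policy service before it is initialized.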
+product_strategy_t AudioFlinger::ThreadBase::getStrategyForStream(audio_stream_type_t stream) const
+{
+ if (!mAudioFlinger->isAudioPolicyReady()) {
+ return PRODUCT_STRATEGY_NONE;
+ }
+ return AudioSystem::getStrategyForStream(stream);
+}
+
// ----------------------------------------------------------------------------
// Playback
// ----------------------------------------------------------------------------
@@ -1896,15 +1966,16 @@
AudioStreamOut* output,
audio_io_handle_t id,
type_t type,
- bool systemReady)
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
: ThreadBase(audioFlinger, id, type, systemReady, true /* isOut */),
mNormalFrameCount(0), mSinkBuffer(NULL),
- mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mMixerBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
mMixerBuffer(NULL),
mMixerBufferSize(0),
mMixerBufferFormat(AUDIO_FORMAT_INVALID),
mMixerBufferValid(false),
- mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision),
+ mEffectBufferEnabled(AudioFlinger::kEnableExtendedPrecision || type == SPATIALIZER),
mEffectBuffer(NULL),
mEffectBufferSize(0),
mEffectBufferFormat(AUDIO_FORMAT_INVALID),
@@ -1956,8 +2027,18 @@
mOutput->audioHwDev->moduleName(), AUDIO_HARDWARE_MODULE_ID_MSD) == 0;
}
+ if (mixerConfig != nullptr && mixerConfig->channel_mask != AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mixerConfig->channel_mask;
+ }
+
readOutputParameters_l();
+ if (mType != SPATIALIZER
+ && mMixerChannelMask != mChannelMask) {
+ LOG_ALWAYS_FATAL("HAL channel mask %#x does not match mixer channel mask %#x",
+ mChannelMask, mMixerChannelMask);
+ }
+
// TODO: We may also match on address as well as device type for
// AUDIO_DEVICE_OUT_BUS, AUDIO_DEVICE_OUT_ALL_A2DP, AUDIO_DEVICE_OUT_REMOTE_SUBMIX
if (type == MIXER || type == DIRECT || type == OFFLOAD) {
@@ -1986,6 +2067,7 @@
free(mSinkBuffer);
free(mMixerBuffer);
free(mEffectBuffer);
+ free(mPostSpatializerBuffer);
}
// Thread virtuals
@@ -2080,10 +2162,12 @@
write(fd, result.string(), result.size());
}
-void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args __unused)
+void AudioFlinger::PlaybackThread::dumpInternals_l(int fd, const Vector<String16>& args)
{
dprintf(fd, " Master volume: %f\n", mMasterVolume);
dprintf(fd, " Master mute: %s\n", mMasterMute ? "on" : "off");
+        dprintf(fd, "  Mixer channel mask: %#x (%s)\n",
+ mMixerChannelMask, channelMaskToString(mMixerChannelMask, true /* output */).c_str());
if (mHapticChannelMask != AUDIO_CHANNEL_NONE) {
dprintf(fd, " Haptic channel mask: %#x (%s)\n", mHapticChannelMask,
channelMaskToString(mHapticChannelMask, true /* output */).c_str());
@@ -2109,7 +2193,7 @@
}
if (output != nullptr) {
dprintf(fd, " Hal stream dump:\n");
- (void)output->stream->dump(fd);
+ (void)output->stream->dump(fd, args);
}
}
@@ -2397,11 +2481,11 @@
// all tracks in same audio session must share the same routing strategy otherwise
// conflicts will happen when tracks are moved from one output to another by audio policy
// manager
- product_strategy_t strategy = AudioSystem::getStrategyForStream(streamType);
+ product_strategy_t strategy = getStrategyForStream(streamType);
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> t = mTracks[i];
if (t != 0 && t->isExternalTrack()) {
- product_strategy_t actual = AudioSystem::getStrategyForStream(t->streamType());
+ product_strategy_t actual = getStrategyForStream(t->streamType());
if (sessionId == t->sessionId() && strategy != actual) {
ALOGE("createTrack_l() mismatched strategy; expected %u but found %u",
strategy, actual);
@@ -2445,7 +2529,7 @@
if (chain != 0) {
ALOGV("createTrack_l() setting main buffer %p", chain->inBuffer());
track->setMainBuffer(chain->inBuffer());
- chain->setStrategy(AudioSystem::getStrategyForStream(track->streamType()));
+ chain->setStrategy(getStrategyForStream(track->streamType()));
chain->incTrackCnt();
}
@@ -2613,8 +2697,19 @@
mLock.unlock();
const int intensity = AudioFlinger::onExternalVibrationStart(
track->getExternalVibration());
+ std::optional<media::AudioVibratorInfo> vibratorInfo;
+ {
+ // TODO(b/184194780): Use the vibrator information from the vibrator that will be
+ // used to play this track.
+ Mutex::Autolock _l(mAudioFlinger->mLock);
+ vibratorInfo = std::move(mAudioFlinger->getDefaultVibratorInfo_l());
+ }
mLock.lock();
track->setHapticIntensity(static_cast<os::HapticScale>(intensity));
+ if (vibratorInfo) {
+ track->setHapticMaxAmplitude(vibratorInfo->maxAmplitude);
+ }
+
// Haptic playback should be enabled by vibrator service.
if (track->getHapticPlaybackEnabled()) {
// Disable haptic playback of all active track to ensure only
@@ -2814,14 +2909,20 @@
if (!audio_is_output_channel(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not valid for output", mChannelMask);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkChannelMask(mChannelMask)) {
+ if (hasMixer() && !isValidPcmSinkChannelMask(mChannelMask)) {
LOG_ALWAYS_FATAL("HAL channel mask %#x not supported for mixed output",
mChannelMask);
}
+
+ if (mMixerChannelMask == AUDIO_CHANNEL_NONE) {
+ mMixerChannelMask = mChannelMask;
+ }
+
mChannelCount = audio_channel_count_from_out_mask(mChannelMask);
mBalance.setChannelMask(mChannelMask);
+ uint32_t mixerChannelCount = audio_channel_count_from_out_mask(mMixerChannelMask);
+
// Get actual HAL format.
status_t result = mOutput->stream->getAudioProperties(nullptr, nullptr, &mHALFormat);
LOG_ALWAYS_FATAL_IF(result != OK, "Error when retrieving output stream format: %d", result);
@@ -2831,8 +2932,7 @@
if (!audio_is_valid_format(mFormat)) {
LOG_ALWAYS_FATAL("HAL format %#x not valid for output", mFormat);
}
- if ((mType == MIXER || mType == DUPLICATING)
- && !isValidPcmSinkFormat(mFormat)) {
+ if (hasMixer() && !isValidPcmSinkFormat(mFormat)) {
LOG_FATAL("HAL format %#x not supported for mixed output",
mFormat);
}
@@ -2841,7 +2941,7 @@
LOG_ALWAYS_FATAL_IF(result != OK,
"Error when retrieving output stream buffer size: %d", result);
mFrameCount = mBufferSize / mFrameSize;
- if ((mType == MIXER || mType == DUPLICATING) && (mFrameCount & 15)) {
+ if (hasMixer() && (mFrameCount & 15)) {
ALOGW("HAL output buffer size is %zu frames but AudioMixer requires multiples of 16 frames",
mFrameCount);
}
@@ -2914,7 +3014,7 @@
}
mNormalFrameCount = multiplier * mFrameCount;
// round up to nearest 16 frames to satisfy AudioMixer
- if (mType == MIXER || mType == DUPLICATING) {
+ if (hasMixer()) {
mNormalFrameCount = (mNormalFrameCount + 15) & ~15;
}
ALOGI("HAL output buffer size %zu frames, normal sink buffer size %zu frames", mFrameCount,
@@ -2930,6 +3030,7 @@
// Originally this was int16_t[] array, need to remove legacy implications.
free(mSinkBuffer);
mSinkBuffer = NULL;
+
// For sink buffer size, we use the frame size from the downstream sink to avoid problems
// with non PCM formats for compressed music, e.g. AAC, and Offload threads.
const size_t sinkBufferSize = mNormalFrameCount * mFrameSize;
@@ -2941,7 +3042,7 @@
mMixerBuffer = NULL;
if (mMixerBufferEnabled) {
mMixerBufferFormat = AUDIO_FORMAT_PCM_FLOAT; // no longer valid: AUDIO_FORMAT_PCM_16_BIT.
- mMixerBufferSize = mNormalFrameCount * mChannelCount
+ mMixerBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mMixerBufferFormat);
(void)posix_memalign(&mMixerBuffer, 32, mMixerBufferSize);
}
@@ -2949,15 +3050,24 @@
mEffectBuffer = NULL;
if (mEffectBufferEnabled) {
mEffectBufferFormat = EFFECT_BUFFER_FORMAT;
- mEffectBufferSize = mNormalFrameCount * mChannelCount
+ mEffectBufferSize = mNormalFrameCount * mixerChannelCount
* audio_bytes_per_sample(mEffectBufferFormat);
(void)posix_memalign(&mEffectBuffer, 32, mEffectBufferSize);
}
+ if (mType == SPATIALIZER) {
+ free(mPostSpatializerBuffer);
+ mPostSpatializerBuffer = nullptr;
+ mPostSpatializerBufferSize = mNormalFrameCount * mChannelCount
+ * audio_bytes_per_sample(mEffectBufferFormat);
+ (void)posix_memalign(&mPostSpatializerBuffer, 32, mPostSpatializerBufferSize);
+ }
+
mHapticChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & AUDIO_CHANNEL_HAPTIC_ALL);
mChannelMask = static_cast<audio_channel_mask_t>(mChannelMask & ~mHapticChannelMask);
mHapticChannelCount = audio_channel_count_from_out_mask(mHapticChannelMask);
mChannelCount -= mHapticChannelCount;
+ mMixerChannelMask = static_cast<audio_channel_mask_t>(mMixerChannelMask & ~mHapticChannelMask);
// force reconfiguration of effect chains and engines to take new buffer size and audio
// parameters into account
@@ -3051,15 +3161,15 @@
// session AUDIO_SESSION_OUTPUT_MIX is placed in same strategy as MUSIC stream so that
// it is moved to correct output by audio policy manager when A2DP is connected or disconnected
if (sessionId == AUDIO_SESSION_OUTPUT_MIX) {
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
for (size_t i = 0; i < mTracks.size(); i++) {
sp<Track> track = mTracks[i];
if (sessionId == track->sessionId() && !track->isInvalid()) {
- return AudioSystem::getStrategyForStream(track->streamType());
+ return getStrategyForStream(track->streamType());
}
}
- return AudioSystem::getStrategyForStream(AUDIO_STREAM_MUSIC);
+ return getStrategyForStream(AUDIO_STREAM_MUSIC);
}
@@ -3336,23 +3446,34 @@
{
audio_session_t session = chain->sessionId();
sp<EffectBufferHalInterface> halInBuffer, halOutBuffer;
- status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
- mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
- mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
- &halInBuffer);
- if (result != OK) return result;
- halOutBuffer = halInBuffer;
- effect_buffer_t *buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
- ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
- if (!audio_is_global_session(session)) {
- // Only one effect chain can be present in direct output thread and it uses
- // the sink buffer as input
- if (mType != DIRECT) {
- size_t numSamples = mNormalFrameCount * (mChannelCount + mHapticChannelCount);
+ effect_buffer_t *buffer = nullptr; // only used for non global sessions
+
+    if (mType == SPATIALIZER) {
+        if (!audio_is_global_session(session)) {
+            // Player sessions on a spatializer output use a dedicated input buffer and
+            // either output multichannel to mEffectBuffer if the track is spatialized,
+            // or stereo to mPostSpatializerBuffer if it is not.
+ uint32_t channelMask;
+ bool isSessionSpatialized =
+ (hasAudioSession_l(session) & ThreadBase::SPATIALIZED_SESSION) != 0;
+ if (isSessionSpatialized) {
+ channelMask = mMixerChannelMask;
+ } else {
+ channelMask = mChannelMask;
+ }
+ size_t numSamples = mNormalFrameCount
+ * (audio_channel_count_from_out_mask(channelMask) + mHapticChannelCount);
status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
numSamples * sizeof(effect_buffer_t),
&halInBuffer);
if (result != OK) return result;
+
+ result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ isSessionSpatialized ? mEffectBuffer : mPostSpatializerBuffer,
+ isSessionSpatialized ? mEffectBufferSize : mPostSpatializerBufferSize,
+ &halOutBuffer);
+ if (result != OK) return result;
+
#ifdef FLOAT_EFFECT_CHAIN
buffer = halInBuffer->audioBuffer()->f32;
#else
@@ -3360,14 +3481,60 @@
#endif
ALOGV("addEffectChain_l() creating new input buffer %p session %d",
buffer, session);
- }
+ } else {
+ // A global session on a SPATIALIZER thread is either OUTPUT_STAGE or DEVICE
+ // - OUTPUT_STAGE session uses the mEffectBuffer as input buffer and
+ // mPostSpatializerBuffer as output buffer
+ // - DEVICE session uses the mPostSpatializerBuffer as input and output buffer.
+ status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mEffectBuffer, mEffectBufferSize, &halInBuffer);
+ if (result != OK) return result;
+ result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mPostSpatializerBuffer, mPostSpatializerBufferSize, &halOutBuffer);
+ if (result != OK) return result;
+ if (session == AUDIO_SESSION_DEVICE) {
+ halInBuffer = halOutBuffer;
+ }
+ }
+ } else {
+ status_t result = mAudioFlinger->mEffectsFactoryHal->mirrorBuffer(
+ mEffectBufferEnabled ? mEffectBuffer : mSinkBuffer,
+ mEffectBufferEnabled ? mEffectBufferSize : mSinkBufferSize,
+ &halInBuffer);
+ if (result != OK) return result;
+ halOutBuffer = halInBuffer;
+ ALOGV("addEffectChain_l() %p on thread %p for session %d", chain.get(), this, session);
+ if (!audio_is_global_session(session)) {
+ buffer = reinterpret_cast<effect_buffer_t*>(halInBuffer->externalData());
+ // Only one effect chain can be present in direct output thread and it uses
+ // the sink buffer as input
+ if (mType != DIRECT) {
+ size_t numSamples = mNormalFrameCount
+ * (audio_channel_count_from_out_mask(mMixerChannelMask)
+ + mHapticChannelCount);
+ status_t result = mAudioFlinger->mEffectsFactoryHal->allocateBuffer(
+ numSamples * sizeof(effect_buffer_t),
+ &halInBuffer);
+ if (result != OK) return result;
+#ifdef FLOAT_EFFECT_CHAIN
+ buffer = halInBuffer->audioBuffer()->f32;
+#else
+ buffer = halInBuffer->audioBuffer()->s16;
+#endif
+ ALOGV("addEffectChain_l() creating new input buffer %p session %d",
+ buffer, session);
+ }
+ }
+ }
+
+ if (!audio_is_global_session(session)) {
// Attach all tracks with same session ID to this chain.
for (size_t i = 0; i < mTracks.size(); ++i) {
sp<Track> track = mTracks[i];
if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p", track.get(),
- buffer);
+ ALOGV("addEffectChain_l() track->setMainBuffer track %p buffer %p",
+ track.get(), buffer);
track->setMainBuffer(buffer);
chain->incTrackCnt();
}
@@ -3376,11 +3543,13 @@
// indicate all active tracks in the chain
for (const sp<Track> &track : mActiveTracks) {
if (session == track->sessionId()) {
- ALOGV("addEffectChain_l() activating track %p on session %d", track.get(), session);
+ ALOGV("addEffectChain_l() activating track %p on session %d",
+ track.get(), session);
chain->incActiveTrackCnt();
}
}
}
+
chain->setThread(this);
chain->setInBuffer(halInBuffer);
chain->setOutBuffer(halOutBuffer);
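// Resulting buffer routing on a SPATIALIZER thread (sketch of the cases above):
//   spatialized session chain:     dedicated in buffer -> effects -> mEffectBuffer
//   non-spatialized session chain: dedicated in buffer -> effects -> mPostSpatializerBuffer
//   OUTPUT_STAGE chain:            mEffectBuffer (in) -> mPostSpatializerBuffer (out)
//   DEVICE chain:                  mPostSpatializerBuffer (in and out)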
@@ -3531,6 +3700,8 @@
audio_patch_handle_t lastDownstreamPatchHandle = AUDIO_PATCH_HANDLE_NONE;
+ sendCheckOutputStageEffectsEvent();
+
// loopCount is used for statistics and diagnostics.
for (int64_t loopCount = 0; !exitPending(); ++loopCount)
{
@@ -3542,6 +3713,7 @@
Vector< sp<EffectChain> > effectChains;
audio_session_t activeHapticSessionId = AUDIO_SESSION_NONE;
+ bool isHapticSessionSpatialized = false;
std::vector<sp<Track>> activeTracks;
// If the device is AUDIO_DEVICE_OUT_BUS, check for downstream latency.
@@ -3587,11 +3759,18 @@
}
}
+ if (mCheckOutputStageEffects.exchange(false)) {
+ checkOutputStageEffects();
+ }
+
{ // scope for mLock
Mutex::Autolock _l(mLock);
processConfigEvents_l();
+ if (mCheckOutputStageEffects.load()) {
+ continue;
+ }
// See comment at declaration of logString for why this is done under mLock
if (logString != NULL) {
@@ -3695,16 +3874,21 @@
// This must be done under the same lock as prepareTracks_l().
// The haptic data from the effect is at a higher priority than the one from track.
// TODO: Write haptic data directly to sink buffer when mixing.
- if (mHapticChannelCount > 0 && effectChains.size() > 0) {
+ if (mHapticChannelCount > 0) {
for (const auto& track : mActiveTracks) {
sp<EffectChain> effectChain = getEffectChain_l(track->sessionId());
- if (effectChain != nullptr && effectChain->containsHapticGeneratingEffect_l()) {
+ if (effectChain != nullptr
+ && effectChain->containsHapticGeneratingEffect_l()) {
activeHapticSessionId = track->sessionId();
+ isHapticSessionSpatialized =
+ mType == SPATIALIZER && track->canBeSpatialized();
break;
}
- if (track->getHapticPlaybackEnabled()) {
+ if (activeHapticSessionId == AUDIO_SESSION_NONE
+ && track->getHapticPlaybackEnabled()) {
activeHapticSessionId = track->sessionId();
- break;
+ isHapticSessionSpatialized =
+ mType == SPATIALIZER && track->canBeSpatialized();
}
}
}
@@ -3754,6 +3938,8 @@
//
// mMixerBufferValid is only set true by MixerThread::prepareTracks_l().
// TODO use mSleepTimeUs == 0 as an additional condition.
+ uint32_t mixerChannelCount = mEffectBufferValid ?
+ audio_channel_count_from_out_mask(mMixerChannelMask) : mChannelCount;
if (mMixerBufferValid) {
void *buffer = mEffectBufferValid ? mEffectBuffer : mSinkBuffer;
audio_format_t format = mEffectBufferValid ? mEffectBufferFormat : mFormat;
@@ -3774,7 +3960,7 @@
}
memcpy_by_audio_format(buffer, format, mMixerBuffer, mMixerBufferFormat,
- mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+ mNormalFrameCount * (mixerChannelCount + mHapticChannelCount));
// If we're going directly to the sink and there are haptic channels,
// we should adjust channels as the sample data is partially interleaved
@@ -3807,8 +3993,16 @@
&& activeHapticSessionId == effectChains[i]->sessionId()) {
// Haptic data is active in this case, copy it directly from
// in buffer to out buffer.
+ uint32_t hapticSessionChannelCount = mEffectBufferValid ?
+ audio_channel_count_from_out_mask(mMixerChannelMask) :
+ mChannelCount;
+ if (mType == SPATIALIZER && !isHapticSessionSpatialized) {
+ hapticSessionChannelCount = mChannelCount;
+ }
+
const size_t audioBufferSize = mNormalFrameCount
- * audio_bytes_per_frame(mChannelCount, EFFECT_BUFFER_FORMAT);
+ * audio_bytes_per_frame(hapticSessionChannelCount,
+ EFFECT_BUFFER_FORMAT);
memcpy_by_audio_format(
(uint8_t*)effectChains[i]->outBuffer() + audioBufferSize,
EFFECT_BUFFER_FORMAT,
@@ -3834,9 +4028,9 @@
// TODO use mSleepTimeUs == 0 as an additional condition.
if (mEffectBufferValid) {
//ALOGV("writing effect buffer to sink buffer format %#x", mFormat);
-
+ void *effectBuffer = (mType == SPATIALIZER) ? mPostSpatializerBuffer : mEffectBuffer;
if (requireMonoBlend()) {
- mono_blend(mEffectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
+ mono_blend(effectBuffer, mEffectBufferFormat, mChannelCount, mNormalFrameCount,
true /*limit*/);
}
@@ -3845,11 +4039,30 @@
// We do it here if there is no FastMixer.
// mBalance detects zero balance within the class for speed (not needed here).
mBalance.setBalance(mMasterBalance.load());
- mBalance.process((float *)mEffectBuffer, mNormalFrameCount);
+ mBalance.process((float *)effectBuffer, mNormalFrameCount);
}
- memcpy_by_audio_format(mSinkBuffer, mFormat, mEffectBuffer, mEffectBufferFormat,
+            // For a SPATIALIZER thread, move the haptic channels from mEffectBuffer to
+            // mPostSpatializerBuffer if the haptics track is spatialized.
+            // Otherwise, the haptic channels are already in mPostSpatializerBuffer.
+            // For other thread types, the haptic channels are already in mEffectBuffer.
+ if (mType == SPATIALIZER && isHapticSessionSpatialized) {
+ const size_t srcBufferSize = mNormalFrameCount *
+ audio_bytes_per_frame(audio_channel_count_from_out_mask(mMixerChannelMask),
+ mEffectBufferFormat);
+ const size_t dstBufferSize = mNormalFrameCount
+ * audio_bytes_per_frame(mChannelCount, mEffectBufferFormat);
+
+ memcpy_by_audio_format((uint8_t*)mPostSpatializerBuffer + dstBufferSize,
+ mEffectBufferFormat,
+ (uint8_t*)mEffectBuffer + srcBufferSize,
+ mEffectBufferFormat,
+ mNormalFrameCount * mHapticChannelCount);
+ }
+
+ memcpy_by_audio_format(mSinkBuffer, mFormat, effectBuffer, mEffectBufferFormat,
mNormalFrameCount * (mChannelCount + mHapticChannelCount));
+
// The sample data is partially interleaved when haptic channels exist,
// we need to adjust channels here.
if (mHapticChannelCount > 0) {
@@ -4448,8 +4661,8 @@
// ----------------------------------------------------------------------------
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, bool systemReady, type_t type)
- : PlaybackThread(audioFlinger, output, id, type, systemReady),
+ audio_io_handle_t id, bool systemReady, type_t type, audio_config_base_t *mixerConfig)
+ : PlaybackThread(audioFlinger, output, id, type, systemReady, mixerConfig),
// mAudioMixer below
// mFastMixer below
mFastMixerFutex(0),
@@ -4487,26 +4700,30 @@
// initialize fast mixer depending on configuration
bool initFastMixer;
- switch (kUseFastMixer) {
- case FastMixer_Never:
+ if (mType == SPATIALIZER) {
initFastMixer = false;
- break;
- case FastMixer_Always:
- initFastMixer = true;
- break;
- case FastMixer_Static:
- case FastMixer_Dynamic:
- // FastMixer was designed to operate with a HAL that pulls at a regular rate,
- // where the period is less than an experimentally determined threshold that can be
- // scheduled reliably with CFS. However, the BT A2DP HAL is
- // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
- initFastMixer = mFrameCount < mNormalFrameCount
- && Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty();
- break;
+ } else {
+ switch (kUseFastMixer) {
+ case FastMixer_Never:
+ initFastMixer = false;
+ break;
+ case FastMixer_Always:
+ initFastMixer = true;
+ break;
+ case FastMixer_Static:
+ case FastMixer_Dynamic:
+ // FastMixer was designed to operate with a HAL that pulls at a regular rate,
+ // where the period is less than an experimentally determined threshold that can be
+ // scheduled reliably with CFS. However, the BT A2DP HAL is
+ // bursty (does not pull at a regular rate) and so cannot operate with FastMixer.
+ initFastMixer = mFrameCount < mNormalFrameCount
+ && Intersection(outDeviceTypes(), getAudioDeviceOutAllA2dpSet()).empty();
+ break;
+ }
+ ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
+ "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
+ mFrameCount, mNormalFrameCount);
}
- ALOGW_IF(initFastMixer == false && mFrameCount < mNormalFrameCount,
- "FastMixer is preferred for this sink as frameCount %zu is less than threshold %zu",
- mFrameCount, mNormalFrameCount);
if (initFastMixer) {
audio_format_t fastMixerFormat;
if (mMixerBufferEnabled && mEffectBufferEnabled) {
@@ -4566,6 +4783,7 @@
fastTrack->mFormat = mFormat; // mPipeSink format for audio to FastMixer
fastTrack->mHapticPlaybackEnabled = mHapticChannelMask != AUDIO_CHANNEL_NONE;
fastTrack->mHapticIntensity = os::HapticScale::NONE;
+ fastTrack->mHapticMaxAmplitude = NAN;
fastTrack->mGeneration++;
state->mFastTracksGen++;
state->mTrackMask = 1;
@@ -4861,6 +5079,9 @@
// before effects processing or output.
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
+ if (mType == SPATIALIZER) {
+ memset(mSinkBuffer, 0, mSinkBufferSize);
+ }
} else {
memset(mSinkBuffer, 0, mSinkBufferSize);
}
@@ -5103,6 +5324,7 @@
fastTrack->mFormat = track->mFormat;
fastTrack->mHapticPlaybackEnabled = track->getHapticPlaybackEnabled();
fastTrack->mHapticIntensity = track->getHapticIntensity();
+ fastTrack->mHapticMaxAmplitude = track->getHapticMaxAmplitude();
fastTrack->mGeneration++;
state->mTrackMask |= 1 << j;
didModify = true;
@@ -5352,11 +5574,21 @@
trackId,
AudioMixer::TRACK,
AudioMixer::CHANNEL_MASK, (void *)(uintptr_t)track->channelMask());
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MIXER_CHANNEL_MASK,
- (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+
+ if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)(mChannelMask | mHapticChannelMask));
+ } else {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_CHANNEL_MASK,
+ (void *)(uintptr_t)(mMixerChannelMask | mHapticChannelMask));
+ }
+
// limit track sample rate to 2 x output sample rate, which changes at re-configuration
uint32_t maxSampleRate = mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX;
uint32_t reqSampleRate = proxy->getSampleRate();
@@ -5393,16 +5625,27 @@
if (mMixerBufferEnabled
&& (track->mainBuffer() == mSinkBuffer
|| track->mainBuffer() == mMixerBuffer)) {
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
- mAudioMixer->setParameter(
- trackId,
- AudioMixer::TRACK,
- AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
- // TODO: override track->mainBuffer()?
- mMixerBufferValid = true;
+ if (mType == SPATIALIZER && !track->canBeSpatialized()) {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_FORMAT, (void *)mEffectBufferFormat);
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MAIN_BUFFER, (void *)mPostSpatializerBuffer);
+ } else {
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MIXER_FORMAT, (void *)mMixerBufferFormat);
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::MAIN_BUFFER, (void *)mMixerBuffer);
+ // TODO: override track->mainBuffer()?
+ mMixerBufferValid = true;
+ }
} else {
mAudioMixer->setParameter(
trackId,
@@ -5425,6 +5668,10 @@
trackId,
AudioMixer::TRACK,
AudioMixer::HAPTIC_INTENSITY, (void *)(uintptr_t)track->getHapticIntensity());
+ mAudioMixer->setParameter(
+ trackId,
+ AudioMixer::TRACK,
+ AudioMixer::HAPTIC_MAX_AMPLITUDE, (void *)(&(track->mHapticMaxAmplitude)));
// reset retry count
track->mRetryCount = kMaxTrackRetries;
@@ -5575,7 +5822,8 @@
// remove all the tracks that need to be...
removeTracks_l(*tracksToRemove);
- if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0) {
+ if (getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX) != 0 ||
+ getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE) != 0) {
mEffectBufferValid = true;
}
@@ -5583,12 +5831,17 @@
// as long as there are effects we should clear the effects buffer, to avoid
// passing a non-clean buffer to the effect chain
memset(mEffectBuffer, 0, mEffectBufferSize);
+ if (mType == SPATIALIZER) {
+ memset(mPostSpatializerBuffer, 0, mPostSpatializerBufferSize);
+ }
}
// sink or mix buffer must be cleared if all tracks are connected to an
// effect chain as in this case the mixer will not write to the sink or mix buffer
// and track effects will accumulate into it
- if ((mBytesRemaining == 0) && ((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
- (mixedTracks == 0 && fastTracks > 0))) {
+ // always clear sink buffer for spatializer output as the output of the spatializer
+ // effect will be accumulated into it
+ if ((mBytesRemaining == 0) && (((mixedTracks != 0 && mixedTracks == tracksWithEffect) ||
+ (mixedTracks == 0 && fastTracks > 0)) || (mType == SPATIALIZER))) {
// FIXME as a performance optimization, should remember previous zero status
if (mMixerBufferValid) {
memset(mMixerBuffer, 0, mMixerBufferSize);
@@ -5626,6 +5879,20 @@
return trackCount;
}
+bool AudioFlinger::PlaybackThread::checkRunningTimestamp()
+{
+ uint64_t position = 0;
+ struct timespec unused;
+ const status_t ret = mOutput->getPresentationPosition(&position, &unused);
+ if (ret == NO_ERROR) {
+ if (position != mLastCheckedTimestampPosition) {
+ mLastCheckedTimestampPosition = position;
+ return true;
+ }
+ }
+ return false;
+}
+
// isTrackAllowed_l() must be called with ThreadBase::mLock held
bool AudioFlinger::MixerThread::isTrackAllowed_l(
audio_channel_mask_t channelMask, audio_format_t format,
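The checkRunningTimestamp() helper added above centralizes the underrun heuristic that DirectOutputThread and OffloadThread apply below: when a track runs out of buffers, the thread asks the HAL for its presentation position and re-arms the retry counter as long as that position keeps advancing. A minimal standalone model of the logic, with the HAL query stubbed out as parameters (only mLastCheckedTimestampPosition is a real member name; the rest is illustrative):

    #include <cstdint>

    struct UnderrunCheckModel {
        // ~0 marks "no position seen yet"; flushHw_l() restores it so the
        // first check after a flush reports progress once the HAL advances.
        uint64_t mLastCheckedTimestampPosition = ~0ULL;

        // Returns true when the HAL frame position moved since the last
        // call, i.e. the stream is still draining even though the track
        // has no buffers, so the caller re-arms mRetryCount instead of
        // disabling the track.
        bool checkRunningTimestamp(bool halOk, uint64_t halPosition) {
            if (halOk && halPosition != mLastCheckedTimestampPosition) {
                mLastCheckedTimestampPosition = halPosition;
                return true;
            }
            return false;
        }
    };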
@@ -6054,19 +6321,24 @@
// fill a buffer, then remove it from active list.
// Only consider last track started for mixer state control
if (--(track->mRetryCount) <= 0) {
- ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
- tracksToRemove->add(track);
- // indicate to client process that the track was disabled because of underrun;
- // it will then automatically call start() when data is available
- track->disable();
- // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
- // unlike mixerthread, HAL can be paused for direct output
- ALOGW("pause because of UNDERRUN, framesReady = %zu,"
- "minFrames = %u, mFormat = %#x",
- framesReady, minFrames, mFormat);
- if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
- doHwPause = true;
- mHwPaused = true;
+ const bool running = checkRunningTimestamp();
+ if (running) { // still running, give us more time.
+ track->mRetryCount = kMaxTrackRetriesOffload;
+ } else {
+ ALOGV("BUFFER TIMEOUT: remove track(%d) from active list", trackId);
+ tracksToRemove->add(track);
+ // indicate to client process that the track was disabled because of
+ // underrun; it will then automatically call start() when data is available
+ track->disable();
+ // only do hw pause when track is going to be removed due to BUFFER TIMEOUT.
+ // unlike mixerthread, HAL can be paused for direct output
+ ALOGW("pause because of UNDERRUN, framesReady = %zu,"
+ "minFrames = %u, mFormat = %#x",
+ framesReady, minFrames, mFormat);
+ if (last && mHwSupportsPause && !mHwPaused && !mStandby) {
+ doHwPause = true;
+ mHwPaused = true;
+ }
}
} else if (last) {
mixerStatus = MIXER_TRACKS_ENABLED;
@@ -6277,6 +6549,7 @@
void AudioFlinger::DirectOutputThread::flushHw_l()
{
+ PlaybackThread::flushHw_l();
mOutput->flush();
mHwPaused = false;
mFlushPending = false;
@@ -6412,8 +6685,7 @@
AudioFlinger::OffloadThread::OffloadThread(const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output, audio_io_handle_t id, bool systemReady)
: DirectOutputThread(audioFlinger, output, id, OFFLOAD, systemReady),
- mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true),
- mOffloadUnderrunPosition(~0LL)
+ mPausedWriteLength(0), mPausedBytesRemaining(0), mKeepWakeLock(true)
{
//FIXME: mStandby should be set to true by ThreadBase constructor
mStandby = true;
@@ -6630,19 +6902,7 @@
// No buffers for this track. Give it a few chances to
// fill a buffer, then remove it from active list.
if (--(track->mRetryCount) <= 0) {
- bool running = false;
- uint64_t position = 0;
- struct timespec unused;
- // The running check restarts the retry counter at least once.
- status_t ret = mOutput->stream->getPresentationPosition(&position, &unused);
- if (ret == NO_ERROR && position != mOffloadUnderrunPosition) {
- running = true;
- mOffloadUnderrunPosition = position;
- }
- if (ret == NO_ERROR) {
- ALOGVV("underrun counter, running(%d): %lld vs %lld", running,
- (long long)position, (long long)mOffloadUnderrunPosition);
- }
+ const bool running = checkRunningTimestamp();
if (running) { // still running, give us more time.
track->mRetryCount = kMaxTrackRetriesOffload;
} else {
@@ -6713,7 +6973,6 @@
mPausedBytesRemaining = 0;
// reset bytes written count to reflect that DSP buffers are empty after flush.
mBytesWritten = 0;
- mOffloadUnderrunPosition = ~0LL;
if (mUseAsyncWrite) {
// discard any pending drain or write ack by incrementing sequence
@@ -6971,6 +7230,69 @@
MixerThread::cacheParameters_l();
}
+// ----------------------------------------------------------------------------
+
+AudioFlinger::SpatializerThread::SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig)
+ : MixerThread(audioFlinger, output, id, systemReady, SPATIALIZER, mixerConfig)
+{
+}
+
+void AudioFlinger::SpatializerThread::checkOutputStageEffects()
+{
+ bool hasVirtualizer = false;
+ bool hasDownMixer = false;
+ sp<EffectHandle> finalDownMixer;
+ {
+ Mutex::Autolock _l(mLock);
+ sp<EffectChain> chain = getEffectChain_l(AUDIO_SESSION_OUTPUT_STAGE);
+ if (chain != 0) {
+ hasVirtualizer = chain->getEffectFromType_l(FX_IID_SPATIALIZER) != nullptr;
+ hasDownMixer = chain->getEffectFromType_l(EFFECT_UIID_DOWNMIX) != nullptr;
+ }
+
+ finalDownMixer = mFinalDownMixer;
+ mFinalDownMixer.clear();
+ }
+
+ if (hasVirtualizer) {
+ if (finalDownMixer != nullptr) {
+ int32_t ret;
+ finalDownMixer->disable(&ret);
+ }
+ finalDownMixer.clear();
+ } else if (!hasDownMixer) {
+ std::vector<effect_descriptor_t> descriptors;
+ status_t status = mAudioFlinger->mEffectsFactoryHal->getDescriptors(
+ EFFECT_UIID_DOWNMIX, &descriptors);
+ if (status != NO_ERROR) {
+ return;
+ }
+ ALOG_ASSERT(!descriptors.empty(),
+ "%s getDescriptors() returned no error but empty list", __func__);
+
+ finalDownMixer = createEffect_l(nullptr /*client*/, nullptr /*effectClient*/,
+ 0 /*priority*/, AUDIO_SESSION_OUTPUT_STAGE, &descriptors[0], nullptr /*enabled*/,
+ &status, false /*pinned*/, false /*probe*/, false /*notifyFramesProcessed*/);
+
+ if (finalDownMixer == nullptr || (status != NO_ERROR && status != ALREADY_EXISTS)) {
+ ALOGW("%s error creating downmixer %d", __func__, status);
+ finalDownMixer.clear();
+ } else {
+ int32_t ret;
+ finalDownMixer->enable(&ret);
+ }
+ }
+
+ {
+ Mutex::Autolock _l(mLock);
+ mFinalDownMixer = finalDownMixer;
+ }
+}
+
// ----------------------------------------------------------------------------
// Record
@@ -9278,7 +9600,7 @@
mActiveTracks.add(track);
sp<EffectChain> chain = getEffectChain_l(mSessionId);
if (chain != 0) {
- chain->setStrategy(AudioSystem::getStrategyForStream(streamType()));
+ chain->setStrategy(getStrategyForStream(streamType()));
chain->incTrackCnt();
chain->incActiveTrackCnt();
}
diff --git a/services/audioflinger/Threads.h b/services/audioflinger/Threads.h
index 16082a9..04ad20e 100644
--- a/services/audioflinger/Threads.h
+++ b/services/audioflinger/Threads.h
@@ -32,6 +32,7 @@
OFFLOAD, // Thread class is OffloadThread
MMAP_PLAYBACK, // Thread class for MMAP playback stream
MMAP_CAPTURE, // Thread class for MMAP capture stream
+ SPATIALIZER, // Thread class is SpatializerThread
// If you add any values here, also update ThreadBase::threadTypeToString()
};
@@ -53,7 +54,8 @@
CFG_EVENT_CREATE_AUDIO_PATCH,
CFG_EVENT_RELEASE_AUDIO_PATCH,
CFG_EVENT_UPDATE_OUT_DEVICE,
- CFG_EVENT_RESIZE_BUFFER
+ CFG_EVENT_RESIZE_BUFFER,
+ CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS
};
class ConfigEventData: public RefBase {
@@ -87,7 +89,13 @@
public:
virtual ~ConfigEvent() {}
- void dump(char *buffer, size_t size) { mData->dump(buffer, size); }
+ void dump(char *buffer, size_t size) {
+ snprintf(buffer, size, "Event type: %d\n", mType);
+ if (mData != nullptr) {
+ // append to, rather than overwrite, the header written above
+ const size_t len = strlen(buffer);
+ snprintf(buffer + len, len < size ? size - len : 0, "Data:\n");
+ const size_t len2 = strlen(buffer);
+ mData->dump(buffer + len2, len2 < size ? size - len2 : 0);
+ }
+ }
const int mType; // event type e.g. CFG_EVENT_IO
Mutex mLock; // mutex associated with mCond
@@ -110,7 +118,7 @@
mEvent(event), mPid(pid), mPortId(portId) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "IO event: event %d\n", mEvent);
+ snprintf(buffer, size, "- IO event: event %d\n", mEvent);
}
const audio_io_config_event mEvent;
@@ -133,7 +141,7 @@
mPid(pid), mTid(tid), mPrio(prio), mForApp(forApp) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Prio event: pid %d, tid %d, prio %d, for app? %d\n",
+ snprintf(buffer, size, "- Prio event: pid %d, tid %d, prio %d, for app? %d\n",
mPid, mTid, mPrio, mForApp);
}
@@ -158,7 +166,7 @@
mKeyValuePairs(keyValuePairs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "KeyValue: %s\n", mKeyValuePairs.string());
+ snprintf(buffer, size, "- KeyValue: %s\n", mKeyValuePairs.string());
}
const String8 mKeyValuePairs;
@@ -181,7 +189,7 @@
mPatch(patch), mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
const struct audio_patch mPatch;
@@ -205,7 +213,7 @@
mHandle(handle) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Patch handle: %u\n", mHandle);
+ snprintf(buffer, size, "- Patch handle: %u\n", mHandle);
}
audio_patch_handle_t mHandle;
@@ -227,7 +235,7 @@
mOutDevices(outDevices) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "Devices: %s", android::toString(mOutDevices).c_str());
+ snprintf(buffer, size, "- Devices: %s", android::toString(mOutDevices).c_str());
}
DeviceDescriptorBaseVector mOutDevices;
@@ -249,7 +257,7 @@
mMaxSharedAudioHistoryMs(maxSharedAudioHistoryMs) {}
virtual void dump(char *buffer, size_t size) {
- snprintf(buffer, size, "mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
+ snprintf(buffer, size, "- mMaxSharedAudioHistoryMs: %d", mMaxSharedAudioHistoryMs);
}
int32_t mMaxSharedAudioHistoryMs;
@@ -265,6 +273,16 @@
virtual ~ResizeBufferConfigEvent() {}
};
+ class CheckOutputStageEffectsEvent : public ConfigEvent {
+ public:
+ CheckOutputStageEffectsEvent() :
+ ConfigEvent(CFG_EVENT_CHECK_OUTPUT_STAGE_EFFECTS) {
+ }
+
+ virtual ~CheckOutputStageEffectsEvent() {}
+ };
+
+
class PMDeathRecipient : public IBinder::DeathRecipient {
public:
explicit PMDeathRecipient(const wp<ThreadBase>& thread) : mThread(thread) {}
@@ -290,8 +308,11 @@
// dynamic externally-visible
uint32_t sampleRate() const { return mSampleRate; }
audio_channel_mask_t channelMask() const { return mChannelMask; }
+ virtual audio_channel_mask_t mixerChannelMask() const { return mChannelMask; }
+
audio_format_t format() const { return mHALFormat; }
uint32_t channelCount() const { return mChannelCount; }
+
// Called by AudioFlinger::frameCount(audio_io_handle_t output) and effects,
// and returns the [normal mix] buffer's frame count.
virtual size_t frameCount() const = 0;
@@ -330,7 +351,11 @@
status_t sendUpdateOutDeviceConfigEvent(
const DeviceDescriptorBaseVector& outDevices);
void sendResizeBufferConfigEvent_l(int32_t maxSharedAudioHistoryMs);
+ void sendCheckOutputStageEffectsEvent();
+ void sendCheckOutputStageEffectsEvent_l();
+
void processConfigEvents_l();
+ virtual void setCheckOutputStageEffects() {}
virtual void cacheParameters_l() = 0;
virtual status_t createAudioPatch_l(const struct audio_patch *patch,
audio_patch_handle_t *handle) = 0;
@@ -383,7 +408,8 @@
int *enabled,
status_t *status /*non-NULL*/,
bool pinned,
- bool probe);
+ bool probe,
+ bool notifyFramesProcessed);
// return values for hasAudioSession (bit field)
enum effect_state {
@@ -391,8 +417,10 @@
// effect
TRACK_SESSION = 0x2, // the audio session corresponds to at least one
// track
- FAST_SESSION = 0x4 // the audio session corresponds to at least one
+ FAST_SESSION = 0x4, // the audio session corresponds to at least one
// fast track
+ SPATIALIZED_SESSION = 0x8 // the audio session corresponds to at least one
+ // spatialized track
};
// get effect chain corresponding to session Id.
@@ -433,6 +461,7 @@
// - EFFECT_SESSION if effects on this audio session exist in one chain
// - TRACK_SESSION if tracks on this audio session exist
// - FAST_SESSION if fast tracks on this audio session exist
+ // - SPATIALIZED_SESSION if spatialized tracks on this audio session exist
virtual uint32_t hasAudioSession_l(audio_session_t sessionId) const = 0;
uint32_t hasAudioSession(audio_session_t sessionId) const {
Mutex::Autolock _l(mLock);
@@ -454,6 +483,9 @@
if (track->isFastTrack()) {
result |= FAST_SESSION; // caution, only represents first track.
}
+ if (track->canBeSpatialized()) {
+ result |= SPATIALIZED_SESSION; // caution, only represents first track.
+ }
break;
}
}
@@ -574,6 +606,8 @@
return INVALID_OPERATION;
}
+ product_strategy_t getStrategyForStream(audio_stream_type_t stream) const;
+
virtual void dumpInternals_l(int fd __unused, const Vector<String16>& args __unused)
{ }
virtual void dumpTracks_l(int fd __unused, const Vector<String16>& args __unused) { }
@@ -824,7 +858,8 @@
static const nsecs_t kMaxNextBufferDelayNs = 100000000;
PlaybackThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
- audio_io_handle_t id, type_t type, bool systemReady);
+ audio_io_handle_t id, type_t type, bool systemReady,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~PlaybackThread();
// Thread virtuals
@@ -881,6 +916,8 @@
mActiveTracks.updatePowerState(this, true /* force */);
}
+ virtual void checkOutputStageEffects() {}
+
void dumpInternals_l(int fd, const Vector<String16>& args) override;
void dumpTracks_l(int fd, const Vector<String16>& args) override;
@@ -973,6 +1010,10 @@
virtual size_t frameCount() const { return mNormalFrameCount; }
+ audio_channel_mask_t mixerChannelMask() const override {
+ return mMixerChannelMask;
+ }
+
status_t getTimestamp_l(AudioTimestamp& timestamp);
void addPatchTrack(const sp<PatchTrack>& track);
@@ -1015,6 +1056,9 @@
PlaybackThread::Track* getTrackById_l(audio_port_handle_t trackId);
+ bool hasMixer() const {
+ return mType == MIXER || mType == DUPLICATING || mType == SPATIALIZER;
+ }
protected:
// updated by readOutputParameters_l()
size_t mNormalFrameCount; // normal mixer and effects
@@ -1084,6 +1128,15 @@
// for any processing (including output processing).
bool mEffectBufferValid;
+ // Frame size aligned buffer used as input and output to all post processing effects
+ // except the Spatializer in a SPATIALIZER thread. Non-spatialized tracks are mixed into
+ // this buffer so that post processing effects can be applied.
+ void* mPostSpatializerBuffer = nullptr;
+
+ // Size of mPostSpatializerBuffer in bytes
+ size_t mPostSpatializerBufferSize;
+
+
// suspend count, > 0 means suspended. While suspended, the thread continues to pull from
// tracks and mix, but doesn't write to HAL. A2DP and SCO HAL implementations can't handle
// concurrent use of both of them, so Audio Policy Service suspends one of the threads to
@@ -1101,6 +1154,9 @@
// haptic playback.
audio_channel_mask_t mHapticChannelMask = AUDIO_CHANNEL_NONE;
uint32_t mHapticChannelCount = 0;
+
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
+
private:
// mMasterMute is in both PlaybackThread and in AudioFlinger. When a
// PlaybackThread needs to find out if master-muted, it checks it's local
@@ -1134,6 +1190,9 @@
// Cache various calculated values, at threadLoop() entry and after a parameter change
virtual void cacheParameters_l();
+ void setCheckOutputStageEffects() override {
+ mCheckOutputStageEffects.store(true);
+ }
virtual uint32_t correctLatency_l(uint32_t latency) const;
@@ -1314,6 +1373,16 @@
// audio patch used by the downstream software patch.
// Only used if ThreadBase::mIsMsdDevice is true.
struct audio_patch mDownStreamPatch;
+
+ std::atomic_bool mCheckOutputStageEffects{};
+
+ // A differential check on the timestamps: reports whether the timestamp frame
+ // position has changed since the last call to checkRunningTimestamp.
+ uint64_t mLastCheckedTimestampPosition = ~0LL;
+
+ bool checkRunningTimestamp();
+
+ virtual void flushHw_l() { mLastCheckedTimestampPosition = ~0LL; }
};
class MixerThread : public PlaybackThread {
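Read with the Threads.cpp hunks above, the new mixerChannelMask()/mMixerChannelMask members encode a per-track routing rule on SPATIALIZER threads: tracks that cannot be spatialized are mixed at the sink channel mask into the post-spatializer buffer, while spatializable tracks are mixed at the wider mixer channel mask into mMixerBuffer and reach the sink through the spatializer effect. A condensed sketch of that rule (simplified types, hypothetical routeTrack helper, not the AudioFlinger API):

    #include <cstdint>
    #include <utility>

    enum class ThreadType { MIXER, DUPLICATING, OFFLOAD, SPATIALIZER };

    // Returns the channel mask the AudioMixer mixes at and the buffer the
    // mixed frames land in, mirroring prepareTracks_l() in the patch.
    std::pair<uint32_t, const char*> routeTrack(ThreadType type,
                                                bool canBeSpatialized,
                                                uint32_t sinkMask,    // mChannelMask
                                                uint32_t mixerMask) { // mMixerChannelMask
        if (type == ThreadType::SPATIALIZER && !canBeSpatialized) {
            // Non-spatialized tracks: sink mask, post-spatializer buffer,
            // so output-stage effects still apply to them.
            return { sinkMask, "mPostSpatializerBuffer" };
        }
        // Spatialized tracks (or any track on a non-SPATIALIZER thread):
        // mixer mask, mMixerBuffer, feeding the spatializer/output stage.
        return { mixerMask, "mMixerBuffer" };
    }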
@@ -1322,7 +1391,8 @@
AudioStreamOut* output,
audio_io_handle_t id,
bool systemReady,
- type_t type = MIXER);
+ type_t type = MIXER,
+ audio_config_base_t *mixerConfig = nullptr);
virtual ~MixerThread();
// Thread virtuals
@@ -1430,7 +1500,7 @@
virtual bool checkForNewParameter_l(const String8& keyValuePair,
status_t& status);
- virtual void flushHw_l();
+ void flushHw_l() override;
void setMasterBalance(float balance) override;
@@ -1495,7 +1565,7 @@
OffloadThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, bool systemReady);
virtual ~OffloadThread() {};
- virtual void flushHw_l();
+ void flushHw_l() override;
protected:
// threadLoop snippets
@@ -1512,10 +1582,6 @@
size_t mPausedWriteLength; // length in bytes of write interrupted by pause
size_t mPausedBytesRemaining; // bytes still waiting in mixbuffer after resume
bool mKeepWakeLock; // keep wake lock while waiting for write callback
- uint64_t mOffloadUnderrunPosition; // Current frame position for offloaded playback
- // used and valid only during underrun. ~0 if
- // no underrun has occurred during playback and
- // is not reset on standby.
};
class AsyncCallbackThread : public Thread {
@@ -1611,6 +1677,24 @@
}
};
+class SpatializerThread : public MixerThread {
+public:
+ SpatializerThread(const sp<AudioFlinger>& audioFlinger,
+ AudioStreamOut* output,
+ audio_io_handle_t id,
+ bool systemReady,
+ audio_config_base_t *mixerConfig);
+ ~SpatializerThread() override {}
+
+ bool hasFastMixer() const override { return false; }
+
+protected:
+ void checkOutputStageEffects() override;
+
+private:
+ sp<EffectHandle> mFinalDownMixer;
+};
+
// record thread
class RecordThread : public ThreadBase
{
diff --git a/services/audioflinger/TrackBase.h b/services/audioflinger/TrackBase.h
index 5311fe2..b582b3a 100644
--- a/services/audioflinger/TrackBase.h
+++ b/services/audioflinger/TrackBase.h
@@ -107,6 +107,9 @@
audio_attributes_t attributes() const { return mAttr; }
+ bool canBeSpatialized() const { return mIsOut && (mAttr.flags
+ & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) == 0; }
+
#ifdef TEE_SINK
void dumpTee(int fd, const std::string &reason) const {
mTee.dump(fd, reason);
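The canBeSpatialized() predicate above reduces to a pure function of the track direction and two attribute flags; a self-checking sketch, assuming the flag constants from system/audio.h:

    #include <cassert>
    #include <system/audio.h>

    // Illustrative mirror of TrackBase::canBeSpatialized(); `isOut`
    // corresponds to mIsOut, `flags` to mAttr.flags.
    static bool canBeSpatializedModel(bool isOut, audio_flags_mask_t flags) {
        return isOut && (flags
                & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) == 0;
    }

    int main() {
        assert(canBeSpatializedModel(true, AUDIO_FLAG_NONE));     // plain output track
        assert(!canBeSpatializedModel(true, AUDIO_FLAG_CONTENT_SPATIALIZED));
        assert(!canBeSpatializedModel(true, AUDIO_FLAG_NEVER_SPATIALIZE));
        assert(!canBeSpatializedModel(false, AUDIO_FLAG_NONE));   // capture side
        return 0;
    }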
diff --git a/services/audiopolicy/AudioPolicyInterface.h b/services/audiopolicy/AudioPolicyInterface.h
index 602671e..39c3dc5 100644
--- a/services/audiopolicy/AudioPolicyInterface.h
+++ b/services/audiopolicy/AudioPolicyInterface.h
@@ -333,6 +333,50 @@
virtual status_t getDevicesForRoleAndCapturePreset(audio_source_t audioSource,
device_role_t role,
AudioDeviceTypeAddrVector &devices) = 0;
+
+ /**
+ * Queries if some kind of spatialization will be performed if the audio playback context
+ * described by the provided arguments is present.
+ * The context is made of:
+ * - The audio attributes describing the playback use case.
+ * - The audio configuration describing the audio format, channels, sampling rate ...
+ * - The devices describing the sink audio device selected for playback.
+ * All arguments are optional and only the specified arguments are used to match against
+ * supported criteria. For instance, supplying no argument will tell if spatialization is
+ * supported or not in general.
+ * @param attr audio attributes describing the playback use case
+ * @param config audio configuration describing the audio format, channels, sampling rate...
+ * @param devices the sink audio device selected for playback
+ * @return true if spatialization is enabled for this context,
+ * false otherwise
+ */
+ virtual bool canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const = 0;
+
+ /**
+ * Opens a specialized spatializer output if supported by the platform.
+ * If several spatializer output profiles exist, the one supporting the sink device
+ * corresponding to the provided audio attributes will be selected.
+ * Only one spatializer output stream can be opened at a time and an error is returned
+ * if one already exists.
+ * @param config audio format, channel mask and sampling rate to be used as the mixer
+ * configuration for the spatializer mixer created.
+ * @param attr audio attributes describing the playback use case that will drive the
+ * sink device selection
+ * @param output the IO handle of the output opened
+ * @return NO_ERROR if an output was opened, INVALID_OPERATION or BAD_VALUE otherwise
+ */
+ virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output) = 0;
+
+ /**
+ * Closes a previously opened specialized spatializer output.
+ * @param output the IO handle of the output to close.
+ * @return NO_ERROR if an output was closed, INVALID_OPERATION or BAD_VALUE otherwise
+ */
+ virtual status_t releaseSpatializerOutput(audio_io_handle_t output) = 0;
};
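A sketch of how a hypothetical client could drive the three new methods end to end; withSpatializerOutput, its parameters, and the 5.1 mixer mask are illustrative and not part of this interface:

    #include <functional>
    // audio_*_t types and AUDIO_* initializers come from system/audio.h (assumed).

    status_t withSpatializerOutput(AudioPolicyInterface* policy,
                                   const AudioDeviceTypeAddrVector& devices,
                                   const std::function<void(audio_io_handle_t)>& use) {
        audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
        attr.usage = AUDIO_USAGE_MEDIA;                  // media/game only, per policy
        if (!policy->canBeSpatialized(&attr, nullptr /*config*/, devices)) {
            return INVALID_OPERATION;                    // no matching profile/device
        }
        audio_config_base_t mixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
        mixerConfig.channel_mask = AUDIO_CHANNEL_OUT_5POINT1;  // a spatialized layout
        audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
        const status_t status = policy->getSpatializerOutput(&mixerConfig, &attr, &output);
        if (status != NO_ERROR) {
            return status;       // only one spatializer output can be open at a time
        }
        use(output);
        return policy->releaseSpatializerOutput(output);
    }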
@@ -359,7 +403,8 @@
// The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags) = 0;
@@ -459,6 +504,8 @@
virtual status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) = 0;
+
+ virtual status_t setDeviceConnectedState(const struct audio_port_v7 *port, bool connected) = 0;
};
// These are the signatures of createAudioPolicyManager/destroyAudioPolicyManager
diff --git a/services/audiopolicy/common/include/Volume.h b/services/audiopolicy/common/include/Volume.h
index 736f8b2..f0636a0 100644
--- a/services/audiopolicy/common/include/Volume.h
+++ b/services/audiopolicy/common/include/Volume.h
@@ -127,6 +127,7 @@
case AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES:
case AUDIO_DEVICE_OUT_USB_HEADSET:
case AUDIO_DEVICE_OUT_BLE_HEADSET:
+ case AUDIO_DEVICE_OUT_BLE_BROADCAST:
return DEVICE_CATEGORY_HEADSET;
case AUDIO_DEVICE_OUT_HEARING_AID:
return DEVICE_CATEGORY_HEARING_AID;
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
index 1f9b535..8aab634 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioOutputDescriptor.h
@@ -158,7 +158,7 @@
virtual bool isDuplicated() const { return false; }
virtual uint32_t latency() { return 0; }
virtual bool isFixedVolume(const DeviceTypeSet& deviceTypes);
- virtual bool setVolume(float volumeDb,
+ virtual bool setVolume(float volumeDb, bool muted,
VolumeSource volumeSource, const StreamTypeVector &streams,
const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
@@ -352,7 +352,22 @@
setClientActive(client, false);
}
}
- virtual bool setVolume(float volumeDb,
+
+ /**
+ * @brief setSwMute: for a SwOutput routed to a device that supports HW gain, mute only the
+ * tracks associated with the given volume source.
+ * As an output may host one or more sources, and as AudioPolicyManager may or may not
+ * dispatch the volume change request to the single HW gain controller depending on the
+ * priority of the volume source, a separate API is needed to force mute/unmute of a
+ * volume source.
+ * @param muted true to mute, false otherwise
+ * @param vs volume source to be considered
+ * @param streams stream types associated with the volume source
+ * @param device devices scoped for the change
+ * @param delayMs delay potentially applied to prevent cut sounds.
+ */
+ void setSwMute(bool muted, VolumeSource vs, const StreamTypeVector &streams,
+ const DeviceTypeSet& device, uint32_t delayMs);
+
+ virtual bool setVolume(float volumeDb, bool muted,
VolumeSource volumeSource, const StreamTypeVector &streams,
const DeviceTypeSet& device,
uint32_t delayMs,
@@ -362,7 +377,8 @@
const struct audio_port_config *srcConfig = NULL) const;
virtual void toAudioPort(struct audio_port_v7 *port) const;
- status_t open(const audio_config_t *config,
+ status_t open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -423,6 +439,7 @@
uint32_t mDirectOpenCount; // number of clients using this output (direct outputs only)
audio_session_t mDirectClientSession; // session id of the direct output client
bool mPendingReopenToQueryProfiles = false;
+ audio_channel_mask_t mMixerChannelMask = AUDIO_CHANNEL_NONE;
};
// Audio output driven by an input device directly.
@@ -435,7 +452,7 @@
void dump(String8 *dst) const override;
- virtual bool setVolume(float volumeDb,
+ virtual bool setVolume(float volumeDb, bool muted,
VolumeSource volumeSource, const StreamTypeVector &streams,
const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
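The muted argument threaded through the setVolume() overrides pairs with setSwMute(): when the routed device exposes a HW gain controller shared by several active volume sources, per-source muting cannot go through that single HW gain, so it falls back to a stream amplitude of 0 on AudioFlinger. A simplified decision model (hypothetical helper, not the descriptor API):

    #include <cstddef>
    #include <optional>

    // Returns the stream amplitude to push to AudioFlinger via
    // setStreamVolume(), or nullopt when setSwMute() is a no-op and
    // setVolume() keeps control of the SW/HW gains.
    std::optional<float> swMuteAmplitude(bool muted,
                                         bool volumeSourceActive,
                                         size_t activeVolumeSources,
                                         bool deviceHasHwGain) {
        if (deviceHasHwGain && volumeSourceActive && activeVolumeSources > 1) {
            return muted ? 0.0f : 1.0f;   // Volume::DbToAmpl(0) == 1.0f
        }
        return std::nullopt;
    }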
diff --git a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
index cf1f64c..a8fd856 100644
--- a/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
+++ b/services/audiopolicy/common/managerdefinitions/include/AudioPolicyConfig.h
@@ -202,6 +202,20 @@
{AUDIO_FORMAT_AC4, {}}};
}
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ // until then, use DEEP_BUFFER+FAST flag combo to indicate the spatializer output profile
+ void convertSpatializerFlag()
+ {
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags()
+ == (AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER)) {
+ curProfile->setFlags(AUDIO_OUTPUT_FLAG_SPATIALIZER);
+ }
+ }
+ }
+ }
+
private:
static const constexpr char* const kDefaultEngineLibraryNameSuffix = "default";
diff --git a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
index 20b4044..58d05c6 100644
--- a/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
+++ b/services/audiopolicy/common/managerdefinitions/include/DeviceDescriptor.h
@@ -168,6 +168,10 @@
DeviceVector getDevicesFromDeviceTypeAddrVec(
const AudioDeviceTypeAddrVector& deviceTypeAddrVector) const;
+ // Return an AudioDeviceTypeAddrVector containing the type and address of each device
+ // descriptor in this vector
+ AudioDeviceTypeAddrVector toTypeAddrVector() const;
+
// If there are devices with the given type and the devices to add is not empty,
// remove all the devices with the given type and add all the devices to add.
void replaceDevicesByType(audio_devices_t typeToRemove, const DeviceVector &devicesToAdd);
diff --git a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
index 6b08f7c..5c3bdb3 100644
--- a/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/AudioOutputDescriptor.cpp
@@ -155,7 +155,7 @@
return false;
}
-bool AudioOutputDescriptor::setVolume(float volumeDb,
+bool AudioOutputDescriptor::setVolume(float volumeDb, bool /*muted*/,
VolumeSource volumeSource,
const StreamTypeVector &/*streams*/,
const DeviceTypeSet& deviceTypes,
@@ -435,14 +435,36 @@
mFlags & AUDIO_OUTPUT_FLAG_FAST ? AUDIO_LATENCY_LOW : AUDIO_LATENCY_NORMAL;
}
-bool SwAudioOutputDescriptor::setVolume(float volumeDb,
+void SwAudioOutputDescriptor::setSwMute(
+ bool muted, VolumeSource vs, const StreamTypeVector &streamTypes,
+ const DeviceTypeSet& deviceTypes, uint32_t delayMs) {
+ // volume source active and more than one volume source is active, otherwise, no-op or let
+ // setVolume controlling SW and/or HW Gains
+ if (!streamTypes.empty() && isActive(vs) && (getActiveVolumeSources().size() > 1)) {
+ for (const auto& devicePort : devices()) {
+ if (isSingleDeviceType(deviceTypes, devicePort->type()) &&
+ devicePort->hasGainController(true /*canUseForVolume*/)) {
+ float volumeAmpl = muted ? 0.0f : Volume::DbToAmpl(0);
+ ALOGV("%s: output: %d, vs: %d, muted: %d, active vs count: %zu", __func__,
+ mIoHandle, vs, muted, getActiveVolumeSources().size());
+ for (const auto &stream : streamTypes) {
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ }
+ return;
+ }
+ }
+ }
+}
+
+bool SwAudioOutputDescriptor::setVolume(float volumeDb, bool muted,
VolumeSource vs, const StreamTypeVector &streamTypes,
const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force)
{
StreamTypeVector streams = streamTypes;
- if (!AudioOutputDescriptor::setVolume(volumeDb, vs, streamTypes, deviceTypes, delayMs, force)) {
+ if (!AudioOutputDescriptor::setVolume(
+ volumeDb, muted, vs, streamTypes, deviceTypes, delayMs, force)) {
return false;
}
if (streams.empty()) {
@@ -459,11 +481,17 @@
// different Volume Source (or if we allow several curves within same volume group)
//
// @todo: default stream volume to max (0) when using HW Port gain?
- float volumeAmpl = Volume::DbToAmpl(0);
- for (const auto &stream : streams) {
- mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ // Set the SW gain on AudioFlinger if:
+ // - the volume group has explicit stream(s) associated, or
+ // - the volume group with no explicit stream(s) is the only active source on this output
+ // Mute the SW gain on AudioFlinger only for a volume group with explicit stream(s)
+ if (!streamTypes.empty() || (getActiveVolumeSources().size() == 1)) {
+ const bool canMute = muted && (volumeDb != 0.0f) && !streamTypes.empty();
+ float volumeAmpl = canMute ? 0.0f : Volume::DbToAmpl(0);
+ for (const auto &stream : streams) {
+ mClientInterface->setStreamVolume(stream, volumeAmpl, mIoHandle, delayMs);
+ }
}
-
AudioGains gains = devicePort->getGains();
int gainMinValueInMb = gains[0]->getMinValueInMb();
int gainMaxValueInMb = gains[0]->getMaxValueInMb();
@@ -491,7 +519,8 @@
return true;
}
-status_t SwAudioOutputDescriptor::open(const audio_config_t *config,
+status_t SwAudioOutputDescriptor::open(const audio_config_t *halConfig,
+ const audio_config_base_t *mixerConfig,
const DeviceVector &devices,
audio_stream_type_t stream,
audio_output_flags_t flags,
@@ -504,45 +533,62 @@
"with the requested devices, all device types: %s",
__func__, dumpDeviceTypes(devices.types()).c_str());
- audio_config_t lConfig;
- if (config == nullptr) {
- lConfig = AUDIO_CONFIG_INITIALIZER;
- lConfig.sample_rate = mSamplingRate;
- lConfig.channel_mask = mChannelMask;
- lConfig.format = mFormat;
+ audio_config_t lHalConfig;
+ if (halConfig == nullptr) {
+ lHalConfig = AUDIO_CONFIG_INITIALIZER;
+ lHalConfig.sample_rate = mSamplingRate;
+ lHalConfig.channel_mask = mChannelMask;
+ lHalConfig.format = mFormat;
} else {
- lConfig = *config;
+ lHalConfig = *halConfig;
}
// if the selected profile is offloaded and no offload info was specified,
// create a default one
if ((mProfile->getFlags() & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) &&
- lConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
+ lHalConfig.offload_info.format == AUDIO_FORMAT_DEFAULT) {
flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
- lConfig.offload_info = AUDIO_INFO_INITIALIZER;
- lConfig.offload_info.sample_rate = lConfig.sample_rate;
- lConfig.offload_info.channel_mask = lConfig.channel_mask;
- lConfig.offload_info.format = lConfig.format;
- lConfig.offload_info.stream_type = stream;
- lConfig.offload_info.duration_us = -1;
- lConfig.offload_info.has_video = true; // conservative
- lConfig.offload_info.is_streaming = true; // likely
- lConfig.offload_info.encapsulation_mode = lConfig.offload_info.encapsulation_mode;
- lConfig.offload_info.content_id = lConfig.offload_info.content_id;
- lConfig.offload_info.sync_id = lConfig.offload_info.sync_id;
+ lHalConfig.offload_info = AUDIO_INFO_INITIALIZER;
+ lHalConfig.offload_info.sample_rate = lHalConfig.sample_rate;
+ lHalConfig.offload_info.channel_mask = lHalConfig.channel_mask;
+ lHalConfig.offload_info.format = lHalConfig.format;
+ lHalConfig.offload_info.stream_type = stream;
+ lHalConfig.offload_info.duration_us = -1;
+ lHalConfig.offload_info.has_video = true; // conservative
+ lHalConfig.offload_info.is_streaming = true; // likely
+ lHalConfig.offload_info.encapsulation_mode = lHalConfig.offload_info.encapsulation_mode;
+ lHalConfig.offload_info.content_id = lHalConfig.offload_info.content_id;
+ lHalConfig.offload_info.sync_id = lHalConfig.offload_info.sync_id;
+ }
+
+ audio_config_base_t lMixerConfig;
+ if (mixerConfig == nullptr) {
+ lMixerConfig = AUDIO_CONFIG_BASE_INITIALIZER;
+ lMixerConfig.sample_rate = lHalConfig.sample_rate;
+ lMixerConfig.channel_mask = lHalConfig.channel_mask;
+ lMixerConfig.format = lHalConfig.format;
+ } else {
+ lMixerConfig = *mixerConfig;
}
mFlags = (audio_output_flags_t)(mFlags | flags);
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ audio_output_flags_t halFlags = mFlags;
+ if ((mFlags & AUDIO_OUTPUT_FLAG_SPATIALIZER) != 0) {
+ halFlags = (audio_output_flags_t)(AUDIO_OUTPUT_FLAG_FAST | AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
+ }
+
ALOGV("opening output for device %s profile %p name %s",
mDevices.toString().c_str(), mProfile.get(), mProfile->getName().c_str());
status_t status = mClientInterface->openOutput(mProfile->getModuleHandle(),
output,
- &lConfig,
+ &lHalConfig,
+ &lMixerConfig,
device,
&mLatency,
- mFlags);
+ halFlags);
if (status == NO_ERROR) {
LOG_ALWAYS_FATAL_IF(*output == AUDIO_IO_HANDLE_NONE,
@@ -550,9 +596,10 @@
"selected device %s for opening",
__FUNCTION__, *output, devices.toString().c_str(),
device->toString().c_str());
- mSamplingRate = lConfig.sample_rate;
- mChannelMask = lConfig.channel_mask;
- mFormat = lConfig.format;
+ mSamplingRate = lHalConfig.sample_rate;
+ mChannelMask = lHalConfig.channel_mask;
+ mFormat = lHalConfig.format;
+ mMixerChannelMask = lMixerConfig.channel_mask;
mId = PolicyAudioPort::getNextUniqueId();
mIoHandle = *output;
mProfile->curOpenCount++;
@@ -679,14 +726,14 @@
}
-bool HwAudioOutputDescriptor::setVolume(float volumeDb,
+bool HwAudioOutputDescriptor::setVolume(float volumeDb, bool muted,
VolumeSource volumeSource, const StreamTypeVector &streams,
const DeviceTypeSet& deviceTypes,
uint32_t delayMs,
bool force)
{
bool changed = AudioOutputDescriptor::setVolume(
- volumeDb, volumeSource, streams, deviceTypes, delayMs, force);
+ volumeDb, muted, volumeSource, streams, deviceTypes, delayMs, force);
if (changed) {
// TODO: use gain controller on source device if any to adjust volume
diff --git a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
index 561fab3..c9c8ede 100644
--- a/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/DeviceDescriptor.cpp
@@ -442,6 +442,14 @@
return devices;
}
+AudioDeviceTypeAddrVector DeviceVector::toTypeAddrVector() const {
+ AudioDeviceTypeAddrVector result;
+ for (const auto& device : *this) {
+ result.push_back(AudioDeviceTypeAddr(device->type(), device->address()));
+ }
+ return result;
+}
+
void DeviceVector::replaceDevicesByType(
audio_devices_t typeToRemove, const DeviceVector &devicesToAdd) {
DeviceVector devicesToRemove = getDevicesFromType(typeToRemove);
diff --git a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
index 84ed656..a631963 100644
--- a/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
+++ b/services/audiopolicy/common/managerdefinitions/src/Serializer.cpp
@@ -861,10 +861,10 @@
ALOGE("%s: No version found in root node %s", __func__, rootName);
return BAD_VALUE;
}
- if (version == "7.0") {
+ if (version == "7.0" || version == "7.1") {
mChannelMasksSeparator = mSamplingRatesSeparator = mFlagsSeparator = " ";
} else if (version != "1.0") {
- ALOGE("%s: Version does not match; expected \"1.0\" or \"7.0\" got \"%s\"",
+ ALOGE("%s: Version does not match; expected \"1.0\", \"7.0\", or \"7.1\" got \"%s\"",
__func__, version.c_str());
return BAD_VALUE;
}
diff --git a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
index b3f8947..06cc799 100644
--- a/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
+++ b/services/audiopolicy/engine/common/src/LastRemovableMediaDevices.cpp
@@ -85,6 +85,7 @@
case AUDIO_DEVICE_OUT_HEARING_AID:
case AUDIO_DEVICE_OUT_BLE_HEADSET:
case AUDIO_DEVICE_OUT_BLE_SPEAKER:
+ case AUDIO_DEVICE_OUT_BLE_BROADCAST:
return GROUP_BT_A2DP;
default:
return GROUP_NONE;
diff --git a/services/audiopolicy/fuzzer/Android.bp b/services/audiopolicy/fuzzer/Android.bp
index faf15d6..9f6b703 100644
--- a/services/audiopolicy/fuzzer/Android.bp
+++ b/services/audiopolicy/fuzzer/Android.bp
@@ -62,4 +62,7 @@
"libaudiopolicymanager_interface_headers",
],
data: [":audiopolicyfuzzer_configuration_files"],
+ fuzz_config: {
+ cc: ["mnaganov@google.com"],
+ },
}
diff --git a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
index 7000cd9..8584702 100644
--- a/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
+++ b/services/audiopolicy/fuzzer/audiopolicy_fuzzer.cpp
@@ -163,7 +163,9 @@
AUDIO_FLAG_BYPASS_MUTE, AUDIO_FLAG_LOW_LATENCY,
AUDIO_FLAG_DEEP_BUFFER, AUDIO_FLAG_NO_MEDIA_PROJECTION,
AUDIO_FLAG_MUTE_HAPTIC, AUDIO_FLAG_NO_SYSTEM_CAPTURE,
- AUDIO_FLAG_CAPTURE_PRIVATE};
+ AUDIO_FLAG_CAPTURE_PRIVATE, AUDIO_FLAG_CONTENT_SPATIALIZED,
+ AUDIO_FLAG_NEVER_SPATIALIZE,
+ };
std::vector<audio_policy_dev_state_t> kAudioPolicyDeviceStates = {
AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE,
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 4ce7851..3cfb944 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -112,11 +112,14 @@
void AudioPolicyManager::broadcastDeviceConnectionState(const sp<DeviceDescriptor> &device,
audio_policy_dev_state_t state)
{
- AudioParameter param(String8(device->address().c_str()));
- const String8 key(state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE ?
- AudioParameter::keyDeviceConnect : AudioParameter::keyDeviceDisconnect);
- param.addInt(key, device->type());
- mpClientInterface->setParameters(AUDIO_IO_HANDLE_NONE, param.toString());
+ audio_port_v7 devicePort;
+ device->toAudioPort(&devicePort);
+ if (status_t status = mpClientInterface->setDeviceConnectedState(
+ &devicePort, state == AUDIO_POLICY_DEVICE_STATE_AVAILABLE);
+ status != OK) {
+ ALOGE("Error %d while setting connected state for device %s", status,
+ device->getDeviceTypeAddr().toString(false).c_str());
+ }
}
status_t AudioPolicyManager::setDeviceConnectionStateInt(audio_devices_t deviceType,
@@ -246,8 +249,8 @@
sp<SwAudioOutputDescriptor> desc = mOutputs.valueFor(output);
// close unused outputs after device disconnection or direct outputs that have
// been opened by checkOutputsForDevice() to query dynamic parameters
- if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE) ||
- (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
+ if ((state == AUDIO_POLICY_DEVICE_STATE_UNAVAILABLE)
+ || (((desc->mFlags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) &&
(desc->mDirectOpenCount == 0))) {
clearAudioSourcesForOutput(output);
closeOutput(output);
@@ -940,6 +943,32 @@
return profile;
}
+sp<IOProfile> AudioPolicyManager::getSpatializerOutputProfile(
+ const audio_config_t *config __unused, const AudioDeviceTypeAddrVector &devices) const
+{
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& curProfile : hwModule->getOutputProfiles()) {
+ if (curProfile->getFlags() != AUDIO_OUTPUT_FLAG_SPATIALIZER) {
+ continue;
+ }
+ // reject profiles not corresponding to a device currently available
+ DeviceVector supportedDevices = curProfile->getSupportedDevices();
+ if (!mAvailableOutputDevices.containsAtLeastOne(supportedDevices)) {
+ continue;
+ }
+ if (!devices.empty()) {
+ if (supportedDevices.getDevicesFromDeviceTypeAddrVec(devices).size()
+ != devices.size()) {
+ continue;
+ }
+ }
+ ALOGV("%s found profile %s", __func__, curProfile->getName().c_str());
+ return curProfile;
+ }
+ }
+ return nullptr;
+}
+
audio_io_handle_t AudioPolicyManager::getOutput(audio_stream_type_t stream)
{
DeviceVector devices = mEngine->getOutputDevicesForStream(stream, false /*fromCache*/);
@@ -1109,7 +1138,7 @@
*output = AUDIO_IO_HANDLE_NONE;
if (!msdDevices.isEmpty()) {
- *output = getOutputForDevices(msdDevices, session, *stream, config, flags);
+ *output = getOutputForDevices(msdDevices, session, resultAttr, config, flags);
if (*output != AUDIO_IO_HANDLE_NONE && setMsdOutputPatches(&outputDevices) == NO_ERROR) {
ALOGV("%s() Using MSD devices %s instead of devices %s",
__func__, msdDevices.toString().c_str(), outputDevices.toString().c_str());
@@ -1118,7 +1147,7 @@
}
}
if (*output == AUDIO_IO_HANDLE_NONE) {
- *output = getOutputForDevices(outputDevices, session, *stream, config,
+ *output = getOutputForDevices(outputDevices, session, resultAttr, config,
flags, resultAttr->flags & AUDIO_FLAG_MUTE_HAPTIC);
}
if (*output == AUDIO_IO_HANDLE_NONE) {
@@ -1280,7 +1309,8 @@
// all MSD patches to prioritize this request over any active output on MSD.
releaseMsdOutputPatches(devices);
- status_t status = outputDesc->open(config, devices, stream, flags, output);
+ status_t status =
+ outputDesc->open(config, nullptr /* mixerConfig */, devices, stream, flags, output);
// only accept an output with the requested parameters
if (status != NO_ERROR ||
@@ -1315,7 +1345,7 @@
audio_io_handle_t AudioPolicyManager::getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic)
@@ -1337,6 +1367,9 @@
if ((*flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
*flags = (audio_output_flags_t)(*flags | AUDIO_OUTPUT_FLAG_DIRECT);
}
+
+ audio_stream_type_t stream = mEngine->getStreamTypeForAttributes(*attr);
+
// only allow deep buffering for music stream type
if (stream != AUDIO_STREAM_MUSIC) {
*flags = (audio_output_flags_t)(*flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
@@ -1356,6 +1389,11 @@
ALOGV("Set VoIP and Direct output flags for PCM format");
}
+ if (mSpatializerOutput != nullptr
+ && canBeSpatialized(attr, config, devices.toTypeAddrVector())) {
+ return mSpatializerOutput->mIoHandle;
+ }
+
audio_config_t directConfig = *config;
directConfig.channel_mask = channelMask;
status_t status = openDirectOutput(stream, session, &directConfig, *flags, devices, &output);
@@ -1807,7 +1845,7 @@
if (stream == AUDIO_STREAM_TTS) {
ALOGV("\t found BEACON stream");
if (!mTtsOutputAvailable && mOutputs.isAnyOutputActive(
- toVolumeSource(AUDIO_STREAM_TTS) /*sourceToIgnore*/)) {
+ toVolumeSource(AUDIO_STREAM_TTS, false) /*sourceToIgnore*/)) {
return INVALID_OPERATION;
} else {
beaconMuteLatency = handleEventForBeacon(STARTING_BEACON);
@@ -2029,12 +2067,20 @@
if (outputDesc->getActivityCount(clientVolSrc) == 0 || forceDeviceUpdate) {
outputDesc->setStopTime(client, systemTime());
DeviceVector newDevices = getNewOutputDevices(outputDesc, false /*fromCache*/);
+
+ // If the routing does not change, if an output is routed on a device using HwGain
+ // (aka setAudioPortConfig) and there are still active clients following different
+ // volume group(s), force reapply volume
+ bool requiresVolumeCheck = outputDesc->getActivityCount(clientVolSrc) == 0 &&
+ outputDesc->useHwGain() && outputDesc->isAnyActive(VOLUME_SOURCE_NONE);
+
// delay the device switch by twice the latency because stopOutput() is executed when
// the track stop() command is received and at that time the audio track buffer can
// still contain data that needs to be drained. The latency only covers the audio HAL
// and kernel buffers. Also the latency does not always include additional delay in the
// audio path (audio DSP, CODEC ...)
- setOutputDevices(outputDesc, newDevices, false, outputDesc->latency()*2);
+ setOutputDevices(outputDesc, newDevices, false, outputDesc->latency()*2,
+ nullptr, true /*requiresMuteCheck*/, requiresVolumeCheck);
// force restoring the device selection on other active outputs if it differs from the
// one being selected for this output
@@ -2790,6 +2836,8 @@
// HW Gain management, do not change the volume
if (desc->useHwGain()) {
applyVolume = false;
+ // If the volume source is active alongside a higher priority source, ensure it is at least SW muted
+ desc->setSwMute((index == 0), vs, curves.getStreamTypes(), curDevices, 0 /*delayMs*/);
for (const auto &productStrategy : mEngine->getOrderedProductStrategies()) {
auto activeClients = desc->clientsList(true /*activeOnly*/, productStrategy,
false /*preferredDevice*/);
@@ -2829,7 +2877,7 @@
// handled by system UI
status_t volStatus = checkAndSetVolume(
curves, vs, index, desc, curDevices,
- ((vs == toVolumeSource(AUDIO_STREAM_SYSTEM))?
+ ((vs == toVolumeSource(AUDIO_STREAM_SYSTEM, false))?
TOUCH_SOUND_FIXED_DELAY_MS : 0));
if (volStatus != NO_ERROR) {
status = volStatus;
@@ -3031,12 +3079,14 @@
bool AudioPolicyManager::isStreamActive(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isActive(toVolumeSource(stream), inPastMs);
+ auto vs = toVolumeSource(stream, false);
+ return vs != VOLUME_SOURCE_NONE ? mOutputs.isActive(vs, inPastMs) : false;
}
bool AudioPolicyManager::isStreamActiveRemotely(audio_stream_type_t stream, uint32_t inPastMs) const
{
- return mOutputs.isActiveRemotely(toVolumeSource(stream), inPastMs);
+ auto vs = toVolumeSource(stream, false);
+ return vs != VOLUME_SOURCE_NONE ? mOutputs.isActiveRemotely(vs, inPastMs) : false;
}
bool AudioPolicyManager::isSourceActive(audio_source_t source) const
@@ -4817,6 +4867,205 @@
return source;
}
+/* static */
+bool AudioPolicyManager::isChannelMaskSpatialized(audio_channel_mask_t channels) {
+ switch (channels) {
+ case AUDIO_CHANNEL_OUT_5POINT1:
+ case AUDIO_CHANNEL_OUT_5POINT1POINT2:
+ case AUDIO_CHANNEL_OUT_5POINT1POINT4:
+ case AUDIO_CHANNEL_OUT_7POINT1:
+ case AUDIO_CHANNEL_OUT_7POINT1POINT2:
+ case AUDIO_CHANNEL_OUT_7POINT1POINT4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool AudioPolicyManager::canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const
+{
+ // The caller can have the audio attributes criteria ignored by either passing a null ptr or
+ // the AUDIO_ATTRIBUTES_INITIALIZER value.
+ // If attributes are specified, current policy is to only allow spatialization for media
+ // and game usages.
+ if (attr != nullptr && *attr != AUDIO_ATTRIBUTES_INITIALIZER) {
+ if (attr->usage != AUDIO_USAGE_MEDIA && attr->usage != AUDIO_USAGE_GAME) {
+ return false;
+ }
+ if ((attr->flags & (AUDIO_FLAG_CONTENT_SPATIALIZED | AUDIO_FLAG_NEVER_SPATIALIZE)) != 0) {
+ return false;
+ }
+ }
+
+ // The caller can have the devices criteria ignored by passing an empty vector, and
+ // getSpatializerOutputProfile() will ignore the devices when looking for a match.
+ // Otherwise an output profile supporting a spatializer effect that can be routed
+ // to the specified devices must exist.
+ sp<IOProfile> profile =
+ getSpatializerOutputProfile(config, devices);
+ if (profile == nullptr) {
+ return false;
+ }
+
+ // The caller can have the audio config criteria ignored by either passing a null ptr or
+ // the AUDIO_CONFIG_INITIALIZER value.
+ // If an audio config is specified, current policy is to only allow spatialization for
+ // some positional channel masks.
+ // If the spatializer output is already opened, only channel masks included in the
+ // spatializer output mixer channel mask are allowed.
+
+ if (config != nullptr && *config != AUDIO_CONFIG_INITIALIZER) {
+ if (!isChannelMaskSpatialized(config->channel_mask)) {
+ return false;
+ }
+ if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile) {
+ if ((config->channel_mask & mSpatializerOutput->mMixerChannelMask)
+ != config->channel_mask) {
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void AudioPolicyManager::checkVirtualizerClientRoutes() {
+ std::set<audio_stream_type_t> streamsToInvalidate;
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ const sp<SwAudioOutputDescriptor>& desc = mOutputs[i];
+ for (const sp<TrackClientDescriptor>& client : desc->getClientIterable()) {
+ audio_attributes_t attr = client->attributes();
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_base_t clientConfig = client->config();
+ audio_config_t config = audio_config_initializer(&clientConfig);
+ if (desc != mSpatializerOutput
+ && canBeSpatialized(&attr, &config, devicesTypeAddress)) {
+ streamsToInvalidate.insert(client->stream());
+ }
+ }
+ }
+
+ for (audio_stream_type_t stream : streamsToInvalidate) {
+ mpClientInterface->invalidateStream(stream);
+ }
+}
+
+status_t AudioPolicyManager::getSpatializerOutput(const audio_config_base_t *mixerConfig,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output) {
+ *output = AUDIO_IO_HANDLE_NONE;
+
+ DeviceVector devices = mEngine->getOutputDevicesForAttributes(*attr, nullptr, false);
+ AudioDeviceTypeAddrVector devicesTypeAddress = devices.toTypeAddrVector();
+ audio_config_t *configPtr = nullptr;
+ audio_config_t config;
+ if (mixerConfig != nullptr) {
+ config = audio_config_initializer(mixerConfig);
+ configPtr = &config;
+ }
+ if (!canBeSpatialized(attr, configPtr, devicesTypeAddress)) {
+ ALOGW("%s provided attributes or mixer config cannot be spatialized", __func__);
+ return BAD_VALUE;
+ }
+
+ sp<IOProfile> profile =
+ getSpatializerOutputProfile(configPtr, devicesTypeAddress);
+ if (profile == nullptr) {
+ ALOGW("%s no suitable output profile for provided attributes or mixer config", __func__);
+ return BAD_VALUE;
+ }
+
+ if (mSpatializerOutput != nullptr && mSpatializerOutput->mProfile == profile
+ && configPtr != nullptr
+ && configPtr->channel_mask == mSpatializerOutput->mMixerChannelMask) {
+ *output = mSpatializerOutput->mIoHandle;
+ ALOGV("%s returns current spatializer output %d", __func__, *output);
+ return NO_ERROR;
+ }
+ mSpatializerOutput.clear();
+ for (size_t i = 0; i < mOutputs.size(); i++) {
+ sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
+ if (!desc->isDuplicated() && desc->mProfile == profile) {
+ mSpatializerOutput = desc;
+ break;
+ }
+ }
+ if (mSpatializerOutput == nullptr) {
+ ALOGW("%s no opened spatializer output for profile %s",
+ __func__, profile->getName().c_str());
+ return BAD_VALUE;
+ }
+
+ if (configPtr != nullptr
+ && configPtr->channel_mask != mSpatializerOutput->mMixerChannelMask) {
+ audio_config_base_t savedMixerConfig = {
+ .sample_rate = mSpatializerOutput->getSamplingRate(),
+ .format = mSpatializerOutput->getFormat(),
+ .channel_mask = mSpatializerOutput->mMixerChannelMask,
+ };
+ DeviceVector savedDevices = mSpatializerOutput->devices();
+
+ closeOutput(mSpatializerOutput->mIoHandle);
+ mSpatializerOutput.clear();
+
+ const sp<SwAudioOutputDescriptor> desc =
+ new SwAudioOutputDescriptor(profile, mpClientInterface);
+ status_t status = desc->open(nullptr, mixerConfig, devices,
+ mEngine->getStreamTypeForAttributes(*attr),
+ AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
+ if (status != NO_ERROR) {
+ ALOGW("%s failed opening output: status %d, output %d", __func__, status, *output);
+ if (*output != AUDIO_IO_HANDLE_NONE) {
+ desc->close();
+ }
+ // re-open the spatializer output with the previous channel mask
+ status_t newStatus = desc->open(nullptr, &savedMixerConfig, savedDevices,
+ mEngine->getStreamTypeForAttributes(*attr),
+ AUDIO_OUTPUT_FLAG_SPATIALIZER, output);
+ if (newStatus != NO_ERROR) {
+ if (*output != AUDIO_IO_HANDLE_NONE) {
+ desc->close();
+ }
+ ALOGE("%s failed to re-open mSpatializerOutput, status %d", __func__, newStatus);
+ } else {
+ mSpatializerOutput = desc;
+ addOutput(*output, desc);
+ }
+ mPreviousOutputs = mOutputs;
+ mpClientInterface->onAudioPortListUpdate();
+ *output = AUDIO_IO_HANDLE_NONE;
+ return status;
+ }
+ mSpatializerOutput = desc;
+ addOutput(*output, desc);
+ mPreviousOutputs = mOutputs;
+ mpClientInterface->onAudioPortListUpdate();
+ }
+
+ checkVirtualizerClientRoutes();
+
+ *output = mSpatializerOutput->mIoHandle;
+ ALOGV("%s returns new spatializer output %d", __func__, *output);
+ return NO_ERROR;
+}
+
+status_t AudioPolicyManager::releaseSpatializerOutput(audio_io_handle_t output) {
+ if (mSpatializerOutput == nullptr) {
+ return INVALID_OPERATION;
+ }
+ if (mSpatializerOutput->mIoHandle != output) {
+ return BAD_VALUE;
+ }
+
+ mSpatializerOutput.clear();
+
+ checkVirtualizerClientRoutes();
+
+ return NO_ERROR;
+}
+
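The config gate in canBeSpatialized() above has two parts: the channel mask must be one of the positional layouts in isChannelMaskSpatialized(), and, once the spatializer output is open, it must be contained in that output's mixer channel mask. A small worked example of the containment test (masks from system/audio.h; maskContained is an illustrative helper):

    #include <cstdio>
    #include <system/audio.h>

    static bool maskContained(audio_channel_mask_t client,
                              audio_channel_mask_t mixer) {
        return (client & mixer) == client;
    }

    int main() {
        const audio_channel_mask_t mixer = AUDIO_CHANNEL_OUT_5POINT1;
        // 5.1 client into a 5.1 mixer: accepted (prints 1)
        printf("%d\n", maskContained(AUDIO_CHANNEL_OUT_5POINT1, mixer));
        // 7.1 client into a 5.1 mixer: rejected (prints 0), in which case
        // getSpatializerOutput() closes and reopens the output with the
        // wider mixer mask, as in the hunk above
        printf("%d\n", maskContained(AUDIO_CHANNEL_OUT_7POINT1, mixer));
        return 0;
    }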
// ----------------------------------------------------------------------------
// AudioPolicyManager
// ----------------------------------------------------------------------------
@@ -4866,6 +5115,8 @@
ALOGE("could not load audio policy configuration file, setting defaults");
getConfig().setDefault();
}
+ //TODO: b/193496180 use spatializer flag at audio HAL when available
+ getConfig().convertSpatializerFlag();
}
status_t AudioPolicyManager::initialize() {
@@ -5004,7 +5255,8 @@
sp<SwAudioOutputDescriptor> outputDesc = new SwAudioOutputDescriptor(outProfile,
mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = outputDesc->open(nullptr, DeviceVector(supportedDevice),
+ status_t status = outputDesc->open(nullptr /* halConfig */, nullptr /* mixerConfig */,
+ DeviceVector(supportedDevice),
AUDIO_STREAM_DEFAULT,
AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
@@ -5734,14 +5986,20 @@
client->getSecondaryOutputs().begin(),
client->getSecondaryOutputs().end(),
secondaryDescs.begin(), secondaryDescs.end())) {
- std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
- std::vector<audio_io_handle_t> secondaryOutputIds;
- for (const auto& secondaryDesc : secondaryDescs) {
- secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
- weakSecondaryDescs.push_back(secondaryDesc);
+ if (!audio_is_linear_pcm(client->config().format)) {
+ // If the format is not PCM, the tracks should be invalidated to get correct
+ // behavior when the secondary output is changed.
+ streamsToInvalidate.insert(client->stream());
+ } else {
+ std::vector<wp<SwAudioOutputDescriptor>> weakSecondaryDescs;
+ std::vector<audio_io_handle_t> secondaryOutputIds;
+ for (const auto &secondaryDesc: secondaryDescs) {
+ secondaryOutputIds.push_back(secondaryDesc->mIoHandle);
+ weakSecondaryDescs.push_back(secondaryDesc);
+ }
+ trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
+ client->setSecondaryOutputs(std::move(weakSecondaryDescs));
}
- trackSecondaryOutputs.emplace(client->portId(), secondaryOutputIds);
- client->setSecondaryOutputs(std::move(weakSecondaryDescs));
}
}
}
@@ -5855,7 +6113,7 @@
auto doGetOutputDevicesForVoice = [&]() {
return hasVoiceStream(streams) && (outputDesc == mPrimaryOutput ||
- outputDesc->isActive(toVolumeSource(AUDIO_STREAM_VOICE_CALL))) &&
+ outputDesc->isActive(toVolumeSource(AUDIO_STREAM_VOICE_CALL, false))) &&
(isInCall() ||
mOutputs.isStrategyActiveOnSameModule(productStrategy, outputDesc)) &&
!isStreamActive(AUDIO_STREAM_ENFORCED_AUDIBLE, 0);
@@ -5951,7 +6209,7 @@
devices.merge(curDevices);
for (audio_io_handle_t output : getOutputsForDevices(curDevices, mOutputs)) {
sp<AudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
- if (outputDesc->isActive(toVolumeSource(curStream))) {
+ if (outputDesc->isActive(toVolumeSource(curStream, false))) {
activeDevices.merge(outputDesc->devices());
}
}
@@ -6052,7 +6310,11 @@
// mute/unmute AUDIO_STREAM_TTS on all outputs
ALOGV("\t muting %d", mute);
uint32_t maxLatency = 0;
- auto ttsVolumeSource = toVolumeSource(AUDIO_STREAM_TTS);
+ auto ttsVolumeSource = toVolumeSource(AUDIO_STREAM_TTS, false);
+ if (ttsVolumeSource == VOLUME_SOURCE_NONE) {
+ ALOGV("\t no tts volume source available");
+ return 0;
+ }
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
setVolumeSourceMute(ttsVolumeSource, mute/*on*/, desc, 0 /*delay*/, DeviceTypeSet());
@@ -6162,7 +6424,7 @@
bool force,
int delayMs,
audio_patch_handle_t *patchHandle,
- bool requiresMuteCheck)
+ bool requiresMuteCheck, bool requiresVolumeCheck)
{
ALOGV("%s device %s delayMs %d", __func__, devices.toString().c_str(), delayMs);
uint32_t muteWaitMs;
@@ -6178,6 +6440,7 @@
// filter devices according to output selected
DeviceVector filteredDevices = outputDesc->filterSupportedDevices(devices);
DeviceVector prevDevices = outputDesc->devices();
+ DeviceVector availPrevDevices = mAvailableOutputDevices.filter(prevDevices);
ALOGV("setOutputDevices() prevDevice %s", prevDevices.toString().c_str());
@@ -6196,8 +6459,7 @@
// no need to proceed if new device is not AUDIO_DEVICE_NONE and not supported by current
// output profile or if new device is not supported AND previous device(s) is(are) still
// available (otherwise reset device must be done on the output)
- if (!devices.isEmpty() && filteredDevices.isEmpty() &&
- !mAvailableOutputDevices.filter(prevDevices).empty()) {
+ if (!devices.isEmpty() && filteredDevices.isEmpty() && !availPrevDevices.empty()) {
ALOGV("%s: unsupported device %s for output", __func__, devices.toString().c_str());
// restore previous device after evaluating strategy mute state
outputDesc->setDevices(prevDevices);
@@ -6211,16 +6473,20 @@
// AND the output is connected by a valid audio patch.
// Doing this check here allows the caller to call setOutputDevices() without conditions
if ((filteredDevices.isEmpty() || filteredDevices == prevDevices) &&
- !force && outputDesc->getPatchHandle() != 0) {
+ !force && outputDesc->getPatchHandle() != AUDIO_PATCH_HANDLE_NONE) {
ALOGV("%s setting same device %s or null device, force=%d, patch handle=%d", __func__,
filteredDevices.toString().c_str(), force, outputDesc->getPatchHandle());
+ if (requiresVolumeCheck && !filteredDevices.isEmpty()) {
+ ALOGV("%s setting same device on routed output, force apply volumes", __func__);
+ applyStreamVolumes(outputDesc, filteredDevices.types(), delayMs, true /*force*/);
+ }
return muteWaitMs;
}
ALOGV("%s changing device to %s", __func__, filteredDevices.toString().c_str());
// do the routing
- if (filteredDevices.isEmpty()) {
+ if (filteredDevices.isEmpty() || mAvailableOutputDevices.filter(filteredDevices).empty()) {
resetOutputDevice(outputDesc, delayMs, NULL);
} else {
PatchBuilder patchBuilder;
@@ -6384,11 +6650,11 @@
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
// exploration of the dialer UI. In this situation, bring the accessibility volume closer to
// the ringtone volume
- const auto callVolumeSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL);
- const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING);
- const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC);
- const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM);
- const auto a11yVolumeSrc = toVolumeSource(AUDIO_STREAM_ACCESSIBILITY);
+ const auto callVolumeSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL, false);
+ const auto ringVolumeSrc = toVolumeSource(AUDIO_STREAM_RING, false);
+ const auto musicVolumeSrc = toVolumeSource(AUDIO_STREAM_MUSIC, false);
+ const auto alarmVolumeSrc = toVolumeSource(AUDIO_STREAM_ALARM, false);
+ const auto a11yVolumeSrc = toVolumeSource(AUDIO_STREAM_ACCESSIBILITY, false);
if (volumeSource == a11yVolumeSrc
&& (AUDIO_MODE_RINGTONE == mEngine->getPhoneState()) &&
@@ -6401,12 +6667,12 @@
// in-call: always cap volume by voice volume + some low headroom
if ((volumeSource != callVolumeSrc && (isInCall() ||
mOutputs.isActiveLocally(callVolumeSrc))) &&
- (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM) ||
+ (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM, false) ||
volumeSource == ringVolumeSrc || volumeSource == musicVolumeSrc ||
volumeSource == alarmVolumeSrc ||
- volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION) ||
- volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
- volumeSource == toVolumeSource(AUDIO_STREAM_DTMF) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION, false) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE, false) ||
+ volumeSource == toVolumeSource(AUDIO_STREAM_DTMF, false) ||
volumeSource == a11yVolumeSrc)) {
auto &voiceCurves = getVolumeCurves(callVolumeSrc);
int voiceVolumeIndex = voiceCurves.getVolumeIndex(deviceTypes);
@@ -6444,9 +6710,9 @@
AUDIO_DEVICE_OUT_BLE_HEADSET}).empty() &&
((volumeSource == alarmVolumeSrc ||
volumeSource == ringVolumeSrc) ||
- (volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION)) ||
- (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM)) ||
- ((volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE)) &&
+ (volumeSource == toVolumeSource(AUDIO_STREAM_NOTIFICATION, false)) ||
+ (volumeSource == toVolumeSource(AUDIO_STREAM_SYSTEM, false)) ||
+ ((volumeSource == toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE, false)) &&
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
curves.canBeMuted()) {
@@ -6532,10 +6798,10 @@
outputDesc->getMuteCount(volumeSource), outputDesc->isActive(volumeSource));
return NO_ERROR;
}
- VolumeSource callVolSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL);
- VolumeSource btScoVolSrc = toVolumeSource(AUDIO_STREAM_BLUETOOTH_SCO);
- bool isVoiceVolSrc = callVolSrc == volumeSource;
- bool isBtScoVolSrc = btScoVolSrc == volumeSource;
+ VolumeSource callVolSrc = toVolumeSource(AUDIO_STREAM_VOICE_CALL, false);
+ VolumeSource btScoVolSrc = toVolumeSource(AUDIO_STREAM_BLUETOOTH_SCO, false);
+ bool isVoiceVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (callVolSrc == volumeSource);
+ bool isBtScoVolSrc = (volumeSource != VOLUME_SOURCE_NONE) && (btScoVolSrc == volumeSource);
bool isScoRequested = isScoRequestedForComm();
// do not change in call volume if bluetooth is connected and vice versa
@@ -6560,8 +6826,9 @@
isSingleDeviceType(deviceTypes, audio_is_bluetooth_out_sco_device))) {
volumeDb = 0.0f;
}
+ const bool muted = (index == 0) && (volumeDb != 0.0f);
outputDesc->setVolume(
- volumeDb, volumeSource, curves.getStreamTypes(), deviceTypes, delayMs, force);
+ volumeDb, muted, volumeSource, curves.getStreamTypes(), deviceTypes, delayMs, force);
if (outputDesc == mPrimaryOutput && (isVoiceVolSrc || isBtScoVolSrc)) {
float voiceVolume;
@@ -6603,8 +6870,10 @@
for (auto attributes: mEngine->getAllAttributesForProductStrategy(strategy)) {
ALOGVV("%s() attributes %s, mute %d, output ID %d", __func__,
toString(attributes).c_str(), on, outputDesc->getId());
- VolumeSource source = toVolumeSource(attributes);
- if (std::find(begin(sourcesToMute), end(sourcesToMute), source) == end(sourcesToMute)) {
+ VolumeSource source = toVolumeSource(attributes, false);
+ if ((source != VOLUME_SOURCE_NONE) &&
+ (std::find(begin(sourcesToMute), end(sourcesToMute), source)
+ == end(sourcesToMute))) {
sourcesToMute.push_back(source);
}
}
@@ -6627,7 +6896,7 @@
if (on) {
if (!outputDesc->isMuted(volumeSource)) {
if (curves.canBeMuted() &&
- (volumeSource != toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE) ||
+ (volumeSource != toVolumeSource(AUDIO_STREAM_ENFORCED_AUDIBLE, false) ||
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) ==
AUDIO_POLICY_FORCE_NONE))) {
checkAndSetVolume(curves, volumeSource, 0, outputDesc, deviceTypes, delayMs);
@@ -7010,7 +7279,7 @@
}
sp<SwAudioOutputDescriptor> desc = new SwAudioOutputDescriptor(profile, mpClientInterface);
audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
- status_t status = desc->open(nullptr, devices,
+ status_t status = desc->open(nullptr /* halConfig */, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
@@ -7040,7 +7309,7 @@
config.offload_info.channel_mask = config.channel_mask;
config.offload_info.format = config.format;
- status = desc->open(&config, devices,
+ status = desc->open(&config, nullptr /* mixerConfig */, devices,
AUDIO_STREAM_DEFAULT, AUDIO_OUTPUT_FLAG_NONE, &output);
if (status != NO_ERROR) {
return nullptr;
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.h b/services/audiopolicy/managerdefault/AudioPolicyManager.h
index c4885e4..dcd12cd 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.h
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.h
@@ -356,6 +356,16 @@
BAD_VALUE : NO_ERROR;
}
+ virtual bool canBeSpatialized(const audio_attributes_t *attr,
+ const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const;
+
+ virtual status_t getSpatializerOutput(const audio_config_base_t *config,
+ const audio_attributes_t *attr,
+ audio_io_handle_t *output);
+
+ virtual status_t releaseSpatializerOutput(audio_io_handle_t output);
+
bool isCallScreenModeSupported() override;
void onNewAudioModulesAvailable() override;
@@ -421,13 +431,30 @@
{
return static_cast<VolumeSource>(volumeGroup);
}
- VolumeSource toVolumeSource(const audio_attributes_t &attributes) const
+ /**
+ * @brief toVolumeSource converts audio attributes into a volume source
+ * (either a legacy stream or a volume group). If fallback on default is allowed, and if
+ * the audio attributes do not follow any specific product strategy's rule, they will be
+ * associated with the default volume source, e.g. music. Thus, any call to the volume
+ * APIs using this translation function may affect the default volume source.
+ * If fallback is not allowed and no matching rule is identified for the given attributes,
+ * the volume source will be undefined, and no volume will be altered/modified.
+ * @param attributes the audio attributes to be considered
+ * @param fallbackOnDefault whether to fall back on the default volume source when no
+ * product strategy rule matches
+ * @return the volume source associated with the given attributes, otherwise either music
+ * if fallbackOnDefault is set, or none.
+ */
+ VolumeSource toVolumeSource(
+ const audio_attributes_t &attributes, bool fallbackOnDefault = true) const
{
- return toVolumeSource(mEngine->getVolumeGroupForAttributes(attributes));
+ return toVolumeSource(mEngine->getVolumeGroupForAttributes(
+ attributes, fallbackOnDefault));
}
- VolumeSource toVolumeSource(audio_stream_type_t stream) const
+ VolumeSource toVolumeSource(
+ audio_stream_type_t stream, bool fallbackOnDefault = true) const
{
- return toVolumeSource(mEngine->getVolumeGroupForStreamType(stream));
+ return toVolumeSource(mEngine->getVolumeGroupForStreamType(
+ stream, fallbackOnDefault));
}
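+ // Illustrative usage (sketch, not part of this change): callers that must not
+ // disturb the default (music) volume pass fallbackOnDefault = false and test the
+ // result against VOLUME_SOURCE_NONE before acting:
+ // VolumeSource src = toVolumeSource(attributes, false /*fallbackOnDefault*/);
+ // if (src != VOLUME_SOURCE_NONE) { /* safe to query or alter this source */ }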
IVolumeCurves &getVolumeCurves(VolumeSource volumeSource)
{
@@ -453,14 +480,27 @@
void removeOutput(audio_io_handle_t output);
void addInput(audio_io_handle_t input, const sp<AudioInputDescriptor>& inputDesc);
- // change the route of the specified output. Returns the number of ms we have slept to
- // allow new routing to take effect in certain cases.
+ /**
+ * @brief setOutputDevices changes the route of the specified output.
+ * @param outputDesc the output to be considered
+ * @param device the device(s) to which the output should be routed
+ * @param force if true, force the routing even if nothing changed.
+ * @param delayMs if specified, delay to apply for mute/volume op when changing device
+ * @param patchHandle if specified, the patch handle this output is connected through.
+ * @param requiresMuteCheck if true, check the mute state, e.g. when another output on a
+ * shared device is currently active, to allow a proper drain and avoid pops
+ * @param requiresVolumeCheck if true, the caller requires volumes to be reapplied when
+ * the routing did not change (but the output is still routed).
+ * @return the number of ms we have slept to allow new routing to take effect in certain
+ * cases.
+ */
uint32_t setOutputDevices(const sp<SwAudioOutputDescriptor>& outputDesc,
const DeviceVector &device,
bool force = false,
int delayMs = 0,
audio_patch_handle_t *patchHandle = NULL,
- bool requiresMuteCheck = true);
+ bool requiresMuteCheck = true,
+ bool requiresVolumeCheck = false);
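+ // Illustrative call (hypothetical values, sketch only): reapply volumes on an
+ // unchanged route without forcing a re-route:
+ // uint32_t sleptMs = setOutputDevices(outputDesc, newDevices,
+ // false /*force*/, 0 /*delayMs*/, nullptr /*patchHandle*/,
+ // true /*requiresMuteCheck*/, true /*requiresVolumeCheck*/);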
status_t resetOutputDevice(const sp<AudioOutputDescriptor>& outputDesc,
int delayMs = 0,
audio_patch_handle_t *patchHandle = NULL);
@@ -797,6 +837,8 @@
sp<SwAudioOutputDescriptor> mPrimaryOutput; // primary output descriptor
// list of descriptors for outputs currently opened
+ sp<SwAudioOutputDescriptor> mSpatializerOutput;
+
SwAudioOutputCollection mOutputs;
// copy of mOutputs before setDeviceConnectionState() opens new outputs
// reset to mOutputs when updateDevicesAndOutputs() is called.
@@ -933,7 +975,7 @@
audio_io_handle_t getOutputForDevices(
const DeviceVector &devices,
audio_session_t session,
- audio_stream_type_t stream,
+ const audio_attributes_t *attr,
const audio_config_t *config,
audio_output_flags_t *flags,
bool forceMutingHaptic = false);
@@ -948,6 +990,14 @@
audio_output_flags_t flags,
const DeviceVector &devices,
audio_io_handle_t *output);
+
+ sp<IOProfile> getSpatializerOutputProfile(const audio_config_t *config,
+ const AudioDeviceTypeAddrVector &devices) const;
+
+ static bool isChannelMaskSpatialized(audio_channel_mask_t channels);
+
+ void checkVirtualizerClientRoutes();
+
/**
* @brief getInputForDevice selects an input handle for a given input device and
* requester context
diff --git a/services/audiopolicy/service/Android.bp b/services/audiopolicy/service/Android.bp
index d504659..f3d4f2f 100644
--- a/services/audiopolicy/service/Android.bp
+++ b/services/audiopolicy/service/Android.bp
@@ -16,6 +16,8 @@
"AudioPolicyInterfaceImpl.cpp",
"AudioPolicyService.cpp",
"CaptureStateNotifier.cpp",
+ "Spatializer.cpp",
+ "SpatializerPoseController.cpp",
],
include_dirs: [
@@ -27,6 +29,7 @@
"libaudioclient",
"libaudioclient_aidl_conversion",
"libaudiofoundation",
+ "libaudiohal",
"libaudiopolicy",
"libaudiopolicymanagerdefault",
"libaudioutils",
@@ -34,13 +37,18 @@
"libcutils",
"libeffectsconfig",
"libhardware_legacy",
+ "libheadtracking",
+ "libheadtracking-binding",
"liblog",
"libmedia_helper",
"libmediametrics",
"libmediautils",
"libpermission",
+ "libsensor",
"libsensorprivacy",
+ "libshmemcompat",
"libutils",
+ "libstagefright_foundation",
"audioclient-types-aidl-cpp",
"audioflinger-aidl-cpp",
"audiopolicy-aidl-cpp",
@@ -48,6 +56,7 @@
"capture_state_listener-aidl-cpp",
"framework-permission-aidl-cpp",
"packagemanager_aidl-cpp",
+ "spatializer-aidl-cpp",
],
static_libs: [
@@ -56,6 +65,7 @@
],
header_libs: [
+ "libaudiohal_headers",
"libaudiopolicycommon",
"libaudiopolicyengine_interface_headers",
"libaudiopolicymanager_interface_headers",
@@ -71,6 +81,8 @@
export_shared_lib_headers: [
"libactivitymanager_aidl",
+ "libheadtracking",
+ "libheadtracking-binding",
"libsensorprivacy",
"framework-permission-aidl-cpp",
],
diff --git a/services/audiopolicy/service/AudioPolicyClientImpl.cpp b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
index cd53073..863ad56 100644
--- a/services/audiopolicy/service/AudioPolicyClientImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyClientImpl.cpp
@@ -40,7 +40,8 @@
status_t AudioPolicyService::AudioPolicyClient::openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags)
@@ -55,14 +56,17 @@
media::OpenOutputResponse response;
request.module = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_module_handle_t_int32_t(module));
- request.config = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*config));
+ request.halConfig = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_t_AudioConfig(*halConfig));
+ request.mixerConfig =
+ VALUE_OR_RETURN_STATUS(legacy2aidl_audio_config_base_t_AudioConfigBase(*mixerConfig));
request.device = VALUE_OR_RETURN_STATUS(legacy2aidl_DeviceDescriptorBase(device));
request.flags = VALUE_OR_RETURN_STATUS(legacy2aidl_audio_output_flags_t_int32_t_mask(flags));
status_t status = af->openOutput(request, &response);
if (status == OK) {
*output = VALUE_OR_RETURN_STATUS(aidl2legacy_int32_t_audio_io_handle_t(response.output));
- *config = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
+ *halConfig =
+ VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfig_audio_config_t(response.config));
*latencyMs = VALUE_OR_RETURN_STATUS(convertIntegral<uint32_t>(response.latencyMs));
}
return status;
@@ -301,4 +305,15 @@
return af->updateSecondaryOutputs(trackSecondaryOutputs);
}
+status_t AudioPolicyService::AudioPolicyClient::setDeviceConnectedState(
+ const struct audio_port_v7 *port, bool connected) {
+ sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
+ if (af == nullptr) {
+ ALOGW("%s: could not get AudioFlinger", __func__);
+ return PERMISSION_DENIED;
+ }
+ return af->setDeviceConnectedState(port, connected);
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
index 62dbc8d..ff1e674 100644
--- a/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
+++ b/services/audiopolicy/service/AudioPolicyInterfaceImpl.cpp
@@ -121,11 +121,14 @@
ALOGV("setDeviceConnectionState()");
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return binderStatusFromStatusT(
- mAudioPolicyManager->setDeviceConnectionState(device, state,
+ status_t status = mAudioPolicyManager->setDeviceConnectionState(device, state,
deviceAidl.address.c_str(),
deviceNameAidl.c_str(),
- encodedFormat));
+ encodedFormat);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::getDeviceConnectionState(const media::AudioDevice& deviceAidl,
@@ -165,9 +168,13 @@
ALOGV("handleDeviceConfigChange()");
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
- return binderStatusFromStatusT(
- mAudioPolicyManager->handleDeviceConfigChange(device, deviceAidl.address.c_str(),
- deviceNameAidl.c_str(), encodedFormat));
+ status_t status = mAudioPolicyManager->handleDeviceConfigChange(
+ device, deviceAidl.address.c_str(), deviceNameAidl.c_str(), encodedFormat);
+
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::setPhoneState(media::AudioMode stateAidl, int32_t uidAidl)
@@ -234,6 +241,7 @@
Mutex::Autolock _l(mLock);
AutoCallerClear acc;
mAudioPolicyManager->setForceUse(usage, config);
+ onCheckSpatializer_l();
return Status::ok();
}
@@ -2064,8 +2072,11 @@
return binderStatusFromStatusT(NO_INIT);
}
Mutex::Autolock _l(mLock);
- return binderStatusFromStatusT(
- mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices));
+ status_t status = mAudioPolicyManager->setDevicesRoleForStrategy(strategy, role, devices);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::removeDevicesRoleForStrategy(int32_t strategyAidl,
@@ -2078,8 +2089,11 @@
return binderStatusFromStatusT(NO_INIT);
}
Mutex::Autolock _l(mLock);
- return binderStatusFromStatusT(
- mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role));
+ status_t status = mAudioPolicyManager->removeDevicesRoleForStrategy(strategy, role);
+ if (status == NO_ERROR) {
+ onCheckSpatializer_l();
+ }
+ return binderStatusFromStatusT(status);
}
Status AudioPolicyService::getDevicesForRoleAndStrategy(
@@ -2207,4 +2221,46 @@
return Status::ok();
}
+Status AudioPolicyService::getSpatializer(
+ const sp<media::INativeSpatializerCallback>& callback,
+ media::GetSpatializerResponse* _aidl_return) {
+ _aidl_return->spatializer = nullptr;
+ if (callback == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ if (mSpatializer != nullptr) {
+ RETURN_IF_BINDER_ERROR(
+ binderStatusFromStatusT(mSpatializer->registerCallback(callback)));
+ _aidl_return->spatializer = mSpatializer;
+ }
+ return Status::ok();
+}
+
+Status AudioPolicyService::canBeSpatialized(
+ const std::optional<media::AudioAttributesInternal>& attrAidl,
+ const std::optional<media::AudioConfig>& configAidl,
+ const std::vector<media::AudioDevice>& devicesAidl,
+ bool* _aidl_return) {
+ if (mAudioPolicyManager == nullptr) {
+ return binderStatusFromStatusT(NO_INIT);
+ }
+ audio_attributes_t attr = AUDIO_ATTRIBUTES_INITIALIZER;
+ if (attrAidl.has_value()) {
+ attr = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioAttributesInternal_audio_attributes_t(attrAidl.value()));
+ }
+ audio_config_t config = AUDIO_CONFIG_INITIALIZER;
+ if (configAidl.has_value()) {
+ config = VALUE_OR_RETURN_BINDER_STATUS(
+ aidl2legacy_AudioConfig_audio_config_t(configAidl.value()));
+ }
+ AudioDeviceTypeAddrVector devices = VALUE_OR_RETURN_BINDER_STATUS(
+ convertContainer<AudioDeviceTypeAddrVector>(devicesAidl,
+ aidl2legacy_AudioDeviceTypeAddress));
+
+ Mutex::Autolock _l(mLock);
+ *_aidl_return = mAudioPolicyManager->canBeSpatialized(&attr, &config, devices);
+ return Status::ok();
+}
+
} // namespace android
diff --git a/services/audiopolicy/service/AudioPolicyService.cpp b/services/audiopolicy/service/AudioPolicyService.cpp
index 9190251..cd83900 100644
--- a/services/audiopolicy/service/AudioPolicyService.cpp
+++ b/services/audiopolicy/service/AudioPolicyService.cpp
@@ -127,6 +127,7 @@
loadAudioPolicyManager();
mAudioPolicyManager = mCreateAudioPolicyManager(mAudioPolicyClient);
}
+
// load audio processing modules
sp<AudioPolicyEffects> audioPolicyEffects = new AudioPolicyEffects();
sp<UidPolicy> uidPolicy = new UidPolicy(this);
@@ -139,6 +140,18 @@
}
uidPolicy->registerSelf();
sensorPrivacyPolicy->registerSelf();
+
+ // Create spatializer if supported
+ if (mAudioPolicyManager != nullptr) {
+ Mutex::Autolock _l(mLock);
+ const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+ AudioDeviceTypeAddrVector devices;
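+ // An empty device vector asks whether spatialization is possible on the
+ // current default output devices (assumption based on canBeSpatialized()).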
+ bool hasSpatializer = mAudioPolicyManager->canBeSpatialized(&attr, nullptr, devices);
+ if (hasSpatializer) {
+ mSpatializer = Spatializer::create(this);
+ }
+ }
+ AudioSystem::audioPolicyReady();
}
void AudioPolicyService::unloadAudioPolicyManager()
@@ -353,6 +366,60 @@
}
}
+void AudioPolicyService::onCheckSpatializer()
+{
+ Mutex::Autolock _l(mLock);
+ onCheckSpatializer_l();
+}
+
+void AudioPolicyService::onCheckSpatializer_l()
+{
+ if (mSpatializer != nullptr) {
+ mOutputCommandThread->checkSpatializerCommand();
+ }
+}
+
+void AudioPolicyService::doOnCheckSpatializer()
+{
+ Mutex::Autolock _l(mLock);
+
+ if (mSpatializer != nullptr) {
+ // Note: mSpatializer != nullptr => mAudioPolicyManager != nullptr
+ if (mSpatializer->getLevel() != media::SpatializationLevel::NONE) {
+ audio_io_handle_t currentOutput = mSpatializer->getOutput();
+ audio_io_handle_t newOutput;
+ const audio_attributes_t attr = attributes_initializer(AUDIO_USAGE_MEDIA);
+ audio_config_base_t config = mSpatializer->getAudioInConfig();
+ status_t status =
+ mAudioPolicyManager->getSpatializerOutput(&config, &attr, &newOutput);
+
+ if (status == NO_ERROR && currentOutput == newOutput) {
+ return;
+ }
+ mLock.unlock();
+ // It is OK to call detachOutput() if none is currently attached.
+ mSpatializer->detachOutput();
+ if (status != NO_ERROR || newOutput == AUDIO_IO_HANDLE_NONE) {
+ mLock.lock();
+ return;
+ }
+ status = mSpatializer->attachOutput(newOutput);
+ mLock.lock();
+ if (status != NO_ERROR) {
+ mAudioPolicyManager->releaseSpatializerOutput(newOutput);
+ }
+ } else if (mSpatializer->getLevel() == media::SpatializationLevel::NONE
+ && mSpatializer->getOutput() != AUDIO_IO_HANDLE_NONE) {
+ mLock.unlock();
+ audio_io_handle_t output = mSpatializer->detachOutput();
+ mLock.lock();
+ if (output != AUDIO_IO_HANDLE_NONE) {
+ mAudioPolicyManager->releaseSpatializerOutput(output);
+ }
+ }
+ }
+}
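+
+// Note (sketch): mLock is deliberately dropped around attachOutput()/detachOutput()
+// above because the Spatializer may call back into the service (e.g. through
+// SpatializerPolicyCallback) and re-take mLock, which would otherwise deadlock.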
+
status_t AudioPolicyService::clientCreateAudioPatch(const struct audio_patch *patch,
audio_patch_handle_t *handle,
int delayMs)
@@ -990,7 +1057,8 @@
case TRANSACTION_addDevicesRoleForCapturePreset:
case TRANSACTION_removeDevicesRoleForCapturePreset:
case TRANSACTION_clearDevicesRoleForCapturePreset:
- case TRANSACTION_getDevicesForRoleAndCapturePreset: {
+ case TRANSACTION_getDevicesForRoleAndCapturePreset:
+ case TRANSACTION_getSpatializer: {
if (!isServiceUid(IPCThreadState::self()->getCallingUid())) {
ALOGW("%s: transaction %d received from PID %d unauthorized UID %d",
__func__, code, IPCThreadState::self()->getCallingPid(),
@@ -1764,6 +1832,17 @@
mLock.lock();
} break;
+ case CHECK_SPATIALIZER: {
+ ALOGV("AudioCommandThread() processing updateUID states");
+ svc = mService.promote();
+ if (svc == 0) {
+ break;
+ }
+ mLock.unlock();
+ svc->doOnCheckSpatializer();
+ mLock.lock();
+ } break;
+
default:
ALOGW("AudioCommandThread() unknown command %d", command->mCommand);
}
@@ -2075,6 +2154,14 @@
sendCommand(command);
}
+void AudioPolicyService::AudioCommandThread::checkSpatializerCommand()
+{
+ sp<AudioCommand> command = new AudioCommand();
+ command->mCommand = CHECK_SPATIALIZER;
+ ALOGV("AudioCommandThread() adding check spatializer");
+ sendCommand(command);
+}
+
status_t AudioPolicyService::AudioCommandThread::sendCommand(sp<AudioCommand>& command, int delayMs)
{
{
diff --git a/services/audiopolicy/service/AudioPolicyService.h b/services/audiopolicy/service/AudioPolicyService.h
index 9ec5341..1a0f838 100644
--- a/services/audiopolicy/service/AudioPolicyService.h
+++ b/services/audiopolicy/service/AudioPolicyService.h
@@ -19,6 +19,7 @@
#define ANDROID_AUDIOPOLICYSERVICE_H
#include <android/media/BnAudioPolicyService.h>
+#include <android/media/GetSpatializerResponse.h>
#include <android-base/thread_annotations.h>
#include <cutils/misc.h>
#include <cutils/config_utils.h>
@@ -38,6 +39,7 @@
#include <mediautils/ServiceUtilities.h>
#include "AudioPolicyEffects.h"
#include "CaptureStateNotifier.h"
+#include "Spatializer.h"
#include <AudioPolicyInterface.h>
#include <android/hardware/BnSensorPrivacyListener.h>
#include <android/content/AttributionSourceState.h>
@@ -53,7 +55,8 @@
class AudioPolicyService :
public BinderService<AudioPolicyService>,
public media::BnAudioPolicyService,
- public IBinder::DeathRecipient
+ public IBinder::DeathRecipient,
+ public SpatializerPolicyCallback
{
friend class BinderService<AudioPolicyService>;
@@ -243,11 +246,15 @@
binder::Status registerSoundTriggerCaptureStateListener(
const sp<media::ICaptureStateListener>& listener, bool* _aidl_return) override;
- virtual status_t onTransact(
- uint32_t code,
- const Parcel& data,
- Parcel* reply,
- uint32_t flags);
+ binder::Status getSpatializer(const sp<media::INativeSpatializerCallback>& callback,
+ media::GetSpatializerResponse* _aidl_return) override;
+ binder::Status canBeSpatialized(
+ const std::optional<media::AudioAttributesInternal>& attr,
+ const std::optional<media::AudioConfig>& config,
+ const std::vector<media::AudioDevice>& devices,
+ bool* _aidl_return) override;
+
+ status_t onTransact(uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags) override;
// IBinder::DeathRecipient
virtual void binderDied(const wp<IBinder>& who);
@@ -313,6 +320,16 @@
void onRoutingUpdated();
void doOnRoutingUpdated();
+ /**
+ * Spatializer SpatializerPolicyCallback implementation.
+ * onCheckSpatializer() sends an event to mOutputCommandThread, which executes
+ * doOnCheckSpatializer() to check whether a Spatializer output must be opened or closed
+ * by the audio policy manager and to attach/detach the spatializer effect accordingly.
+ */
+ void onCheckSpatializer() override;
+ void onCheckSpatializer_l();
+ void doOnCheckSpatializer();
+
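+ // Illustrative sequence (sketch, derived from the description above):
+ // Spatializer::setLevel(level != NONE) -> onCheckSpatializer()
+ // -> CHECK_SPATIALIZER command -> doOnCheckSpatializer()
+ // -> AudioPolicyManager::getSpatializerOutput() -> Spatializer::attachOutput()
+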
void setEffectSuspended(int effectId,
audio_session_t sessionId,
bool suspended);
@@ -483,7 +500,8 @@
SET_EFFECT_SUSPENDED,
AUDIO_MODULES_UPDATE,
ROUTING_UPDATED,
- UPDATE_UID_STATES
+ UPDATE_UID_STATES,
+ CHECK_SPATIALIZER
};
AudioCommandThread (String8 name, const wp<AudioPolicyService>& service);
@@ -532,6 +550,7 @@
void audioModulesUpdateCommand();
void routingChangedCommand();
void updateUidStatesCommand();
+ void checkSpatializerCommand();
void insertCommand_l(AudioCommand *command, int delayMs = 0);
private:
class AudioCommandData;
@@ -667,7 +686,8 @@
// The audio policy manager can check if the proposed parameters are suitable or not and act accordingly.
virtual status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t *config,
+ audio_config_t *halConfig,
+ audio_config_base_t *mixerConfig,
const sp<DeviceDescriptorBase>& device,
uint32_t *latencyMs,
audio_output_flags_t flags);
@@ -761,6 +781,9 @@
status_t updateSecondaryOutputs(
const TrackSecondaryOutputsMap& trackSecondaryOutputs) override;
+ status_t setDeviceConnectedState(
+ const struct audio_port_v7 *port, bool connected) override;
+
private:
AudioPolicyService *mAudioPolicyService;
};
@@ -985,6 +1008,8 @@
CaptureStateNotifier mCaptureStateNotifier;
+ sp<Spatializer> mSpatializer;
+
void *mLibraryHandle = nullptr;
CreateAudioPolicyManagerInstance mCreateAudioPolicyManager;
DestroyAudioPolicyManagerInstance mDestroyAudioPolicyManager;
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
new file mode 100644
index 0000000..0fdbe20
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -0,0 +1,746 @@
+/*
+**
+** Copyright 2021, The Android Open Source Project
+**
+** Licensed under the Apache License, Version 2.0 (the "License");
+** you may not use this file except in compliance with the License.
+** You may obtain a copy of the License at
+**
+** http://www.apache.org/licenses/LICENSE-2.0
+**
+** Unless required by applicable law or agreed to in writing, software
+** distributed under the License is distributed on an "AS IS" BASIS,
+** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+** See the License for the specific language governing permissions and
+** limitations under the License.
+*/
+
+
+#define LOG_TAG "Spatializer"
+//#define LOG_NDEBUG 0
+#include <utils/Log.h>
+
+#include <limits.h>
+#include <stdint.h>
+#include <sys/types.h>
+
+#include <android/content/AttributionSourceState.h>
+#include <audio_utils/fixedfft.h>
+#include <cutils/bitops.h>
+#include <hardware/sensors.h>
+#include <media/audiohal/EffectsFactoryHalInterface.h>
+#include <media/stagefright/foundation/AHandler.h>
+#include <media/stagefright/foundation/AMessage.h>
+#include <media/ShmemCompat.h>
+#include <mediautils/ServiceUtilities.h>
+#include <utils/Thread.h>
+
+#include "Spatializer.h"
+
+namespace android {
+
+using aidl_utils::statusTFromBinderStatus;
+using aidl_utils::binderStatusFromStatusT;
+using android::content::AttributionSourceState;
+using binder::Status;
+using media::HeadTrackingMode;
+using media::Pose3f;
+using media::SpatializationLevel;
+using media::SpatializationMode;
+using media::SpatializerHeadTrackingMode;
+using media::SensorPoseProvider;
+
+using namespace std::chrono_literals;
+
+#define VALUE_OR_RETURN_BINDER_STATUS(x) \
+ ({ auto _tmp = (x); \
+ if (!_tmp.ok()) return aidl_utils::binderStatusFromStatusT(_tmp.error()); \
+ std::move(_tmp.value()); })
+
+// ---------------------------------------------------------------------------
+
+class Spatializer::EngineCallbackHandler : public AHandler {
+public:
+ EngineCallbackHandler(wp<Spatializer> spatializer)
+ : mSpatializer(spatializer) {
+ }
+
+ enum {
+ // Device state callbacks
+ kWhatOnFramesProcessed, // AudioEffect::EVENT_FRAMES_PROCESSED
+ kWhatOnHeadToStagePose, // SpatializerPoseController::Listener::onHeadToStagePose
+ kWhatOnActualModeChange, // SpatializerPoseController::Listener::onActualModeChange
+ };
+ static constexpr const char *kNumFramesKey = "numFrames";
+ static constexpr const char *kModeKey = "mode";
+ static constexpr const char *kTranslation0Key = "translation0";
+ static constexpr const char *kTranslation1Key = "translation1";
+ static constexpr const char *kTranslation2Key = "translation2";
+ static constexpr const char *kRotation0Key = "rotation0";
+ static constexpr const char *kRotation1Key = "rotation1";
+ static constexpr const char *kRotation2Key = "rotation2";
+
+ void onMessageReceived(const sp<AMessage> &msg) override {
+ switch (msg->what()) {
+ case kWhatOnFramesProcessed: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ int numFrames;
+ if (!msg->findInt32(kNumFramesKey, &numFrames)) {
+ ALOGE("%s: Cannot find num frames!", __func__);
+ return;
+ }
+ if (numFrames > 0) {
+ spatializer->calculateHeadPose();
+ }
+ } break;
+ case kWhatOnHeadToStagePose: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ std::vector<float> headToStage(sHeadPoseKeys.size());
+ for (size_t i = 0; i < sHeadPoseKeys.size(); i++) {
+ if (!msg->findFloat(sHeadPoseKeys[i], &headToStage[i])) {
+ ALOGE("%s: Cannot find %s!", __func__, sHeadPoseKeys[i]);
+ return;
+ }
+ }
+ spatializer->onHeadToStagePoseMsg(headToStage);
+ } break;
+ case kWhatOnActualModeChange: {
+ sp<Spatializer> spatializer = mSpatializer.promote();
+ if (spatializer == nullptr) {
+ ALOGW("%s: Cannot promote spatializer", __func__);
+ return;
+ }
+ int mode;
+ if (!msg->findInt32(EngineCallbackHandler::kModeKey, &mode)) {
+ ALOGE("%s: Cannot find actualMode!", __func__);
+ return;
+ }
+ spatializer->onActualModeChangeMsg(static_cast<HeadTrackingMode>(mode));
+ } break;
+ default:
+ LOG_ALWAYS_FATAL("Invalid callback message %d", msg->what());
+ }
+ }
+private:
+ wp<Spatializer> mSpatializer;
+};
+
+const std::vector<const char *> Spatializer::sHeadPoseKeys = {
+ Spatializer::EngineCallbackHandler::kTranslation0Key,
+ Spatializer::EngineCallbackHandler::kTranslation1Key,
+ Spatializer::EngineCallbackHandler::kTranslation2Key,
+ Spatializer::EngineCallbackHandler::kRotation0Key,
+ Spatializer::EngineCallbackHandler::kRotation1Key,
+ Spatializer::EngineCallbackHandler::kRotation2Key,
+};
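+
+// Illustrative mapping (sketch): a head-to-stage pose travels as one AMessage float
+// per key above, in the same order as Pose3f::toVector() produces them:
+// [translation0, translation1, translation2, rotation0, rotation1, rotation2]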
+
+// ---------------------------------------------------------------------------
+sp<Spatializer> Spatializer::create(SpatializerPolicyCallback *callback) {
+ sp<Spatializer> spatializer;
+
+ sp<EffectsFactoryHalInterface> effectsFactoryHal = EffectsFactoryHalInterface::create();
+ if (effectsFactoryHal == nullptr) {
+ ALOGW("%s failed to create effect factory interface", __func__);
+ return spatializer;
+ }
+
+ std::vector<effect_descriptor_t> descriptors;
+ status_t status =
+ effectsFactoryHal->getDescriptors(FX_IID_SPATIALIZER, &descriptors);
+ if (status != NO_ERROR) {
+ ALOGW("%s failed to get spatializer descriptor, error %d", __func__, status);
+ return spatializer;
+ }
+ ALOG_ASSERT(!descriptors.empty(),
+ "%s getDescriptors() returned no error but empty list", __func__);
+
+ //TODO: get supported spatialization modes from FX engine or descriptor
+
+ sp<EffectHalInterface> effect;
+ status = effectsFactoryHal->createEffect(&descriptors[0].uuid, AUDIO_SESSION_OUTPUT_STAGE,
+ AUDIO_IO_HANDLE_NONE, AUDIO_PORT_HANDLE_NONE, &effect);
+ ALOGI("%s FX create status %d effect %p", __func__, status, effect.get());
+
+ if (status == NO_ERROR && effect != nullptr) {
+ spatializer = new Spatializer(descriptors[0], callback);
+ if (spatializer->loadEngineConfiguration(effect) != NO_ERROR) {
+ spatializer.clear();
+ }
+ }
+
+ return spatializer;
+}
+
+Spatializer::Spatializer(effect_descriptor_t engineDescriptor, SpatializerPolicyCallback* callback)
+ : mEngineDescriptor(engineDescriptor),
+ mPolicyCallback(callback) {
+ ALOGV("%s", __func__);
+}
+
+void Spatializer::onFirstRef() {
+ mLooper = new ALooper;
+ mLooper->setName("Spatializer-looper");
+ mLooper->start(
+ /*runOnCallingThread*/false,
+ /*canCallJava*/ false,
+ PRIORITY_AUDIO);
+
+ mHandler = new EngineCallbackHandler(this);
+ mLooper->registerHandler(mHandler);
+}
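+
+// Note (assumption about RefBase semantics): the handler is created in onFirstRef()
+// rather than in the constructor because EngineCallbackHandler stores a
+// wp<Spatializer>, which requires this object to already be managed by a strong
+// pointer.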
+
+Spatializer::~Spatializer() {
+ ALOGV("%s", __func__);
+ if (mLooper != nullptr) {
+ mLooper->stop();
+ mLooper->unregisterHandler(mHandler->id());
+ }
+ mLooper.clear();
+ mHandler.clear();
+}
+
+status_t Spatializer::loadEngineConfiguration(sp<EffectHalInterface> effect) {
+ ALOGV("%s", __func__);
+
+ std::vector<bool> supportsHeadTracking;
+ status_t status = getHalParameter<false>(effect, SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED,
+ &supportsHeadTracking);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ mSupportsHeadTracking = supportsHeadTracking[0];
+
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_LEVELS, &mLevels);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_SPATIALIZATION_MODES,
+ &mSpatializationModes);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ status = getHalParameter<true>(effect, SPATIALIZER_PARAM_SUPPORTED_CHANNEL_MASKS,
+ &mChannelMasks);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ return NO_ERROR;
+}
+
+/** Gets the channel mask, sampling rate and format set for the spatializer input. */
+audio_config_base_t Spatializer::getAudioInConfig() const {
+ std::lock_guard lock(mLock);
+ audio_config_base_t config = AUDIO_CONFIG_BASE_INITIALIZER;
+ // For now use the highest supported channel count
+ uint32_t maxCount = 0;
+ for (auto mask : mChannelMasks) {
+ const uint32_t count = audio_channel_count_from_out_mask(mask);
+ if (count > maxCount) {
+ maxCount = count;
+ config.channel_mask = mask;
+ }
+ }
+ return config;
+}
+
+status_t Spatializer::registerCallback(
+ const sp<media::INativeSpatializerCallback>& callback) {
+ std::lock_guard lock(mLock);
+ if (callback == nullptr) {
+ return BAD_VALUE;
+ }
+
+ sp<IBinder> binder = IInterface::asBinder(callback);
+ status_t status = binder->linkToDeath(this);
+ if (status == NO_ERROR) {
+ mSpatializerCallback = callback;
+ }
+ ALOGV("%s status %d", __func__, status);
+ return status;
+}
+
+// IBinder::DeathRecipient
+void Spatializer::binderDied(__unused const wp<IBinder> &who) {
+ {
+ std::lock_guard lock(mLock);
+ mLevel = SpatializationLevel::NONE;
+ mSpatializerCallback.clear();
+ }
+ ALOGV("%s", __func__);
+ mPolicyCallback->onCheckSpatializer();
+}
+
+// ISpatializer
+Status Spatializer::getSupportedLevels(std::vector<SpatializationLevel> *levels) {
+ ALOGV("%s", __func__);
+ if (levels == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ levels->push_back(SpatializationLevel::NONE);
+ levels->insert(levels->end(), mLevels.begin(), mLevels.end());
+ return Status::ok();
+}
+
+Status Spatializer::setLevel(SpatializationLevel level) {
+ ALOGV("%s level %d", __func__, (int)level);
+ if (level != SpatializationLevel::NONE
+ && std::find(mLevels.begin(), mLevels.end(), level) == mLevels.end()) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ sp<media::INativeSpatializerCallback> callback;
+ bool levelChanged = false;
+ {
+ std::lock_guard lock(mLock);
+ levelChanged = mLevel != level;
+ mLevel = level;
+ callback = mSpatializerCallback;
+
+ if (levelChanged && mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, std::vector<SpatializationLevel>{level});
+ }
+ }
+
+ if (levelChanged) {
+ mPolicyCallback->onCheckSpatializer();
+ if (callback != nullptr) {
+ callback->onLevelChanged(level);
+ }
+ }
+ return Status::ok();
+}
+
+Status Spatializer::getLevel(SpatializationLevel *level) {
+ if (level == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *level = mLevel;
+ ALOGV("%s level %d", __func__, (int)*level);
+ return Status::ok();
+}
+
+Status Spatializer::isHeadTrackingSupported(bool *supports) {
+ ALOGV("%s mSupportsHeadTracking %d", __func__, mSupportsHeadTracking);
+ if (supports == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *supports = mSupportsHeadTracking;
+ return Status::ok();
+}
+
+Status Spatializer::getSupportedHeadTrackingModes(
+ std::vector<SpatializerHeadTrackingMode>* modes) {
+ std::lock_guard lock(mLock);
+ ALOGV("%s", __func__);
+ if (modes == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+
+ modes->push_back(SpatializerHeadTrackingMode::DISABLED);
+ if (mSupportsHeadTracking) {
+ if (mHeadSensor != SpatializerPoseController::INVALID_SENSOR) {
+ modes->push_back(SpatializerHeadTrackingMode::RELATIVE_WORLD);
+ if (mScreenSensor != SpatializerPoseController::INVALID_SENSOR) {
+ modes->push_back(SpatializerHeadTrackingMode::RELATIVE_SCREEN);
+ }
+ }
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setDesiredHeadTrackingMode(SpatializerHeadTrackingMode mode) {
+ ALOGV("%s mode %d", __func__, (int)mode);
+
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ switch (mode) {
+ case SpatializerHeadTrackingMode::OTHER:
+ return binderStatusFromStatusT(BAD_VALUE);
+ case SpatializerHeadTrackingMode::DISABLED:
+ mDesiredHeadTrackingMode = HeadTrackingMode::STATIC;
+ break;
+ case SpatializerHeadTrackingMode::RELATIVE_WORLD:
+ mDesiredHeadTrackingMode = HeadTrackingMode::WORLD_RELATIVE;
+ break;
+ case SpatializerHeadTrackingMode::RELATIVE_SCREEN:
+ mDesiredHeadTrackingMode = HeadTrackingMode::SCREEN_RELATIVE;
+ break;
+ }
+
+ if (mPoseController != nullptr) {
+ mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+ }
+
+ return Status::ok();
+}
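+
+// Mode mapping used above, and inverted in onActualModeChangeMsg() below (sketch):
+// SpatializerHeadTrackingMode::DISABLED <-> HeadTrackingMode::STATIC
+// SpatializerHeadTrackingMode::RELATIVE_WORLD <-> HeadTrackingMode::WORLD_RELATIVE
+// SpatializerHeadTrackingMode::RELATIVE_SCREEN <-> HeadTrackingMode::SCREEN_RELATIVE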
+
+Status Spatializer::getActualHeadTrackingMode(SpatializerHeadTrackingMode *mode) {
+ if (mode == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *mode = mActualHeadTrackingMode;
+ ALOGV("%s mode %d", __func__, (int)*mode);
+ return Status::ok();
+}
+
+Status Spatializer::recenterHeadTracker() {
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->recenter();
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setGlobalTransform(const std::vector<float>& screenToStage) {
+ ALOGV("%s", __func__);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::optional<Pose3f> maybePose = Pose3f::fromVector(screenToStage);
+ if (!maybePose.has_value()) {
+ ALOGW("Invalid screenToStage vector.");
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->setScreenToStagePose(maybePose.value());
+ }
+ return Status::ok();
+}
+
+Status Spatializer::release() {
+ ALOGV("%s", __func__);
+ bool levelChanged = false;
+ {
+ std::lock_guard lock(mLock);
+ if (mSpatializerCallback == nullptr) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+
+ sp<IBinder> binder = IInterface::asBinder(mSpatializerCallback);
+ binder->unlinkToDeath(this);
+ mSpatializerCallback.clear();
+
+ levelChanged = mLevel != SpatializationLevel::NONE;
+ mLevel = SpatializationLevel::NONE;
+ }
+
+ if (levelChanged) {
+ mPolicyCallback->onCheckSpatializer();
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setHeadSensor(int sensorHandle) {
+ ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mHeadSensor = sensorHandle;
+ if (mPoseController != nullptr) {
+ mPoseController->setHeadSensor(mHeadSensor);
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setScreenSensor(int sensorHandle) {
+ ALOGV("%s sensorHandle %d", __func__, sensorHandle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mScreenSensor = sensorHandle;
+ if (mPoseController != nullptr) {
+ mPoseController->setScreenSensor(mScreenSensor);
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setDisplayOrientation(float physicalToLogicalAngle) {
+ ALOGV("%s physicalToLogicalAngle %f", __func__, physicalToLogicalAngle);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ std::lock_guard lock(mLock);
+ mDisplayOrientation = physicalToLogicalAngle;
+ if (mPoseController != nullptr) {
+ mPoseController->setDisplayOrientation(mDisplayOrientation);
+ }
+ if (mEngine != nullptr) {
+ setEffectParameter_l(
+ SPATIALIZER_PARAM_DISPLAY_ORIENTATION, std::vector<float>{physicalToLogicalAngle});
+ }
+ return Status::ok();
+}
+
+Status Spatializer::setHingeAngle(float hingeAngle) {
+ std::lock_guard lock(mLock);
+ ALOGV("%s hingeAngle %f", __func__, hingeAngle);
+ if (mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_HINGE_ANGLE, std::vector<float>{hingeAngle});
+ }
+ return Status::ok();
+}
+
+Status Spatializer::getSupportedModes(std::vector<SpatializationMode> *modes) {
+ ALOGV("%s", __func__);
+ if (modes == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ *modes = mSpatializationModes;
+ return Status::ok();
+}
+
+Status Spatializer::registerHeadTrackingCallback(
+ const sp<media::ISpatializerHeadTrackingCallback>& callback) {
+ ALOGV("%s callback %p", __func__, callback.get());
+ std::lock_guard lock(mLock);
+ if (!mSupportsHeadTracking) {
+ return binderStatusFromStatusT(INVALID_OPERATION);
+ }
+ mHeadTrackingCallback = callback;
+ return Status::ok();
+}
+
+Status Spatializer::setParameter(int key, const std::vector<unsigned char>& value) {
+ ALOGV("%s key %d", __func__, key);
+ std::lock_guard lock(mLock);
+ status_t status = INVALID_OPERATION;
+ if (mEngine != nullptr) {
+ status = setEffectParameter_l(key, value);
+ }
+ return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getParameter(int key, std::vector<unsigned char> *value) {
+ ALOGV("%s key %d value size %d", __func__, key,
+ (value != nullptr ? (int)value->size() : -1));
+ if (value == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ status_t status = INVALID_OPERATION;
+ if (mEngine != nullptr) {
+ ALOGV("%s key %d mEngine %p", __func__, key, mEngine.get());
+ status = getEffectParameter_l(key, value);
+ }
+ return binderStatusFromStatusT(status);
+}
+
+Status Spatializer::getOutput(int *output) {
+ ALOGV("%s", __func__);
+ if (output == nullptr) {
+ return binderStatusFromStatusT(BAD_VALUE);
+ }
+ std::lock_guard lock(mLock);
+ *output = VALUE_OR_RETURN_BINDER_STATUS(legacy2aidl_audio_io_handle_t_int32_t(mOutput));
+ ALOGV("%s got output %d", __func__, *output);
+ return Status::ok();
+}
+
+// SpatializerPoseController::Listener
+void Spatializer::onHeadToStagePose(const Pose3f& headToStage) {
+ ALOGV("%s", __func__);
+ LOG_ALWAYS_FATAL_IF(!mSupportsHeadTracking,
+ "onHeadToStagePose() called with no head tracking support!");
+
+ auto vec = headToStage.toVector();
+ LOG_ALWAYS_FATAL_IF(vec.size() != sHeadPoseKeys.size(),
+ "%s invalid head to stage vector size %zu", __func__, vec.size());
+
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnHeadToStagePose, mHandler);
+ for (size_t i = 0 ; i < sHeadPoseKeys.size(); i++) {
+ msg->setFloat(sHeadPoseKeys[i], vec[i]);
+ }
+ msg->post();
+}
+
+void Spatializer::onHeadToStagePoseMsg(const std::vector<float>& headToStage) {
+ ALOGV("%s", __func__);
+ sp<media::ISpatializerHeadTrackingCallback> callback;
+ {
+ std::lock_guard lock(mLock);
+ callback = mHeadTrackingCallback;
+ if (mEngine != nullptr) {
+ setEffectParameter_l(SPATIALIZER_PARAM_HEAD_TO_STAGE, headToStage);
+ }
+ }
+
+ if (callback != nullptr) {
+ callback->onHeadToSoundStagePoseUpdated(headToStage);
+ }
+}
+
+void Spatializer::onActualModeChange(HeadTrackingMode mode) {
+ ALOGV("%s(%d)", __func__, (int)mode);
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnActualModeChange, mHandler);
+ msg->setInt32(EngineCallbackHandler::kModeKey, static_cast<int>(mode));
+ msg->post();
+}
+
+void Spatializer::onActualModeChangeMsg(HeadTrackingMode mode) {
+ ALOGV("%s(%d)", __func__, (int) mode);
+ sp<media::ISpatializerHeadTrackingCallback> callback;
+ SpatializerHeadTrackingMode spatializerMode;
+ {
+ std::lock_guard lock(mLock);
+ if (!mSupportsHeadTracking) {
+ spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+ } else {
+ switch (mode) {
+ case HeadTrackingMode::STATIC:
+ spatializerMode = SpatializerHeadTrackingMode::DISABLED;
+ break;
+ case HeadTrackingMode::WORLD_RELATIVE:
+ spatializerMode = SpatializerHeadTrackingMode::RELATIVE_WORLD;
+ break;
+ case HeadTrackingMode::SCREEN_RELATIVE:
+ spatializerMode = SpatializerHeadTrackingMode::RELATIVE_SCREEN;
+ break;
+ default:
+ LOG_ALWAYS_FATAL("Unknown mode: %d", mode);
+ }
+ }
+ mActualHeadTrackingMode = spatializerMode;
+ callback = mHeadTrackingCallback;
+ }
+ if (callback != nullptr) {
+ callback->onHeadTrackingModeChanged(spatializerMode);
+ }
+}
+
+status_t Spatializer::attachOutput(audio_io_handle_t output) {
+ std::shared_ptr<SpatializerPoseController> poseController;
+ bool outputChanged = false;
+ sp<media::INativeSpatializerCallback> callback;
+
+ {
+ std::lock_guard lock(mLock);
+ ALOGV("%s output %d mOutput %d", __func__, (int)output, (int)mOutput);
+ if (mOutput != AUDIO_IO_HANDLE_NONE) {
+ LOG_ALWAYS_FATAL_IF(mEngine == nullptr, "%s output set without FX engine", __func__);
+ // remove FX instance
+ mEngine->setEnabled(false);
+ mEngine.clear();
+ }
+ // create FX instance on output
+ AttributionSourceState attributionSource = AttributionSourceState();
+ mEngine = new AudioEffect(attributionSource);
+ mEngine->set(nullptr, &mEngineDescriptor.uuid, 0, Spatializer::engineCallback /* cbf */,
+ this /* user */, AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */,
+ false /* probe */, true /* notifyFramesProcessed */);
+ status_t status = mEngine->initCheck();
+ ALOGV("%s mEngine create status %d", __func__, (int)status);
+ if (status != NO_ERROR) {
+ return status;
+ }
+
+ setEffectParameter_l(SPATIALIZER_PARAM_LEVEL,
+ std::vector<SpatializationLevel>{mLevel});
+ setEffectParameter_l(SPATIALIZER_PARAM_HEADTRACKING_MODE,
+ std::vector<SpatializerHeadTrackingMode>{mActualHeadTrackingMode});
+
+ mEngine->setEnabled(true);
+ outputChanged = mOutput != output;
+ mOutput = output;
+
+ if (mSupportsHeadTracking) {
+ mPoseController = std::make_shared<SpatializerPoseController>(
+ static_cast<SpatializerPoseController::Listener*>(this), 10ms, 50ms);
+ LOG_ALWAYS_FATAL_IF(mPoseController == nullptr,
+ "%s could not allocate pose controller", __func__);
+
+ mPoseController->setDesiredMode(mDesiredHeadTrackingMode);
+ mPoseController->setHeadSensor(mHeadSensor);
+ mPoseController->setScreenSensor(mScreenSensor);
+ mPoseController->setDisplayOrientation(mDisplayOrientation);
+ poseController = mPoseController;
+ }
+ callback = mSpatializerCallback;
+ }
+ if (poseController != nullptr) {
+ poseController->waitUntilCalculated();
+ }
+
+ if (outputChanged && callback != nullptr) {
+ callback->onOutputChanged(output);
+ }
+
+ return NO_ERROR;
+}
+
+audio_io_handle_t Spatializer::detachOutput() {
+ audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
+ sp<media::INativeSpatializerCallback> callback;
+
+ {
+ std::lock_guard lock(mLock);
+ ALOGV("%s mOutput %d", __func__, (int)mOutput);
+ if (mOutput == AUDIO_IO_HANDLE_NONE) {
+ return output;
+ }
+ // remove FX instance
+ mEngine->setEnabled(false);
+ mEngine.clear();
+ output = mOutput;
+ mOutput = AUDIO_IO_HANDLE_NONE;
+ mPoseController.reset();
+
+ callback = mSpatializerCallback;
+ }
+
+ if (callback != nullptr) {
+ callback->onOutputChanged(AUDIO_IO_HANDLE_NONE);
+ }
+ return output;
+}
+
+void Spatializer::calculateHeadPose() {
+ ALOGV("%s", __func__);
+ std::lock_guard lock(mLock);
+ if (mPoseController != nullptr) {
+ mPoseController->calculateAsync();
+ }
+}
+
+void Spatializer::engineCallback(int32_t event, void *user, void *info) {
+ if (user == nullptr) {
+ return;
+ }
+ Spatializer* const me = reinterpret_cast<Spatializer *>(user);
+ switch (event) {
+ case AudioEffect::EVENT_FRAMES_PROCESSED: {
+ int frames = info == nullptr ? 0 : *(int*)info;
+ ALOGD("%s frames processed %d for me %p", __func__, frames, me);
+ me->postFramesProcessedMsg(frames);
+ } break;
+ default:
+ ALOGD("%s event %d", __func__, event);
+ break;
+ }
+}
+
+void Spatializer::postFramesProcessedMsg(int frames) {
+ sp<AMessage> msg =
+ new AMessage(EngineCallbackHandler::kWhatOnFramesProcessed, mHandler);
+ msg->setInt32(EngineCallbackHandler::kNumFramesKey, frames);
+ msg->post();
+}
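+
+// Event flow (sketch): AudioEffect::EVENT_FRAMES_PROCESSED (engineCallback, effect
+// callback thread) -> postFramesProcessedMsg() -> looper delivers
+// kWhatOnFramesProcessed -> calculateHeadPose() ->
+// SpatializerPoseController::calculateAsync().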
+
+} // namespace android
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
new file mode 100644
index 0000000..4d77b78
--- /dev/null
+++ b/services/audiopolicy/service/Spatializer.h
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ANDROID_MEDIA_SPATIALIZER_H
+#define ANDROID_MEDIA_SPATIALIZER_H
+
+#include <android/media/BnEffect.h>
+#include <android/media/BnSpatializer.h>
+#include <android/media/SpatializationLevel.h>
+#include <android/media/SpatializationMode.h>
+#include <android/media/SpatializerHeadTrackingMode.h>
+#include <media/audiohal/EffectHalInterface.h>
+#include <media/stagefright/foundation/ALooper.h>
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_spatializer.h>
+
+#include "SpatializerPoseController.h"
+
+namespace android {
+
+
+// ----------------------------------------------------------------------------
+
+/**
+ * A callback interface from the Spatializer object to its parent AudioPolicyService.
+ * This is implemented by the audio policy service hosting the Spatializer to perform
+ * actions needed when a state change inside the Spatializer requires some audio system
+ * changes that cannot be performed by the Spatializer. For instance opening or closing a
+ * spatializer output stream when the spatializer is enabled or disabled
+ */
+class SpatializerPolicyCallback {
+public:
+ /** Called when a state change occurs that requires the parent audio policy service to take
+ * some action.
+ */
+ virtual void onCheckSpatializer() = 0;
+
+ virtual ~SpatializerPolicyCallback() = default;
+};
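+
+// Illustrative sketch (not part of this change): a minimal SpatializerPolicyCallback
+// implementation. The hypothetical postCheckToCommandThread() stands in for whatever
+// deferral mechanism the host uses; AudioPolicyService, for instance, posts a
+// CHECK_SPATIALIZER command to its output command thread.
+//
+// class MyPolicyCallback : public SpatializerPolicyCallback {
+// public:
+// void onCheckSpatializer() override { postCheckToCommandThread(); }
+// };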
+/**
+ * The Spatializer class implements all the functionality controlling the multichannel
+ * spatializer with head tracking in the native audio service: audio policy and audio flinger.
+ * It presents an AIDL interface available to the java audio service to discover the availability
+ * of the feature and options, control its state and register an active head tracking sensor.
+ * It maintains the current state of the platform spatializer and applies the stored parameters
+ * when the spatializer engine is created and enabled.
+ * Based on the requested spatializer level, it will request the creation of a specialized output
+ * mixer to the audio policy service which will in turn notify the Spatializer of the output
+ * stream on which a spatializer engine should be created, configured and enabled.
+ * The spatializer also hosts the head tracking management logic. This logic receives the
+ * desired head tracking mode and selected head tracking sensor, registers a sensor event listener
+ * and delivers the compounded head pose information to the spatializer engine.
+ *
+ * Workflow:
+ * - Initialization: when the audio policy service starts, it checks if a spatializer effect
+ * engine exists and if the audio policy manager reports a dedicated spatializer output profile.
+ * If both conditions are met, a Spatializer object is created
+ * - Capabilities discovery: AudioService will call AudioSystem::canBeSpatialized() and if true,
+ * acquire an ISpatializer interface with AudioSystem::getSpatializer(). This interface
+ * will be used to query the implementation capabilities and configure the spatializer.
+ * - Enabling: when ISpatializer::setLevel() sets a level different from NONE the spatializer
+ * is considered enabled. The audio policy callback onCheckSpatializer() is called. This
+ * triggers a request to audio policy manager to open a spatialization output stream and a
+ * spatializer mixer is created in audio flinger. When an output is returned by audio policy
+ * manager, Spatializer::attachOutput() is called which creates and enables the spatializer
+ * stage engine on the specified output.
+ * - Disabling: when the spatialization level is set to NONE, the spatializer is considered
+ * disabled. The audio policy callback onCheckSpatializer() is called. This triggers a call
+ * to Spatializer::detachOutput() and the spatializer engine is released. Then a request is
+ * made to audio policy manager to release and close the spatializer output stream and the
+ * spatializer mixer thread is destroyed.
+ */
+class Spatializer : public media::BnSpatializer,
+ public IBinder::DeathRecipient,
+ private SpatializerPoseController::Listener {
+ public:
+ static sp<Spatializer> create(SpatializerPolicyCallback *callback);
+
+ ~Spatializer() override;
+
+ /** RefBase */
+ void onFirstRef();
+
+ /** ISpatializer, see ISpatializer.aidl */
+ binder::Status release() override;
+ binder::Status getSupportedLevels(std::vector<media::SpatializationLevel>* levels) override;
+ binder::Status setLevel(media::SpatializationLevel level) override;
+ binder::Status getLevel(media::SpatializationLevel *level) override;
+    binder::Status isHeadTrackingSupported(bool *supports) override;
+ binder::Status getSupportedHeadTrackingModes(
+ std::vector<media::SpatializerHeadTrackingMode>* modes) override;
+ binder::Status setDesiredHeadTrackingMode(
+ media::SpatializerHeadTrackingMode mode) override;
+ binder::Status getActualHeadTrackingMode(
+ media::SpatializerHeadTrackingMode* mode) override;
+ binder::Status recenterHeadTracker() override;
+ binder::Status setGlobalTransform(const std::vector<float>& screenToStage) override;
+ binder::Status setHeadSensor(int sensorHandle) override;
+ binder::Status setScreenSensor(int sensorHandle) override;
+ binder::Status setDisplayOrientation(float physicalToLogicalAngle) override;
+ binder::Status setHingeAngle(float hingeAngle) override;
+ binder::Status getSupportedModes(std::vector<media::SpatializationMode>* modes) override;
+ binder::Status registerHeadTrackingCallback(
+ const sp<media::ISpatializerHeadTrackingCallback>& callback) override;
+ binder::Status setParameter(int key, const std::vector<unsigned char>& value) override;
+ binder::Status getParameter(int key, std::vector<unsigned char> *value) override;
+    binder::Status getOutput(int *output) override;
+
+ /** IBinder::DeathRecipient. Listen to the death of the INativeSpatializerCallback. */
+ virtual void binderDied(const wp<IBinder>& who);
+
+ /** Registers a INativeSpatializerCallback when a client is attached to this Spatializer
+ * by audio policy service.
+ */
+ status_t registerCallback(const sp<media::INativeSpatializerCallback>& callback);
+
+ status_t loadEngineConfiguration(sp<EffectHalInterface> effect);
+
+ /** Level getter for use by local classes. */
+ media::SpatializationLevel getLevel() const { std::lock_guard lock(mLock); return mLevel; }
+
+ /** Called by audio policy service when the special output mixer dedicated to spatialization
+ * is opened and the spatializer engine must be created.
+ */
+ status_t attachOutput(audio_io_handle_t output);
+ /** Called by audio policy service when the special output mixer dedicated to spatialization
+     * is closed and the spatializer engine must be released.
+ */
+ audio_io_handle_t detachOutput();
+ /** Returns the output stream the spatializer is attached to. */
+ audio_io_handle_t getOutput() const { std::lock_guard lock(mLock); return mOutput; }
+
+ /** Gets the channel mask, sampling rate and format set for the spatializer input. */
+ audio_config_base_t getAudioInConfig() const;
+
+ void calculateHeadPose();
+
+private:
+ Spatializer(effect_descriptor_t engineDescriptor,
+ SpatializerPolicyCallback *callback);
+
+ static void engineCallback(int32_t event, void* user, void *info);
+
+    // From SpatializerPoseController::Listener
+ void onHeadToStagePose(const media::Pose3f& headToStage) override;
+ void onActualModeChange(media::HeadTrackingMode mode) override;
+
+ void onHeadToStagePoseMsg(const std::vector<float>& headToStage);
+ void onActualModeChangeMsg(media::HeadTrackingMode mode);
+
+ static constexpr int kMaxEffectParamValues = 10;
+ /**
+ * Get a parameter from spatializer engine by calling the effect HAL command method directly.
+ * To be used when the engine instance mEngine is not yet created in the effect framework.
+ * When MULTI_VALUES is false, the expected reply is only one value of type T.
+ * When MULTI_VALUES is true, the expected reply is made of a number (of type T) indicating
+     * how many values are returned, followed by that number of values of type T.
+ */
+ template<bool MULTI_VALUES, typename T>
+ status_t getHalParameter(sp<EffectHalInterface> effect, uint32_t type,
+ std::vector<T> *values) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+ uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1];
+ uint32_t reply[sizeof(effect_param_t) / sizeof(uint32_t) + 2 + kMaxEffectParamValues];
+
+ effect_param_t *p = (effect_param_t *)cmd;
+ p->psize = sizeof(uint32_t);
+ if (MULTI_VALUES) {
+ p->vsize = (kMaxEffectParamValues + 1) * sizeof(T);
+ } else {
+ p->vsize = sizeof(T);
+ }
+ *(uint32_t *)p->data = type;
+ uint32_t replySize = sizeof(effect_param_t) + p->psize + p->vsize;
+
+ status_t status = effect->command(EFFECT_CMD_GET_PARAM,
+ sizeof(effect_param_t) + sizeof(uint32_t), cmd,
+ &replySize, reply);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ if (p->status != NO_ERROR) {
+ return p->status;
+ }
+ if (replySize <
+ sizeof(effect_param_t) + sizeof(uint32_t) + (MULTI_VALUES ? 2 : 1) * sizeof(T)) {
+ return BAD_VALUE;
+ }
+
+ T *params = (T *)((uint8_t *)reply + sizeof(effect_param_t) + sizeof(uint32_t));
+ int numParams = 1;
+ if (MULTI_VALUES) {
+ numParams = (int)*params++;
+ }
+ if (numParams > kMaxEffectParamValues) {
+ return BAD_VALUE;
+ }
+ (*values).clear();
+        std::copy(&params[0], &params[numParams], back_inserter(*values));
+ return NO_ERROR;
+ }
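
As an illustration, the helper above could be used during initialization, before mEngine
exists; the parameter ids below are placeholders standing in for the real spatializer
parameter definitions, so treat this as a hypothetical sketch rather than actual call sites:

    // Hypothetical usage of getHalParameter() (parameter ids are placeholders).
    // MULTI_VALUES == true: the reply is a count followed by that many values.
    std::vector<media::SpatializationLevel> levels;
    status_t status = getHalParameter<true, media::SpatializationLevel>(
            effect, SPATIALIZER_PARAM_SUPPORTED_LEVELS, &levels);

    // MULTI_VALUES == false: the reply is exactly one value of type T.
    std::vector<uint8_t> headTracking;
    status = getHalParameter<false, uint8_t>(
            effect, SPATIALIZER_PARAM_HEADTRACKING_SUPPORTED, &headTracking);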
+
+ /**
+     * Set a parameter on the spatializer engine by calling setParameter on the mEngine
+     * AudioEffect object. More than one value of type T can be passed, depending on the
+     * parameter type, as indicated by the values vector size.
+ */
+ template<typename T>
+ status_t setEffectParameter_l(uint32_t type, const std::vector<T>& values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+ uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values.size()];
+ effect_param_t *p = (effect_param_t *)cmd;
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(T) * values.size();
+ *(uint32_t *)p->data = type;
+ memcpy((uint32_t *)p->data + 1, values.data(), sizeof(T) * values.size());
+
+ status_t status = mEngine->setParameter(p);
+ if (status != NO_ERROR) {
+ return status;
+ }
+ if (p->status != NO_ERROR) {
+ return p->status;
+ }
+ return NO_ERROR;
+ }
+
+ /**
+     * Get a parameter from the spatializer engine by calling getParameter on the mEngine
+     * AudioEffect object. More than one value of type T can be read, depending on the
+     * parameter type, by sizing the values vector accordingly.
+ */
+ template<typename T>
+ status_t getEffectParameter_l(uint32_t type, std::vector<T> *values) REQUIRES(mLock) {
+        static_assert(sizeof(T) <= sizeof(uint32_t), "The size of T must not exceed 32 bits");
+
+ uint32_t cmd[sizeof(effect_param_t) / sizeof(uint32_t) + 1 + values->size()];
+ effect_param_t *p = (effect_param_t *)cmd;
+ p->psize = sizeof(uint32_t);
+ p->vsize = sizeof(T) * values->size();
+ *(uint32_t *)p->data = type;
+
+ status_t status = mEngine->getParameter(p);
+
+ if (status != NO_ERROR) {
+ return status;
+ }
+ if (p->status != NO_ERROR) {
+ return p->status;
+ }
+
+ int numValues = std::min(p->vsize / sizeof(T), values->size());
+ (*values).clear();
+ T *retValues = (T *)((uint8_t *)p->data + sizeof(uint32_t));
+ std::copy(&retValues[0], &retValues[numValues], back_inserter(*values));
+
+ return NO_ERROR;
+ }
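
For the AudioEffect-based variants, a minimal sketch, assuming mLock is held (hence the _l
suffix) and that SPATIALIZER_PARAM_LEVEL is a valid parameter id (placeholder name):

    // Hypothetical round trip through the engine; must be called with mLock held.
    std::vector<media::SpatializationLevel> level = { mLevel };
    status_t status = setEffectParameter_l(SPATIALIZER_PARAM_LEVEL, level);

    // The vector size tells getEffectParameter_l() how many values of type T
    // to expect at most; the vector is cleared and refilled with the reply.
    std::vector<media::SpatializationLevel> current(1);
    status = getEffectParameter_l(SPATIALIZER_PARAM_LEVEL, &current);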
+
+ void postFramesProcessedMsg(int frames);
+
+ /** Effect engine descriptor */
+ const effect_descriptor_t mEngineDescriptor;
+ /** Callback interface to parent audio policy service */
+ SpatializerPolicyCallback* mPolicyCallback;
+
+ /** Mutex protecting internal state */
+ mutable std::mutex mLock;
+
+ /** Client AudioEffect for the engine */
+ sp<AudioEffect> mEngine GUARDED_BY(mLock);
+ /** Output stream the spatializer mixer thread is attached to */
+ audio_io_handle_t mOutput GUARDED_BY(mLock) = AUDIO_IO_HANDLE_NONE;
+
+    /** Callback interface to the client (AudioService) controlling this Spatializer */
+ sp<media::INativeSpatializerCallback> mSpatializerCallback GUARDED_BY(mLock);
+
+ /** Callback interface for head tracking */
+ sp<media::ISpatializerHeadTrackingCallback> mHeadTrackingCallback GUARDED_BY(mLock);
+
+ /** Requested spatialization level */
+ media::SpatializationLevel mLevel GUARDED_BY(mLock) = media::SpatializationLevel::NONE;
+
+ /** Control logic for head-tracking, etc. */
+ std::shared_ptr<SpatializerPoseController> mPoseController GUARDED_BY(mLock);
+
+ /** Last requested head tracking mode */
+ media::HeadTrackingMode mDesiredHeadTrackingMode GUARDED_BY(mLock)
+ = media::HeadTrackingMode::STATIC;
+
+ /** Last-reported actual head-tracking mode. */
+ media::SpatializerHeadTrackingMode mActualHeadTrackingMode GUARDED_BY(mLock)
+ = media::SpatializerHeadTrackingMode::DISABLED;
+
+ /** Selected Head pose sensor */
+ int32_t mHeadSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+ /** Selected Screen pose sensor */
+ int32_t mScreenSensor GUARDED_BY(mLock) = SpatializerPoseController::INVALID_SENSOR;
+
+ /** Last display orientation received */
+ static constexpr float kDisplayOrientationInvalid = 1000;
+ float mDisplayOrientation GUARDED_BY(mLock) = kDisplayOrientationInvalid;
+
+ std::vector<media::SpatializationLevel> mLevels;
+ std::vector<media::SpatializationMode> mSpatializationModes;
+ std::vector<audio_channel_mask_t> mChannelMasks;
+ bool mSupportsHeadTracking;
+
+ // Looper thread for mEngine callbacks
+ class EngineCallbackHandler;
+
+ sp<ALooper> mLooper;
+ sp<EngineCallbackHandler> mHandler;
+
+ static const std::vector<const char *> sHeadPoseKeys;
+};
+
+
+} // namespace android
+
+#endif // ANDROID_MEDIA_SPATIALIZER_H
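
The enabling/disabling workflow documented in the Spatializer class comment can be sketched
as follows. This is a hypothetical illustration of how the parent audio policy service might
react to onCheckSpatializer(); openSpatializerOutput() and closeSpatializerOutput() are
assumed helper names, not the actual AudioPolicyService implementation:

    // Hypothetical handler for SpatializerPolicyCallback::onCheckSpatializer().
    void checkSpatializer(const sp<Spatializer>& spatializer) {
        if (spatializer->getLevel() != media::SpatializationLevel::NONE) {
            // Enabling: open the dedicated spatializer output, then create and
            // enable the engine on it.
            audio_io_handle_t output = openSpatializerOutput();  // assumed helper
            if (output != AUDIO_IO_HANDLE_NONE) {
                spatializer->attachOutput(output);
            }
        } else {
            // Disabling: release the engine first, then close the output stream.
            audio_io_handle_t output = spatializer->detachOutput();
            if (output != AUDIO_IO_HANDLE_NONE) {
                closeSpatializerOutput(output);  // assumed helper
            }
        }
    }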
diff --git a/services/audiopolicy/service/SpatializerPoseController.cpp b/services/audiopolicy/service/SpatializerPoseController.cpp
new file mode 100644
index 0000000..ffedf63
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.cpp
@@ -0,0 +1,243 @@
+/*
+ * Copyright 2021, The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include "SpatializerPoseController.h"
+
+#define LOG_TAG "SpatializerPoseController"
+//#define LOG_NDEBUG 0
+#include <sensor/Sensor.h>
+#include <utils/Log.h>
+#include <utils/SystemClock.h>
+
+namespace android {
+
+using media::createHeadTrackingProcessor;
+using media::HeadTrackingMode;
+using media::HeadTrackingProcessor;
+using media::Pose3f;
+using media::SensorPoseProvider;
+using media::Twist3f;
+
+using namespace std::chrono_literals;
+
+namespace {
+
+// This is how fast, in m/s, we allow position to shift during rate-limiting.
+constexpr float kMaxTranslationalVelocity = 2;
+
+// This is how fast, in rad/s, we allow rotation angle to shift during rate-limiting.
+constexpr float kMaxRotationalVelocity = 4 * M_PI;
+
+// This should be set to the typical time scale that the translation sensors used drift in. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kTranslationalDriftTimeConstant = 20s;
+
+// This should be set to the typical time scale that the rotation sensors used drift in. This
+// means, loosely, for how long we can trust the reading to be "accurate enough". This would
+// determine the time constants used for high-pass filtering those readings. If the value is set
+// too high, we may experience drift. If it is set too low, we may experience poses tending toward
+// identity too fast.
+constexpr auto kRotationalDriftTimeConstant = 20s;
+
+// This is how far into the future we predict the head pose, using linear extrapolation based on
+// twist (velocity). It should be set to a value that matches the characteristic durations of moving
+// one's head. The higher we set this, the more latency we are able to reduce, but setting this too
+// high will result in high prediction errors whenever the head accelerates (changes velocity).
+constexpr auto kPredictionDuration = 10ms;
+
+// After losing this many consecutive samples from either sensor, we would treat the measurement as
+// stale.
+constexpr auto kMaxLostSamples = 4;
+
+// Time units for system clock ticks. This is what the Sensor Framework timestamps represent and
+// what we use for pose filtering.
+using Ticks = std::chrono::nanoseconds;
+
+// How many ticks in a second.
+constexpr auto kTicksPerSecond = Ticks::period::den;
+
+} // namespace
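
The velocity limits above are specified per second, but the processor consumes them in
sensor-timestamp ticks (nanoseconds), which is why the constructor below divides them by
kTicksPerSecond. A small worked example of that conversion (kExamplePerTick is illustrative
only, not part of the source):

    // 2 m/s expressed per nanosecond tick is 2e-9 m/tick.
    static_assert(kTicksPerSecond == 1'000'000'000, "ticks are nanoseconds");
    constexpr float kExamplePerTick =
            kMaxTranslationalVelocity / static_cast<float>(kTicksPerSecond);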
+
+SpatializerPoseController::SpatializerPoseController(Listener* listener,
+ std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod)
+ : mListener(listener),
+ mSensorPeriod(sensorPeriod),
+ mProcessor(createHeadTrackingProcessor(HeadTrackingProcessor::Options{
+ .maxTranslationalVelocity = kMaxTranslationalVelocity / kTicksPerSecond,
+ .maxRotationalVelocity = kMaxRotationalVelocity / kTicksPerSecond,
+ .translationalDriftTimeConstant = Ticks(kTranslationalDriftTimeConstant).count(),
+ .rotationalDriftTimeConstant = Ticks(kRotationalDriftTimeConstant).count(),
+ .freshnessTimeout = Ticks(sensorPeriod * kMaxLostSamples).count(),
+ .predictionDuration = Ticks(kPredictionDuration).count(),
+ })),
+ mPoseProvider(SensorPoseProvider::create("headtracker", this)),
+ mThread([this, maxUpdatePeriod] {
+ while (true) {
+ Pose3f headToStage;
+ std::optional<HeadTrackingMode> modeIfChanged;
+ {
+ std::unique_lock lock(mMutex);
+ mCondVar.wait_for(lock, maxUpdatePeriod,
+ [this] { return mShouldExit || mShouldCalculate; });
+ if (mShouldExit) {
+ ALOGV("Exiting thread");
+ return;
+ }
+
+ // Calculate.
+ std::tie(headToStage, modeIfChanged) = calculate_l();
+ }
+
+ // Invoke the callbacks outside the lock.
+ mListener->onHeadToStagePose(headToStage);
+ if (modeIfChanged) {
+ mListener->onActualModeChange(modeIfChanged.value());
+ }
+
+ {
+ std::lock_guard lock(mMutex);
+ if (!mCalculated) {
+ mCalculated = true;
+ mCondVar.notify_all();
+ }
+ mShouldCalculate = false;
+ }
+ }
+ }) {}
+
+SpatializerPoseController::~SpatializerPoseController() {
+ {
+ std::unique_lock lock(mMutex);
+ mShouldExit = true;
+ mCondVar.notify_all();
+ }
+ mThread.join();
+}
+
+void SpatializerPoseController::setHeadSensor(int32_t sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid and different from the other sensor.
+ if (mHeadSensor != INVALID_SENSOR && mHeadSensor != mScreenSensor) {
+ mPoseProvider->stopSensor(mHeadSensor);
+ }
+
+ if (sensor != INVALID_SENSOR) {
+ if (sensor != mScreenSensor) {
+ // Start new sensor.
+ mHeadSensor =
+ mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+ } else {
+ // Sensor is already enabled.
+ mHeadSensor = mScreenSensor;
+ }
+ } else {
+ mHeadSensor = INVALID_SENSOR;
+ }
+
+ mProcessor->recenter(true, false);
+}
+
+void SpatializerPoseController::setScreenSensor(int32_t sensor) {
+ std::lock_guard lock(mMutex);
+ // Stop current sensor, if valid and different from the other sensor.
+ if (mScreenSensor != INVALID_SENSOR && mScreenSensor != mHeadSensor) {
+ mPoseProvider->stopSensor(mScreenSensor);
+ }
+
+ if (sensor != INVALID_SENSOR) {
+ if (sensor != mHeadSensor) {
+ // Start new sensor.
+ mScreenSensor =
+ mPoseProvider->startSensor(sensor, mSensorPeriod) ? sensor : INVALID_SENSOR;
+ } else {
+ // Sensor is already enabled.
+ mScreenSensor = mHeadSensor;
+ }
+ } else {
+ mScreenSensor = INVALID_SENSOR;
+ }
+
+ mProcessor->recenter(false, true);
+}
+
+void SpatializerPoseController::setDesiredMode(HeadTrackingMode mode) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDesiredMode(mode);
+}
+
+void SpatializerPoseController::setScreenToStagePose(const Pose3f& screenToStage) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setScreenToStagePose(screenToStage);
+}
+
+void SpatializerPoseController::setDisplayOrientation(float physicalToLogicalAngle) {
+ std::lock_guard lock(mMutex);
+ mProcessor->setDisplayOrientation(physicalToLogicalAngle);
+}
+
+void SpatializerPoseController::calculateAsync() {
+ std::lock_guard lock(mMutex);
+ mShouldCalculate = true;
+ mCondVar.notify_all();
+}
+
+void SpatializerPoseController::waitUntilCalculated() {
+ std::unique_lock lock(mMutex);
+ mCondVar.wait(lock, [this] { return mCalculated; });
+}
+
+std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>>
+SpatializerPoseController::calculate_l() {
+ Pose3f headToStage;
+ HeadTrackingMode mode;
+ std::optional<media::HeadTrackingMode> modeIfChanged;
+
+ mProcessor->calculate(elapsedRealtimeNano());
+ headToStage = mProcessor->getHeadToStagePose();
+ mode = mProcessor->getActualMode();
+ if (!mActualMode.has_value() || mActualMode.value() != mode) {
+ mActualMode = mode;
+ modeIfChanged = mode;
+ }
+ return std::make_tuple(headToStage, modeIfChanged);
+}
+
+void SpatializerPoseController::recenter() {
+ std::lock_guard lock(mMutex);
+ mProcessor->recenter();
+}
+
+void SpatializerPoseController::onPose(int64_t timestamp, int32_t sensor, const Pose3f& pose,
+ const std::optional<Twist3f>& twist, bool isNewReference) {
+ std::lock_guard lock(mMutex);
+ if (sensor == mHeadSensor) {
+ mProcessor->setWorldToHeadPose(timestamp, pose, twist.value_or(Twist3f()));
+ if (isNewReference) {
+ mProcessor->recenter(true, false);
+ }
+ }
+ if (sensor == mScreenSensor) {
+ mProcessor->setWorldToScreenPose(timestamp, pose);
+ if (isNewReference) {
+ mProcessor->recenter(false, true);
+ }
+ }
+}
+
+} // namespace android
diff --git a/services/audiopolicy/service/SpatializerPoseController.h b/services/audiopolicy/service/SpatializerPoseController.h
new file mode 100644
index 0000000..2b5c189
--- /dev/null
+++ b/services/audiopolicy/service/SpatializerPoseController.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include <chrono>
+#include <condition_variable>
+#include <limits>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <media/HeadTrackingProcessor.h>
+#include <media/SensorPoseProvider.h>
+
+namespace android {
+
+/**
+ * This class encapsulates the logic for pose processing, intended for driving a spatializer effect.
+ * This includes integration with the Sensor sub-system for retrieving sensor data, doing all the
+ * necessary processing, etc.
+ *
+ * Calculations happen on a dedicated thread and published to the client via the Listener interface.
+ * A calculation may be triggered in one of two ways:
+ * - By calling calculateAsync() - calculation will be kicked off in the background.
+ * - By setting a timeout in the ctor, a calculation will be triggered after the timeout has
+ *   elapsed since the last calculateAsync() call.
+ *
+ * This class is thread-safe.
+ */
+class SpatializerPoseController : private media::SensorPoseProvider::Listener {
+ public:
+ static constexpr int32_t INVALID_SENSOR = media::SensorPoseProvider::INVALID_HANDLE;
+
+ /**
+ * Listener interface for getting pose and mode updates.
+ * Methods will always be invoked from a designated thread.
+ */
+ class Listener {
+ public:
+ virtual ~Listener() = default;
+
+ virtual void onHeadToStagePose(const media::Pose3f&) = 0;
+ virtual void onActualModeChange(media::HeadTrackingMode) = 0;
+ };
+
+ /**
+ * Ctor.
+ * sensorPeriod determines how often to receive updates from the sensors (input rate).
+ * maxUpdatePeriod determines how often to produce an output when calculateAsync() isn't
+ * invoked.
+ */
+ SpatializerPoseController(Listener* listener, std::chrono::microseconds sensorPeriod,
+ std::chrono::microseconds maxUpdatePeriod);
+
+ /** Dtor. */
+ ~SpatializerPoseController();
+
+ /**
+ * Set the sensor that is to be used for head-tracking.
+ * INVALID_SENSOR can be used to disable head-tracking.
+ */
+ void setHeadSensor(int32_t sensor);
+
+ /**
+ * Set the sensor that is to be used for screen-tracking.
+ * INVALID_SENSOR can be used to disable screen-tracking.
+ */
+ void setScreenSensor(int32_t sensor);
+
+ /** Sets the desired head-tracking mode. */
+ void setDesiredMode(media::HeadTrackingMode mode);
+
+ /**
+ * Set the screen-to-stage pose, used in all modes.
+ */
+ void setScreenToStagePose(const media::Pose3f& screenToStage);
+
+ /**
+ * Sets the display orientation.
+ * Orientation is expressed in the angle of rotation from the physical "up" side of the screen
+     * to the logical "up" side of the content displayed on the screen. Counterclockwise angles,
+     * as viewed while facing the screen, are positive.
+ */
+ void setDisplayOrientation(float physicalToLogicalAngle);
+
+ /**
+ * This causes the current poses for both the head and screen to be considered "center".
+ */
+ void recenter();
+
+ /**
+ * This call triggers the recalculation of the output and the invocation of the relevant
+ * callbacks. This call is async and the callbacks will be triggered shortly after.
+ */
+ void calculateAsync();
+
+ /**
+ * Blocks until calculation and invocation of the respective callbacks has happened at least
+ * once. Do not call from within callbacks.
+ */
+ void waitUntilCalculated();
+
+ private:
+ mutable std::mutex mMutex;
+ Listener* const mListener;
+ const std::chrono::microseconds mSensorPeriod;
+ // Order matters for the following two members to ensure correct destruction.
+ std::unique_ptr<media::HeadTrackingProcessor> mProcessor;
+ std::unique_ptr<media::SensorPoseProvider> mPoseProvider;
+ int32_t mHeadSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ int32_t mScreenSensor = media::SensorPoseProvider::INVALID_HANDLE;
+ std::optional<media::HeadTrackingMode> mActualMode;
+ std::thread mThread;
+ std::condition_variable mCondVar;
+ bool mShouldCalculate = true;
+ bool mShouldExit = false;
+ bool mCalculated = false;
+
+ void onPose(int64_t timestamp, int32_t sensor, const media::Pose3f& pose,
+ const std::optional<media::Twist3f>& twist, bool isNewReference) override;
+
+ /**
+ * Calculates the new outputs and updates internal state. Must be called with the lock held.
+ * Returns values that should be passed to the respective callbacks.
+ */
+ std::tuple<media::Pose3f, std::optional<media::HeadTrackingMode>> calculate_l();
+};
+
+} // namespace android
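
A minimal usage sketch of the controller above, assuming a Listener implementation named
MyListener and a sensor handle obtained from the sensor service (both hypothetical):

    // Hypothetical driver code for SpatializerPoseController.
    MyListener listener;  // implements onHeadToStagePose()/onActualModeChange()
    SpatializerPoseController controller(
            &listener,
            std::chrono::microseconds(10000),    // sensor input period (100 Hz)
            std::chrono::microseconds(100000));  // max output period when idle
    controller.setHeadSensor(headSensorHandle);  // handle from the sensor service
    controller.setDesiredMode(media::HeadTrackingMode::WORLD_RELATIVE);
    controller.calculateAsync();       // kick off one calculation in the background
    controller.waitUntilCalculated();  // block until the listener has run once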
diff --git a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
index f7b0565..adef8f1 100644
--- a/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyManagerTestClient.h
@@ -37,7 +37,8 @@
status_t openOutput(audio_module_handle_t module,
audio_io_handle_t *output,
- audio_config_t * /*config*/,
+ audio_config_t * /*halConfig*/,
+ audio_config_base_t * /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t * /*latencyMs*/,
audio_output_flags_t /*flags*/) override {
@@ -102,6 +103,11 @@
++mAudioPortListUpdateCount;
}
+ status_t setDeviceConnectedState(
+ const struct audio_port_v7 *port __unused, bool connected __unused) override {
+ return NO_ERROR;
+ }
+
// Helper methods for tests
size_t getActivePatchesCount() const { return mActivePatches.size(); }
diff --git a/services/audiopolicy/tests/AudioPolicyTestClient.h b/services/audiopolicy/tests/AudioPolicyTestClient.h
index 1384864..da85658 100644
--- a/services/audiopolicy/tests/AudioPolicyTestClient.h
+++ b/services/audiopolicy/tests/AudioPolicyTestClient.h
@@ -30,7 +30,8 @@
}
status_t openOutput(audio_module_handle_t /*module*/,
audio_io_handle_t* /*output*/,
- audio_config_t* /*config*/,
+ audio_config_t* /*halConfig*/,
+ audio_config_base_t* /*mixerConfig*/,
const sp<DeviceDescriptorBase>& /*device*/,
uint32_t* /*latencyMs*/,
audio_output_flags_t /*flags*/) override { return NO_INIT; }
@@ -95,6 +96,10 @@
const TrackSecondaryOutputsMap& trackSecondaryOutputs __unused) override {
return NO_INIT;
}
+ status_t setDeviceConnectedState(
+ const struct audio_port_v7 *port __unused, bool connected __unused) override {
+ return NO_INIT;
+ }
};
} // namespace android
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index 0b6b1fb..229964c 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -1843,9 +1843,11 @@
// Set rotate-and-crop override behavior
if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
- } else if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(clientPackageName,
- orientation, facing)) {
- client->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
+ } else if (effectiveApiLevel == API_2) {
+
+ client->setRotateAndCropOverride(
+ CameraServiceProxyWrapper::getRotateAndCropOverride(
+ clientPackageName, facing, multiuser_get_user_id(clientUid)));
}
// Set camera muting behavior
@@ -2192,7 +2194,6 @@
newDeviceState |= vendorBits;
ALOGV("%s: New device state 0x%" PRIx64, __FUNCTION__, newDeviceState);
- Mutex::Autolock l(mServiceLock);
mCameraProviderManager->notifyDeviceStateChange(newDeviceState);
return Status::ok();
@@ -2226,14 +2227,12 @@
for (auto& current : clients) {
if (current != nullptr) {
const auto basicClient = current->getValue();
- if (basicClient.get() != nullptr) {
- if (CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
- basicClient->getPackageName(), basicClient->getCameraOrientation(),
- basicClient->getCameraFacing())) {
- basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_90);
- } else {
- basicClient->setRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE);
- }
+ if (basicClient.get() != nullptr && basicClient->canCastToApiClient(API_2)) {
+ basicClient->setRotateAndCropOverride(
+ CameraServiceProxyWrapper::getRotateAndCropOverride(
+ basicClient->getPackageName(),
+ basicClient->getCameraFacing(),
+ multiuser_get_user_id(basicClient->getClientUid())));
}
}
}
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index 944b8ab..8c72bd7 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -62,7 +62,7 @@
bool overrideForPerfClass):
Camera2ClientBase(cameraService, cameraClient, clientPackageName, clientFeatureId,
cameraDeviceId, api1CameraId, cameraFacing, sensorOrientation,
- clientPid, clientUid, servicePid, overrideForPerfClass),
+ clientPid, clientUid, servicePid, overrideForPerfClass, /*legacyClient*/ true),
mParameters(api1CameraId, cameraFacing)
{
ATRACE_CALL();
diff --git a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
index eed2654..a38d7ae 100755
--- a/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
+++ b/services/camera/libcameraservice/api1/client2/JpegProcessor.cpp
@@ -78,7 +78,8 @@
}
// Find out buffer size for JPEG
- ssize_t maxJpegSize = device->getJpegBufferSize(params.pictureWidth, params.pictureHeight);
+ ssize_t maxJpegSize = device->getJpegBufferSize(device->infoPhysical(String8("")),
+ params.pictureWidth, params.pictureHeight);
if (maxJpegSize <= 0) {
ALOGE("%s: Camera %d: Jpeg buffer size (%zu) is invalid ",
__FUNCTION__, mId, maxJpegSize);
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 1f3d478..971628a 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -379,6 +379,12 @@
}
String8 physicalId(it.id.c_str());
+ bool hasTestPatternModePhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+ mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_MODE) !=
+ mSupportedPhysicalRequestKeys.end();
+ bool hasTestPatternDataPhysicalKey = std::find(mSupportedPhysicalRequestKeys.begin(),
+ mSupportedPhysicalRequestKeys.end(), ANDROID_SENSOR_TEST_PATTERN_DATA) !=
+ mSupportedPhysicalRequestKeys.end();
if (physicalId != mDevice->getId()) {
auto found = std::find(requestedPhysicalIds.begin(), requestedPhysicalIds.end(),
it.id);
@@ -404,7 +410,8 @@
}
}
- physicalSettingsList.push_back({it.id, filteredParams});
+ physicalSettingsList.push_back({it.id, filteredParams,
+ hasTestPatternModePhysicalKey, hasTestPatternDataPhysicalKey});
}
} else {
physicalSettingsList.push_back({it.id, it.settings});
@@ -1689,7 +1696,7 @@
bool isCompositeStream = false;
for (const auto& gbp : mConfiguredOutputs[streamId].getGraphicBufferProducers()) {
sp<Surface> s = new Surface(gbp, false /*controlledByApp*/);
- isCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(s) |
+ isCompositeStream = camera3::DepthCompositeStream::isDepthCompositeStream(s) ||
camera3::HeicCompositeStream::isHeicCompositeStream(s);
if (isCompositeStream) {
auto compositeIdx = mCompositeStreamMap.indexOfKey(IInterface::asBinder(gbp));
diff --git a/services/camera/libcameraservice/api2/CompositeStream.h b/services/camera/libcameraservice/api2/CompositeStream.h
index 600bd28..d32b71c 100644
--- a/services/camera/libcameraservice/api2/CompositeStream.h
+++ b/services/camera/libcameraservice/api2/CompositeStream.h
@@ -117,6 +117,41 @@
// Composite streams should behave accordingly.
void enableErrorState();
+ // Utility class to lock and unlock a GraphicBuffer
+ class GraphicBufferLocker {
+ public:
+ GraphicBufferLocker(sp<GraphicBuffer> buffer) : _buffer(buffer) {}
+
+ status_t lockAsync(void** dstBuffer, int fenceFd) {
+ if (_buffer == nullptr) return BAD_VALUE;
+
+ status_t res = OK;
+ if (!_locked) {
+                res = _buffer->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN,
+ dstBuffer, fenceFd);
+ if (res == OK) {
+ _locked = true;
+ }
+ }
+ return res;
+ }
+
+ ~GraphicBufferLocker() {
+ if (_locked && _buffer != nullptr) {
+ auto res = _buffer->unlock();
+ if (res != OK) {
+ ALOGE("%s: Error trying to unlock buffer: %s (%d)", __FUNCTION__,
+ strerror(-res), res);
+ }
+ }
+ }
+
+ private:
+ sp<GraphicBuffer> _buffer;
+ bool _locked = false;
+ };
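
A minimal RAII usage sketch mirroring the call sites updated below in DepthCompositeStream
and HeicCompositeStream; the destructor unlocks only if the lock succeeded, so early error
returns stay safe:

    sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
    void* dstBuffer = nullptr;
    GraphicBufferLocker gbLocker(gb);
    status_t res = gbLocker.lockAsync(&dstBuffer, fenceFd);
    if (res != OK) {
        return res;  // gbLocker's destructor is a no-op here
    }
    // ... write into dstBuffer; unlock happens automatically at scope exit.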
+
+
wp<CameraDeviceBase> mDevice;
wp<camera3::StatusTracker> mStatusTracker;
wp<hardware::camera2::ICameraDeviceCallbacks> mRemoteCallback;
diff --git a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
index a66a592..aa057c7 100644
--- a/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/DepthCompositeStream.cpp
@@ -297,7 +297,8 @@
}
sp<GraphicBuffer> gb = GraphicBuffer::from(anb);
- res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, fenceFd);
+ GraphicBufferLocker gbLocker(gb);
+ res = gbLocker.lockAsync(&dstBuffer, fenceFd);
if (res != OK) {
ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
strerror(-res), res);
diff --git a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
index a73ffb9..6058429 100644
--- a/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
+++ b/services/camera/libcameraservice/api2/HeicCompositeStream.cpp
@@ -1130,7 +1130,8 @@
// Copy the content of the file to memory.
sp<GraphicBuffer> gb = GraphicBuffer::from(inputFrame.anb);
void* dstBuffer;
- auto res = gb->lockAsync(GRALLOC_USAGE_SW_WRITE_OFTEN, &dstBuffer, inputFrame.fenceFd);
+ GraphicBufferLocker gbLocker(gb);
+ auto res = gbLocker.lockAsync(&dstBuffer, inputFrame.fenceFd);
if (res != OK) {
ALOGE("%s: Error trying to lock output buffer fence: %s (%d)", __FUNCTION__,
strerror(-res), res);
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 13d044a..5e086c0 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -55,13 +55,14 @@
int clientPid,
uid_t clientUid,
int servicePid,
- bool overrideForPerfClass):
+ bool overrideForPerfClass,
+ bool legacyClient):
TClientBase(cameraService, remoteCallback, clientPackageName, clientFeatureId,
cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid, clientUid,
servicePid),
mSharedCameraCallbacks(remoteCallback),
mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
- mDevice(new Camera3Device(cameraId, overrideForPerfClass)),
+ mDevice(new Camera3Device(cameraId, overrideForPerfClass, legacyClient)),
mDeviceActive(false), mApi1CameraId(api1CameraId)
{
ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index 6246f7b..c49ea2c 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -56,7 +56,8 @@
int clientPid,
uid_t clientUid,
int servicePid,
- bool overrideForPerfClass);
+ bool overrideForPerfClass,
+ bool legacyClient = false);
virtual ~Camera2ClientBase();
virtual status_t initialize(sp<CameraProviderManager> manager, const String8& monitorTags);
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 85b0cc2..b42f3f6 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -99,13 +99,24 @@
virtual status_t dump(int fd, const Vector<String16> &args) = 0;
/**
- * The physical camera device's static characteristics metadata buffer
+ * The physical camera device's static characteristics metadata buffer, or
+ * the logical camera's static characteristics if physical id is empty.
*/
virtual const CameraMetadata& infoPhysical(const String8& physicalId) const = 0;
struct PhysicalCameraSettings {
std::string cameraId;
CameraMetadata metadata;
+
+ // Whether the physical camera supports testPatternMode/testPatternData
+ bool mHasTestPatternModeTag = true;
+ bool mHasTestPatternDataTag = true;
+
+ // Original value of TEST_PATTERN_MODE and DATA so that they can be
+ // restored when sensor muting is turned off
+ int32_t mOriginalTestPatternMode = 0;
+ int32_t mOriginalTestPatternData[4] = {};
+
};
typedef List<PhysicalCameraSettings> PhysicalCameraSettingsList;
@@ -294,7 +305,8 @@
* Get Jpeg buffer size for a given jpeg resolution.
* Negative values are error codes.
*/
- virtual ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const = 0;
+ virtual ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const = 0;
/**
* Connect HAL notifications to a listener. Overwrites previous
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 0e0f15f..0cce2ca 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -359,7 +359,13 @@
for (auto& provider : mProviders) {
ALOGV("%s: Notifying %s for new state 0x%" PRIx64,
__FUNCTION__, provider->mProviderName.c_str(), newState);
+            // b/199240726 Camera providers can, for example, try to add/remove
+ // camera devices as part of the state change notification. Holding
+ // 'mInterfaceMutex' while calling 'notifyDeviceStateChange' can
+ // result in a recursive deadlock.
+ mInterfaceMutex.unlock();
status_t singleRes = provider->notifyDeviceStateChange(mDeviceState);
+ mInterfaceMutex.lock();
if (singleRes != OK) {
ALOGE("%s: Unable to notify provider %s about device state change",
__FUNCTION__,
@@ -367,6 +373,7 @@
res = singleRes;
// continue to do the rest of the providers instead of returning now
}
+ provider->notifyDeviceInfoStateChangeLocked(mDeviceState);
}
return res;
}
@@ -1185,10 +1192,12 @@
}
bool CameraProviderManager::isHiddenPhysicalCamera(const std::string& cameraId) const {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
return isHiddenPhysicalCameraInternal(cameraId).first;
}
status_t CameraProviderManager::filterSmallJpegSizes(const std::string& cameraId) {
+ std::lock_guard<std::mutex> lock(mInterfaceMutex);
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
if (deviceInfo->mId == cameraId) {
@@ -1629,6 +1638,7 @@
return BAD_VALUE;
}
if (deviceInfo == nullptr) return BAD_VALUE;
+ deviceInfo->notifyDeviceStateChange(mDeviceState);
deviceInfo->mStatus = initialStatus;
bool isAPI1Compatible = deviceInfo->isAPI1Compatible();
@@ -2042,6 +2052,14 @@
return OK;
}
+void CameraProviderManager::ProviderInfo::notifyDeviceInfoStateChangeLocked(
+ hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
+ std::lock_guard<std::mutex> lock(mLock);
+ for (auto it = mDevices.begin(); it != mDevices.end(); it++) {
+ (*it)->notifyDeviceStateChange(newDeviceState);
+ }
+}
+
status_t CameraProviderManager::ProviderInfo::notifyDeviceStateChange(
hardware::hidl_bitfield<provider::V2_5::DeviceState> newDeviceState) {
mDeviceState = newDeviceState;
@@ -2296,6 +2314,18 @@
return;
}
+ if (mCameraCharacteristics.exists(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS)) {
+ const auto &stateMap = mCameraCharacteristics.find(ANDROID_INFO_DEVICE_STATE_ORIENTATIONS);
+ if ((stateMap.count > 0) && ((stateMap.count % 2) == 0)) {
+ for (size_t i = 0; i < stateMap.count; i += 2) {
+ mDeviceStateOrientationMap.emplace(stateMap.data.i64[i], stateMap.data.i64[i+1]);
+ }
+ } else {
+ ALOGW("%s: Invalid ANDROID_INFO_DEVICE_STATE_ORIENTATIONS map size: %zu", __FUNCTION__,
+ stateMap.count);
+ }
+ }
+
mSystemCameraKind = getSystemCameraKind();
status_t res = fixupMonochromeTags();
@@ -2424,6 +2454,16 @@
CameraProviderManager::ProviderInfo::DeviceInfo3::~DeviceInfo3() {}
+void CameraProviderManager::ProviderInfo::DeviceInfo3::notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState> newState) {
+
+ if (!mDeviceStateOrientationMap.empty() &&
+ (mDeviceStateOrientationMap.find(newState) != mDeviceStateOrientationMap.end())) {
+ mCameraCharacteristics.update(ANDROID_SENSOR_ORIENTATION,
+ &mDeviceStateOrientationMap[newState], 1);
+ }
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::setTorchMode(bool enabled) {
return setTorchModeForDevice<InterfaceT>(enabled);
}
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index baf20c9..fdb2673 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -40,7 +40,6 @@
#include <camera/VendorTagDescriptor.h>
namespace android {
-
/**
* The vendor tag descriptor class that takes HIDL vendor tag information as
* input. Not part of VendorTagDescriptor class because that class is used
@@ -287,12 +286,6 @@
sp<hardware::camera::device::V3_2::ICameraDeviceSession> *session);
/**
- * Save the ICameraProvider while it is being used by a camera or torch client
- */
- void saveRef(DeviceMode usageType, const std::string &cameraId,
- sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
-
- /**
* Notify that the camera or torch is no longer being used by a camera client
*/
void removeRef(DeviceMode usageType, const std::string &cameraId);
@@ -435,6 +428,10 @@
/**
* Notify provider about top-level device physical state changes
+ *
+ * Note that 'mInterfaceMutex' should not be held when calling this method.
+ * It is possible for camera providers to add/remove devices and try to
+ * acquire it.
*/
status_t notifyDeviceStateChange(
hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
@@ -443,6 +440,15 @@
std::vector<std::unordered_set<std::string>> getConcurrentCameraIdCombinations();
/**
+     * Notify 'DeviceInfo' instances about top-level device physical state changes
+ *
+ * Note that 'mInterfaceMutex' should be held when calling this method.
+ */
+ void notifyDeviceInfoStateChangeLocked(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ newDeviceState);
+
+ /**
* Query the camera provider for concurrent stream configuration support
*/
status_t isConcurrentSessionConfigurationSupported(
@@ -494,6 +500,9 @@
return INVALID_OPERATION;
}
virtual status_t filterSmallJpegSizes() = 0;
+ virtual void notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ /*newState*/) {}
template<class InterfaceT>
sp<InterfaceT> startDeviceInterface();
@@ -554,6 +563,9 @@
bool *status /*out*/)
override;
virtual status_t filterSmallJpegSizes() override;
+ virtual void notifyDeviceStateChange(
+ hardware::hidl_bitfield<hardware::camera::provider::V2_5::DeviceState>
+ newState) override;
DeviceInfo3(const std::string& name, const metadata_vendor_id_t tagId,
const std::string &id, uint16_t minorVersion,
@@ -563,6 +575,8 @@
virtual ~DeviceInfo3();
private:
CameraMetadata mCameraCharacteristics;
+ // Map device states to sensor orientations
+ std::unordered_map<int64_t, int32_t> mDeviceStateOrientationMap;
// A copy of mCameraCharacteristics without performance class
// override
std::unique_ptr<CameraMetadata> mCameraCharNoPCOverride;
@@ -663,6 +677,12 @@
sp<hardware::camera::provider::V2_6::ICameraProvider> &interface2_6);
};
+ /**
+ * Save the ICameraProvider while it is being used by a camera or torch client
+ */
+ void saveRef(DeviceMode usageType, const std::string &cameraId,
+ sp<hardware::camera::provider::V2_4::ICameraProvider> provider);
+
// Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
// and the calling code doesn't mutate the list of providers or their lists of devices.
// Finds the first device of the given ID that falls within the requested version range
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index 87c1c75..de418da 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -75,8 +75,9 @@
namespace android {
-Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass):
+Camera3Device::Camera3Device(const String8 &id, bool overrideForPerfClass, bool legacyClient):
mId(id),
+ mLegacyClient(legacyClient),
mOperatingMode(NO_MODE),
mIsConstrainedHighSpeedConfiguration(false),
mStatus(STATUS_UNINITIALIZED),
@@ -601,15 +602,16 @@
return usage;
}
-ssize_t Camera3Device::getJpegBufferSize(uint32_t width, uint32_t height) const {
+ssize_t Camera3Device::getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const {
// Get max jpeg size (area-wise) for default sensor pixel mode
camera3::Size maxDefaultJpegResolution =
- SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+ SessionConfigurationUtils::getMaxJpegResolution(info,
/*isUltraHighResolutionSensor*/false);
// Get max jpeg size (area-wise) for max resolution sensor pixel mode / 0 if
// not ultra high res sensor
camera3::Size uhrMaxJpegResolution =
- SessionConfigurationUtils::getMaxJpegResolution(mDeviceInfo,
+ SessionConfigurationUtils::getMaxJpegResolution(info,
/*isUltraHighResolution*/true);
if (maxDefaultJpegResolution.width == 0) {
ALOGE("%s: Camera %s: Can't find valid available jpeg sizes in static metadata!",
@@ -625,7 +627,7 @@
// Get max jpeg buffer size
ssize_t maxJpegBufferSize = 0;
- camera_metadata_ro_entry jpegBufMaxSize = mDeviceInfo.find(ANDROID_JPEG_MAX_SIZE);
+ camera_metadata_ro_entry jpegBufMaxSize = info.find(ANDROID_JPEG_MAX_SIZE);
if (jpegBufMaxSize.count == 0) {
ALOGE("%s: Camera %s: Can't find maximum JPEG size in static metadata!", __FUNCTION__,
mId.string());
@@ -655,9 +657,9 @@
return jpegBufferSize;
}
-ssize_t Camera3Device::getPointCloudBufferSize() const {
+ssize_t Camera3Device::getPointCloudBufferSize(const CameraMetadata &info) const {
const int FLOATS_PER_POINT=4;
- camera_metadata_ro_entry maxPointCount = mDeviceInfo.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
+ camera_metadata_ro_entry maxPointCount = info.find(ANDROID_DEPTH_MAX_DEPTH_SAMPLES);
if (maxPointCount.count == 0) {
ALOGE("%s: Camera %s: Can't find maximum depth point cloud size in static metadata!",
__FUNCTION__, mId.string());
@@ -668,14 +670,14 @@
return maxBytesForPointCloud;
}
-ssize_t Camera3Device::getRawOpaqueBufferSize(int32_t width, int32_t height,
- bool maxResolution) const {
+ssize_t Camera3Device::getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width,
+ int32_t height, bool maxResolution) const {
const int PER_CONFIGURATION_SIZE = 3;
const int WIDTH_OFFSET = 0;
const int HEIGHT_OFFSET = 1;
const int SIZE_OFFSET = 2;
camera_metadata_ro_entry rawOpaqueSizes =
- mDeviceInfo.find(
+ info.find(
camera3::SessionConfigurationUtils::getAppropriateModeTag(
ANDROID_SENSOR_OPAQUE_RAW_SIZE,
maxResolution));
@@ -1089,7 +1091,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& result : results) {
@@ -1148,7 +1150,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& result : results) {
@@ -1189,7 +1191,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- *mInterface
+ *mInterface, mLegacyClient
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
@@ -1451,7 +1453,7 @@
if (format == HAL_PIXEL_FORMAT_BLOB) {
ssize_t blobBufferSize;
if (dataSpace == HAL_DATASPACE_DEPTH) {
- blobBufferSize = getPointCloudBufferSize();
+ blobBufferSize = getPointCloudBufferSize(infoPhysical(physicalCameraId));
if (blobBufferSize <= 0) {
SET_ERR_L("Invalid point cloud buffer size %zd", blobBufferSize);
return BAD_VALUE;
@@ -1459,7 +1461,7 @@
} else if (dataSpace == static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
blobBufferSize = width * height;
} else {
- blobBufferSize = getJpegBufferSize(width, height);
+ blobBufferSize = getJpegBufferSize(infoPhysical(physicalCameraId), width, height);
if (blobBufferSize <= 0) {
SET_ERR_L("Invalid jpeg buffer size %zd", blobBufferSize);
return BAD_VALUE;
@@ -1473,7 +1475,8 @@
bool maxResolution =
sensorPixelModesUsed.find(ANDROID_SENSOR_PIXEL_MODE_MAXIMUM_RESOLUTION) !=
sensorPixelModesUsed.end();
- ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(width, height, maxResolution);
+ ssize_t rawOpaqueBufferSize = getRawOpaqueBufferSize(infoPhysical(physicalCameraId), width,
+ height, maxResolution);
if (rawOpaqueBufferSize <= 0) {
SET_ERR_L("Invalid RAW opaque buffer size %zd", rawOpaqueBufferSize);
return BAD_VALUE;
@@ -2455,22 +2458,24 @@
}
if (mSupportCameraMute) {
- auto testPatternModeEntry =
- newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
- newRequest->mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
- testPatternModeEntry.data.i32[0] :
- ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
+ for (auto& settings : newRequest->mSettingsList) {
+ auto testPatternModeEntry =
+ settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ settings.mOriginalTestPatternMode = testPatternModeEntry.count > 0 ?
+ testPatternModeEntry.data.i32[0] :
+ ANDROID_SENSOR_TEST_PATTERN_MODE_OFF;
- auto testPatternDataEntry =
- newRequest->mSettingsList.begin()->metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
- if (testPatternDataEntry.count >= 4) {
- memcpy(newRequest->mOriginalTestPatternData, testPatternDataEntry.data.i32,
- sizeof(CaptureRequest::mOriginalTestPatternData));
- } else {
- newRequest->mOriginalTestPatternData[0] = 0;
- newRequest->mOriginalTestPatternData[1] = 0;
- newRequest->mOriginalTestPatternData[2] = 0;
- newRequest->mOriginalTestPatternData[3] = 0;
+ auto testPatternDataEntry =
+ settings.metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ if (testPatternDataEntry.count >= 4) {
+ memcpy(settings.mOriginalTestPatternData, testPatternDataEntry.data.i32,
+ sizeof(PhysicalCameraSettings::mOriginalTestPatternData));
+ } else {
+ settings.mOriginalTestPatternData[0] = 0;
+ settings.mOriginalTestPatternData[1] = 0;
+ settings.mOriginalTestPatternData[2] = 0;
+ settings.mOriginalTestPatternData[3] = 0;
+ }
}
}
@@ -2677,6 +2682,7 @@
}
mGroupIdPhysicalCameraMap.clear();
+ bool composerSurfacePresent = false;
for (size_t i = 0; i < mOutputStreams.size(); i++) {
// Don't configure bidi streams twice, nor add them twice to the list
@@ -2701,7 +2707,8 @@
// always occupy the initial entry.
if (outputStream->data_space == HAL_DATASPACE_V0_JFIF) {
bufferSizes[k] = static_cast<uint32_t>(
- getJpegBufferSize(outputStream->width, outputStream->height));
+ getJpegBufferSize(infoPhysical(String8(outputStream->physical_camera_id)),
+ outputStream->width, outputStream->height));
} else if (outputStream->data_space ==
static_cast<android_dataspace>(HAL_DATASPACE_JPEG_APP_SEGMENTS)) {
bufferSizes[k] = outputStream->width * outputStream->height;
@@ -2716,6 +2723,10 @@
const String8& physicalCameraId = mOutputStreams[i]->getPhysicalCameraId();
mGroupIdPhysicalCameraMap[streamGroupId].insert(physicalCameraId);
}
+
+ if (outputStream->usage & GraphicBuffer::USAGE_HW_COMPOSER) {
+ composerSurfacePresent = true;
+ }
}
config.streams = streams.editArray();
@@ -2783,6 +2794,8 @@
}
}
+ mRequestThread->setComposerSurface(composerSurfacePresent);
+
// Request thread needs to know to avoid using repeat-last-settings protocol
// across configure_streams() calls
if (notifyRequestThread) {
@@ -4179,6 +4192,7 @@
mCurrentAfTriggerId(0),
mCurrentPreCaptureTriggerId(0),
mRotateAndCropOverride(ANDROID_SCALER_ROTATE_AND_CROP_NONE),
+ mComposerOutput(false),
mCameraMute(ANDROID_SENSOR_TEST_PATTERN_MODE_OFF),
mCameraMuteChanged(false),
mRepeatingLastFrameNumber(
@@ -4829,7 +4843,11 @@
bool triggersMixedIn = (triggerCount > 0 || mPrevTriggers > 0);
mPrevTriggers = triggerCount;
- bool rotateAndCropChanged = overrideAutoRotateAndCrop(captureRequest);
+ // Do not override rotate&crop for stream configurations that include
+ // SurfaceViews(HW_COMPOSER) output. The display rotation there will be
+ // compensated by NATIVE_WINDOW_TRANSFORM_INVERSE_DISPLAY
+ bool rotateAndCropChanged = mComposerOutput ? false :
+ overrideAutoRotateAndCrop(captureRequest);
bool testPatternChanged = overrideTestPattern(captureRequest);
// If the request is the same as last, or we had triggers now or last time or
@@ -5335,6 +5353,13 @@
return OK;
}
+status_t Camera3Device::RequestThread::setComposerSurface(bool composerSurfacePresent) {
+ ATRACE_CALL();
+ Mutex::Autolock l(mTriggerMutex);
+ mComposerOutput = composerSurfacePresent;
+ return OK;
+}
+
status_t Camera3Device::RequestThread::setCameraMute(int32_t muteMode) {
ATRACE_CALL();
Mutex::Autolock l(mTriggerMutex);
@@ -5908,48 +5933,53 @@
bool changed = false;
- int32_t testPatternMode = request->mOriginalTestPatternMode;
- int32_t testPatternData[4] = {
- request->mOriginalTestPatternData[0],
- request->mOriginalTestPatternData[1],
- request->mOriginalTestPatternData[2],
- request->mOriginalTestPatternData[3]
- };
+ // For a multi-camera, the physical cameras support the same set of
+ // test pattern modes as the logical camera.
+ for (auto& settings : request->mSettingsList) {
+ CameraMetadata &metadata = settings.metadata;
- if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
- testPatternMode = mCameraMute;
- testPatternData[0] = 0;
- testPatternData[1] = 0;
- testPatternData[2] = 0;
- testPatternData[3] = 0;
- }
-
- CameraMetadata &metadata = request->mSettingsList.begin()->metadata;
-
- auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
- if (testPatternEntry.count > 0) {
- if (testPatternEntry.data.i32[0] != testPatternMode) {
- testPatternEntry.data.i32[0] = testPatternMode;
- changed = true;
+ int32_t testPatternMode = settings.mOriginalTestPatternMode;
+ int32_t testPatternData[4] = {
+ settings.mOriginalTestPatternData[0],
+ settings.mOriginalTestPatternData[1],
+ settings.mOriginalTestPatternData[2],
+ settings.mOriginalTestPatternData[3]
+ };
+ if (mCameraMute != ANDROID_SENSOR_TEST_PATTERN_MODE_OFF) {
+ testPatternMode = mCameraMute;
+ testPatternData[0] = 0;
+ testPatternData[1] = 0;
+ testPatternData[2] = 0;
+ testPatternData[3] = 0;
}
- } else {
- metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
- &testPatternMode, 1);
- changed = true;
- }
- auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
- if (testPatternColor.count >= 4) {
- for (size_t i = 0; i < 4; i++) {
- if (testPatternColor.data.i32[i] != testPatternData[i]) {
- testPatternColor.data.i32[i] = testPatternData[i];
+ auto testPatternEntry = metadata.find(ANDROID_SENSOR_TEST_PATTERN_MODE);
+ bool supportTestPatternModeKey = settings.mHasTestPatternModeTag;
+ if (testPatternEntry.count > 0) {
+ if (testPatternEntry.data.i32[0] != testPatternMode) {
+ testPatternEntry.data.i32[0] = testPatternMode;
changed = true;
}
+ } else if (supportTestPatternModeKey) {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_MODE,
+ &testPatternMode, 1);
+ changed = true;
}
- } else {
- metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
- testPatternData, 4);
- changed = true;
+
+ auto testPatternColor = metadata.find(ANDROID_SENSOR_TEST_PATTERN_DATA);
+ bool supportTestPatternDataKey = settings.mHasTestPatternDataTag;
+ if (testPatternColor.count >= 4) {
+ for (size_t i = 0; i < 4; i++) {
+ if (testPatternColor.data.i32[i] != testPatternData[i]) {
+ testPatternColor.data.i32[i] = testPatternData[i];
+ changed = true;
+ }
+ }
+ } else if (supportTestPatternDataKey) {
+ metadata.update(ANDROID_SENSOR_TEST_PATTERN_DATA,
+ testPatternData, 4);
+ changed = true;
+ }
}
return changed;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index df941b2..e8a6a08 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -90,7 +90,7 @@
public camera3::FlushBufferInterface {
public:
- explicit Camera3Device(const String8& id, bool overrideForPerfClass);
+ explicit Camera3Device(const String8& id, bool overrideForPerfClass, bool legacyClient = false);
virtual ~Camera3Device();
@@ -192,9 +192,11 @@
status_t prepare(int maxCount, int streamId) override;
- ssize_t getJpegBufferSize(uint32_t width, uint32_t height) const override;
- ssize_t getPointCloudBufferSize() const;
- ssize_t getRawOpaqueBufferSize(int32_t width, int32_t height, bool maxResolution) const;
+ ssize_t getJpegBufferSize(const CameraMetadata &info, uint32_t width,
+ uint32_t height) const override;
+ ssize_t getPointCloudBufferSize(const CameraMetadata &info) const;
+ ssize_t getRawOpaqueBufferSize(const CameraMetadata &info, int32_t width, int32_t height,
+ bool maxResolution) const;
// Methods called by subclasses
void notifyStatus(bool idle); // updates from StatusTracker
@@ -333,6 +335,9 @@
// Camera device ID
const String8 mId;
+ // Legacy camera client flag
+ bool mLegacyClient;
+
// Current stream configuration mode;
int mOperatingMode;
// Current session wide parameters
@@ -573,10 +578,6 @@
// overriding of ROTATE_AND_CROP value and adjustment of coordinates
// in several other controls in both the request and the result
bool mRotateAndCropAuto;
- // Original value of TEST_PATTERN_MODE and DATA so that they can be
- // restored when sensor muting is turned off
- int32_t mOriginalTestPatternMode;
- int32_t mOriginalTestPatternData[4];
// Whether this capture request has its zoom ratio set to 1.0x before
// the framework overrides it for camera HAL consumption.
@@ -584,7 +585,6 @@
// The systemTime timestamp when the request is created.
nsecs_t mRequestTimeNs;
-
// Whether this capture request's distortion correction update has
// been done.
bool mDistortionCorrectionUpdated = false;
@@ -919,6 +919,7 @@
status_t setRotateAndCropAutoBehavior(
camera_metadata_enum_android_scaler_rotate_and_crop_t rotateAndCropValue);
+ status_t setComposerSurface(bool composerSurfacePresent);
status_t setCameraMute(int32_t muteMode);
@@ -1071,6 +1072,7 @@
uint32_t mCurrentAfTriggerId;
uint32_t mCurrentPreCaptureTriggerId;
camera_metadata_enum_android_scaler_rotate_and_crop_t mRotateAndCropOverride;
+ bool mComposerOutput;
int32_t mCameraMute; // 0 = no mute, otherwise the TEST_PATTERN_MODE to use
bool mCameraMuteChanged;
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
index 8cc6833..b121e5d 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.cpp
@@ -48,7 +48,7 @@
status_t Camera3FakeStream::returnBufferLocked(
const camera_stream_buffer &,
- nsecs_t, const std::vector<size_t>&) {
+ nsecs_t, int32_t, const std::vector<size_t>&) {
ATRACE_CALL();
ALOGE("%s: Stream %d: Fake stream cannot return buffers!", __FUNCTION__, mId);
return INVALID_OPERATION;
@@ -58,6 +58,7 @@
const camera_stream_buffer &,
nsecs_t,
bool,
+ int32_t,
const std::vector<size_t>&,
/*out*/
sp<Fence>*) {
diff --git a/services/camera/libcameraservice/device3/Camera3FakeStream.h b/services/camera/libcameraservice/device3/Camera3FakeStream.h
index 914ccbf..c11a3e4 100644
--- a/services/camera/libcameraservice/device3/Camera3FakeStream.h
+++ b/services/camera/libcameraservice/device3/Camera3FakeStream.h
@@ -109,6 +109,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
@@ -134,7 +135,7 @@
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+ nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
virtual status_t configureQueueLocked();
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
index 0204d49..5f7e4cf 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.cpp
@@ -225,6 +225,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids) {
status_t res;
@@ -241,7 +242,7 @@
}
sp<Fence> releaseFence;
- res = returnBufferCheckedLocked(buffer, timestamp, output, surface_ids,
+ res = returnBufferCheckedLocked(buffer, timestamp, output, transform, surface_ids,
&releaseFence);
// Res may be an error, but we still want to decrement our owned count
// to enable clean shutdown. So we'll just return the error but otherwise
diff --git a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
index 90c8a7b..6135b7e 100644
--- a/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
+++ b/services/camera/libcameraservice/device3/Camera3IOStreamBase.h
@@ -68,12 +68,14 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferCheckedLocked(
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut) = 0;
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.cpp b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
index 6d8317b..6eb798e 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.cpp
@@ -106,6 +106,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t /*transform*/,
const std::vector<size_t>&,
/*out*/
sp<Fence> *releaseFenceOut) {
@@ -175,7 +176,7 @@
const camera_stream_buffer &buffer) {
ATRACE_CALL();
- return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false);
+ return returnAnyBufferLocked(buffer, /*timestamp*/0, /*output*/false, /*transform*/ -1);
}
status_t Camera3InputStream::getInputBufferProducerLocked(
diff --git a/services/camera/libcameraservice/device3/Camera3InputStream.h b/services/camera/libcameraservice/device3/Camera3InputStream.h
index 46221d1..6f66bca 100644
--- a/services/camera/libcameraservice/device3/Camera3InputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3InputStream.h
@@ -62,6 +62,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
diff --git a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
index a7e64ce..b702e20 100644
--- a/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OfflineSession.cpp
@@ -261,7 +261,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -301,7 +301,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
std::lock_guard<std::mutex> lock(mProcessCaptureResultLock);
@@ -336,7 +336,7 @@
mNumPartialResults, mVendorTagId, mDeviceInfo, mPhysicalDeviceInfoMap,
mResultMetadataQueue, mDistortionMappers, mZoomRatioMappers, mRotateAndCropMappers,
mTagMonitor, mInputStream, mOutputStreams, mSessionStatsBuilder, listener, *this, *this,
- mBufferRecords
+ mBufferRecords, /*legacyClient*/ false
};
for (const auto& msg : msgs) {
camera3::notify(states, msg);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index 03b77fc..e60fdb3 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -263,14 +263,14 @@
status_t Camera3OutputStream::returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids) {
+ nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids) {
ATRACE_HFR_CALL();
if (mHandoutTotalBufferCount == 1) {
returnPrefetchedBuffersLocked();
}
- status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, surface_ids);
+ status_t res = returnAnyBufferLocked(buffer, timestamp, /*output*/true, transform, surface_ids);
if (res != OK) {
return res;
@@ -286,6 +286,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut) {
@@ -346,6 +347,10 @@
mTraceFirstBuffer = false;
}
+ if (transform != -1) {
+ setTransformLocked(transform);
+ }
+
/* Certain consumers (such as AudioSource or HardwareComposer) use
* MONOTONIC time, causing time misalignment if camera timestamp is
* in BOOTTIME. Do the conversion if necessary. */
@@ -545,7 +550,7 @@
mHandoutTotalBufferCount = 0;
mFrameCount = 0;
mLastTimestamp = 0;
- mUseMonoTimestamp = (isConsumedByHWComposer() | isVideoStream());
+ mUseMonoTimestamp = (isConsumedByHWComposer() || isVideoStream());
res = native_window_set_buffer_count(mConsumer.get(),
mTotalBufferCount);
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index ad03b53..0872687 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -246,6 +246,7 @@
const camera_stream_buffer &buffer,
nsecs_t timestamp,
bool output,
+ int32_t transform,
const std::vector<size_t>& surface_ids,
/*out*/
sp<Fence> *releaseFenceOut);
@@ -332,7 +333,7 @@
virtual status_t returnBufferLocked(
const camera_stream_buffer &buffer,
- nsecs_t timestamp, const std::vector<size_t>& surface_ids);
+ nsecs_t timestamp, int32_t transform, const std::vector<size_t>& surface_ids);
virtual status_t queueBufferToConsumer(sp<ANativeWindow>& consumer,
ANativeWindowBuffer* buffer, int anwReleaseFence,
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index 5a97f4b..5c54dc7 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -42,10 +42,13 @@
#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
#include <android/hardware/camera/device/3.5/ICameraDeviceSession.h>
+#include <camera/CameraUtils.h>
#include <camera_metadata_hidden.h>
#include "device3/Camera3OutputUtils.h"
+#include "system/camera_metadata.h"
+
using namespace android::camera3;
using namespace android::hardware::camera;
@@ -464,12 +467,12 @@
/*requested*/true, request.requestTimeNs, states.sessionStatsBuilder,
/*timestampIncreasing*/true,
request.outputSurfaces, request.resultExtras,
- request.errorBufStrategy);
+ request.errorBufStrategy, request.transform);
// Note down the just completed frame number
if (request.hasInputBuffer) {
states.lastCompletedReprocessFrameNumber = frameNumber;
- } else if (request.zslCapture) {
+ } else if (request.zslCapture && request.stillCapture) {
states.lastCompletedZslFrameNumber = frameNumber;
} else {
states.lastCompletedRegularFrameNumber = frameNumber;
@@ -555,6 +558,31 @@
if (result->partial_result != 0)
request.resultExtras.partialResultCount = result->partial_result;
+ if ((result->result != nullptr) && !states.legacyClient) {
+ camera_metadata_ro_entry entry;
+ auto ret = find_camera_metadata_ro_entry(result->result,
+ ANDROID_LOGICAL_MULTI_CAMERA_ACTIVE_PHYSICAL_ID, &entry);
+ if ((ret == OK) && (entry.count > 0)) {
+ std::string physicalId(reinterpret_cast<const char *>(entry.data.u8));
+ auto deviceInfo = states.physicalDeviceInfoMap.find(physicalId);
+ if (deviceInfo != states.physicalDeviceInfoMap.end()) {
+ auto orientation = deviceInfo->second.find(ANDROID_SENSOR_ORIENTATION);
+ if (orientation.count > 0) {
+ ret = CameraUtils::getRotationTransform(deviceInfo->second,
+ &request.transform);
+ if (ret != OK) {
+ ALOGE("%s: Failed to calculate current stream transformation: %s (%d)",
+ __FUNCTION__, strerror(-ret), ret);
+ }
+ } else {
+ ALOGE("%s: Physical device orientation absent!", __FUNCTION__);
+ }
+ } else {
+ ALOGE("%s: Physical device not found in device info map found!", __FUNCTION__);
+ }
+ }
+ }
+
// Check if this result carries only partial metadata
if (states.usePartialResult && result->result != NULL) {
if (result->partial_result > states.numPartialResults || result->partial_result < 1) {
@@ -846,7 +874,7 @@
SessionStatsBuilder& sessionStatsBuilder, bool timestampIncreasing,
const SurfaceMap& outputSurfaces,
const CaptureResultExtras &inResultExtras,
- ERROR_BUF_STRATEGY errorBufStrategy) {
+ ERROR_BUF_STRATEGY errorBufStrategy, int32_t transform) {
for (size_t i = 0; i < numBuffers; i++)
{
@@ -889,11 +917,11 @@
if (it != outputSurfaces.end()) {
res = stream->returnBuffer(
outputBuffers[i], timestamp, timestampIncreasing, it->second,
- inResultExtras.frameNumber);
+ inResultExtras.frameNumber, transform);
} else {
res = stream->returnBuffer(
- outputBuffers[i], timestamp, timestampIncreasing, std::vector<size_t> (),
- inResultExtras.frameNumber);
+ outputBuffers[i], timestamp, timestampIncreasing,
+ std::vector<size_t> (), inResultExtras.frameNumber, transform);
}
}
// Note: stream may be deallocated at this point, if this buffer was
@@ -925,7 +953,7 @@
sb.status = CAMERA_BUFFER_STATUS_ERROR;
stream->returnBuffer(sb, /*timestamp*/0,
timestampIncreasing, std::vector<size_t> (),
- inResultExtras.frameNumber);
+ inResultExtras.frameNumber, transform);
if (listener != nullptr) {
CaptureResultExtras extras = inResultExtras;
@@ -941,14 +969,15 @@
void returnAndRemovePendingOutputBuffers(bool useHalBufManager,
sp<NotificationListener> listener, InFlightRequest& request,
SessionStatsBuilder& sessionStatsBuilder) {
- bool timestampIncreasing = !(request.zslCapture || request.hasInputBuffer);
+ bool timestampIncreasing =
+ !((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
returnOutputBuffers(useHalBufManager, listener,
request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(),
request.shutterTimestamp, /*requested*/true,
request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
request.outputSurfaces, request.resultExtras,
- request.errorBufStrategy);
+ request.errorBufStrategy, request.transform);
// Remove error buffers that are not cached.
for (auto iter = request.pendingOutputBuffers.begin();
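
For logical multi-camera results, the new block in this file reads the active physical camera's ANDROID_SENSOR_ORIENTATION and converts it into a surface transform that is later handed to returnBuffer(). A simplified sketch of the orientation-to-transform mapping (the real CameraUtils::getRotationTransform helper also folds in lens facing, since front-facing cameras add a horizontal flip):

    #include <system/window.h>

    // Hypothetical simplification: map a sensor orientation in degrees
    // to a native window transform flag.
    int32_t transformFromOrientation(int orientationDegrees) {
        switch (orientationDegrees) {
            case 0:   return 0;
            case 90:  return NATIVE_WINDOW_TRANSFORM_ROT_90;
            case 180: return NATIVE_WINDOW_TRANSFORM_ROT_180;
            case 270: return NATIVE_WINDOW_TRANSFORM_ROT_270;
            default:  return -1;  // unknown; leave the transform unchanged
        }
    }
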
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index 142889a..06b7ab4 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -118,7 +118,8 @@
const SurfaceMap& outputSurfaces = SurfaceMap{},
// Used to send buffer error callback when failing to return buffer
const CaptureResultExtras &resultExtras = CaptureResultExtras{},
- ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN);
+ ERROR_BUF_STRATEGY errorBufStrategy = ERROR_BUF_RETURN,
+ int32_t transform = -1);
// helper function to return the output buffers to output streams, and
// remove the returned buffers from the inflight request's pending buffers
@@ -165,6 +166,7 @@
SetErrorInterface& setErrIntf;
InflightRequestUpdateInterface& inflightIntf;
BufferRecordsInterface& bufferRecordsIntf;
+ bool legacyClient;
};
// Handle one capture result. Assume callers hold the lock to serialize all
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.cpp b/services/camera/libcameraservice/device3/Camera3Stream.cpp
index 02b6585..afcfd2a 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Stream.cpp
@@ -557,7 +557,7 @@
for (size_t i = 0; i < mPreparedBufferIdx; i++) {
mPreparedBuffers.editItemAt(i).release_fence = -1;
mPreparedBuffers.editItemAt(i).status = CAMERA_BUFFER_STATUS_ERROR;
- returnBufferLocked(mPreparedBuffers[i], 0);
+ returnBufferLocked(mPreparedBuffers[i], 0, /*transform*/ -1);
}
mPreparedBuffers.clear();
mPreparedBufferIdx = 0;
@@ -714,7 +714,7 @@
status_t Camera3Stream::returnBuffer(const camera_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
- const std::vector<size_t>& surface_ids, uint64_t frameNumber) {
+ const std::vector<size_t>& surface_ids, uint64_t frameNumber, int32_t transform) {
ATRACE_HFR_CALL();
Mutex::Autolock l(mLock);
@@ -743,7 +743,7 @@
*
* Do this for getBuffer as well.
*/
- status_t res = returnBufferLocked(b, timestamp, surface_ids);
+ status_t res = returnBufferLocked(b, timestamp, transform, surface_ids);
if (res == OK) {
fireBufferListenersLocked(b, /*acquired*/false, /*output*/true, timestamp, frameNumber);
}
@@ -931,7 +931,7 @@
}
status_t Camera3Stream::returnBufferLocked(const camera_stream_buffer &,
- nsecs_t, const std::vector<size_t>&) {
+ nsecs_t, int32_t, const std::vector<size_t>&) {
ALOGE("%s: This type of stream does not support output", __FUNCTION__);
return INVALID_OPERATION;
}
diff --git a/services/camera/libcameraservice/device3/Camera3Stream.h b/services/camera/libcameraservice/device3/Camera3Stream.h
index 5a364ab..fc75f79 100644
--- a/services/camera/libcameraservice/device3/Camera3Stream.h
+++ b/services/camera/libcameraservice/device3/Camera3Stream.h
@@ -354,7 +354,7 @@
status_t returnBuffer(const camera_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing,
const std::vector<size_t>& surface_ids = std::vector<size_t>(),
- uint64_t frameNumber = 0);
+ uint64_t frameNumber = 0, int32_t transform = -1);
/**
* Fill in the camera_stream_buffer with the next valid buffer for this
@@ -517,7 +517,7 @@
virtual status_t getBufferLocked(camera_stream_buffer *buffer,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t returnBufferLocked(const camera_stream_buffer &buffer,
- nsecs_t timestamp,
+ nsecs_t timestamp, int32_t transform,
const std::vector<size_t>& surface_ids = std::vector<size_t>());
virtual status_t getBuffersLocked(std::vector<OutstandingBuffer>*);
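
Throughout these stream interfaces the new transform parameter defaults to -1, a sentinel meaning "leave the consumer's transform unchanged"; Camera3OutputStream::returnBufferCheckedLocked only calls setTransformLocked() when the value differs from -1. A hypothetical call site:

    // -1 keeps the current surface transform; any other value is applied
    // to the output surface before the buffer is queued.
    stream->returnBuffer(streamBuffer, timestamp, /*timestampIncreasing*/ true,
            std::vector<size_t>(), frameNumber, /*transform*/ -1);
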
diff --git a/services/camera/libcameraservice/device3/Camera3StreamInterface.h b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
index 2d3397c..3aa5a3c 100644
--- a/services/camera/libcameraservice/device3/Camera3StreamInterface.h
+++ b/services/camera/libcameraservice/device3/Camera3StreamInterface.h
@@ -359,7 +359,7 @@
virtual status_t returnBuffer(const camera_stream_buffer &buffer,
nsecs_t timestamp, bool timestampIncreasing = true,
const std::vector<size_t>& surface_ids = std::vector<size_t>(),
- uint64_t frameNumber = 0) = 0;
+ uint64_t frameNumber = 0, int32_t transform = -1) = 0;
/**
* Fill in the camera_stream_buffer with the next valid buffer for this
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 523a2c7..42fa8db 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -122,6 +122,9 @@
// What shared surfaces an output should go to
SurfaceMap outputSurfaces;
+ // Current output transformation
+ int32_t transform;
+
// TODO: dedupe
static const nsecs_t kDefaultExpectedDuration = 100000000; // 100 ms
@@ -140,7 +143,8 @@
stillCapture(false),
zslCapture(false),
rotateAndCropAuto(false),
- requestTimeNs(0) {
+ requestTimeNs(0),
+ transform(-1) {
}
InFlightRequest(int numBuffers, CaptureResultExtras extras, bool hasInput,
@@ -165,7 +169,8 @@
rotateAndCropAuto(rotateAndCropAuto),
cameraIdsWithZoom(idsWithZoom),
requestTimeNs(requestNs),
- outputSurfaces(outSurfaces) {
+ outputSurfaces(outSurfaces),
+ transform(-1) {
}
};
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
index 76927c0..8699543 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.cpp
@@ -120,13 +120,12 @@
proxyBinder->pingForUserUpdate();
}
-bool CameraServiceProxyWrapper::isRotateAndCropOverrideNeeded(
- String16 packageName, int sensorOrientation, int lensFacing) {
+int CameraServiceProxyWrapper::getRotateAndCropOverride(String16 packageName, int lensFacing,
+ int userId) {
sp<ICameraServiceProxy> proxyBinder = getCameraServiceProxy();
if (proxyBinder == nullptr) return true;
- bool ret = true;
- auto status = proxyBinder->isRotateAndCropOverrideNeeded(packageName, sensorOrientation,
- lensFacing, &ret);
+ int ret = 0;
+ auto status = proxyBinder->getRotateAndCropOverride(packageName, lensFacing, userId, &ret);
if (!status.isOk()) {
ALOGE("%s: Failed during top activity orientation query: %s", __FUNCTION__,
status.exceptionMessage().c_str());
diff --git a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
index ad9db68..f701e94 100644
--- a/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
+++ b/services/camera/libcameraservice/utils/CameraServiceProxyWrapper.h
@@ -91,9 +91,8 @@
// Ping camera service proxy for user update
static void pingCameraServiceProxy();
- // Check whether the current top activity needs a rotate and crop override.
- static bool isRotateAndCropOverrideNeeded(String16 packageName, int sensorOrientation,
- int lensFacing);
+ // Return the current top activity rotate and crop override.
+ static int getRotateAndCropOverride(String16 packageName, int lensFacing, int userId);
};
} // android
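
The proxy query now returns which rotate-and-crop value to apply rather than a yes/no answer. Note that the nullptr early return in the .cpp hunk still reads "return true;", which now implicitly converts to 1; an explicit default would be clearer. A hypothetical caller, assuming the returned int maps onto the ANDROID_SCALER_ROTATE_AND_CROP_* enum values:

    int rotateAndCrop = CameraServiceProxyWrapper::getRotateAndCropOverride(
            clientPackageName, ANDROID_LENS_FACING_BACK, userId);
    if (rotateAndCrop != ANDROID_SCALER_ROTATE_AND_CROP_NONE) {
        // propagate the override into outgoing capture requests
    }
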
diff --git a/services/mediacodec/registrant/CodecServiceRegistrant.cpp b/services/mediacodec/registrant/CodecServiceRegistrant.cpp
index b479433..1de9efe 100644
--- a/services/mediacodec/registrant/CodecServiceRegistrant.cpp
+++ b/services/mediacodec/registrant/CodecServiceRegistrant.cpp
@@ -17,7 +17,7 @@
//#define LOG_NDEBUG 0
#define LOG_TAG "CodecServiceRegistrant"
-#include <android-base/properties.h>
+#include <android/api-level.h>
#include <android-base/logging.h>
#include <android-base/properties.h>
@@ -416,48 +416,33 @@
using namespace ::android::hardware::media::c2;
- int platformVersion =
- android::base::GetIntProperty("ro.build.version.sdk", int32_t(29));
- // STOPSHIP: Remove code name checking once platform version bumps up to 30.
- std::string codeName =
- android::base::GetProperty("ro.build.version.codename", "");
- if (codeName == "S") {
- platformVersion = 31;
- }
+ int platformVersion = android_get_device_api_level();
- switch (platformVersion) {
- case 31: {
- android::sp<V1_2::IComponentStore> storeV1_2 =
- new V1_2::utils::ComponentStore(store);
- if (storeV1_2->registerAsService("software") != android::OK) {
- LOG(ERROR) << "Cannot register software Codec2 v1.2 service.";
- return;
- }
- break;
- }
- case 30: {
- android::sp<V1_1::IComponentStore> storeV1_1 =
- new V1_1::utils::ComponentStore(store);
- if (storeV1_1->registerAsService("software") != android::OK) {
- LOG(ERROR) << "Cannot register software Codec2 v1.1 service.";
- return;
- }
- break;
- }
- case 29: {
- android::sp<V1_0::IComponentStore> storeV1_0 =
- new V1_0::utils::ComponentStore(store);
- if (storeV1_0->registerAsService("software") != android::OK) {
- LOG(ERROR) << "Cannot register software Codec2 v1.0 service.";
- return;
- }
- break;
- }
- default: {
- LOG(ERROR) << "The platform version " << platformVersion <<
- " is not supported.";
+ if (platformVersion >= __ANDROID_API_S__) {
+ android::sp<V1_2::IComponentStore> storeV1_2 =
+ new V1_2::utils::ComponentStore(store);
+ if (storeV1_2->registerAsService("software") != android::OK) {
+ LOG(ERROR) << "Cannot register software Codec2 v1.2 service.";
return;
}
+ } else if (platformVersion == __ANDROID_API_R__) {
+ android::sp<V1_1::IComponentStore> storeV1_1 =
+ new V1_1::utils::ComponentStore(store);
+ if (storeV1_1->registerAsService("software") != android::OK) {
+ LOG(ERROR) << "Cannot register software Codec2 v1.1 service.";
+ return;
+ }
+ } else if (platformVersion == __ANDROID_API_Q__) {
+ android::sp<V1_0::IComponentStore> storeV1_0 =
+ new V1_0::utils::ComponentStore(store);
+ if (storeV1_0->registerAsService("software") != android::OK) {
+ LOG(ERROR) << "Cannot register software Codec2 v1.0 service.";
+ return;
+ }
+ } else { // platformVersion < __ANDROID_API_Q__
+ LOG(ERROR) << "The platform version " << platformVersion <<
+ " is not supported.";
+ return;
}
if (!ionPropertiesDefined()) {
using IComponentStore =
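
The registrant now keys HIDL service registration off the device API level instead of parsing build properties by hand, which removes the STOPSHIP codename special case. A minimal sketch of the call this relies on, from <android/api-level.h>:

    #include <android/api-level.h>

    // Returns the API level of the device we are running on,
    // e.g. 31 (__ANDROID_API_S__) on Android 12.
    const int apiLevel = android_get_device_api_level();
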
diff --git a/services/mediametrics/Android.bp b/services/mediametrics/Android.bp
index 154b4ff..c98d5fc 100644
--- a/services/mediametrics/Android.bp
+++ b/services/mediametrics/Android.bp
@@ -149,7 +149,8 @@
"statsd_mediaparser.cpp",
"statsd_nuplayer.cpp",
"statsd_recorder.cpp",
- "StringUtils.cpp"
+ "StringUtils.cpp",
+ "ValidateId.cpp",
],
proto: {
@@ -184,6 +185,10 @@
"libplatformprotos",
],
+ header_libs: [
+ "libaaudio_headers",
+ ],
+
include_dirs: [
"system/media/audio_utils/include",
],
diff --git a/services/mediametrics/AudioAnalytics.cpp b/services/mediametrics/AudioAnalytics.cpp
index 45c9f56..218d9dd 100644
--- a/services/mediametrics/AudioAnalytics.cpp
+++ b/services/mediametrics/AudioAnalytics.cpp
@@ -21,6 +21,7 @@
#include "AudioAnalytics.h"
+#include <aaudio/AAudio.h> // error codes
#include <audio_utils/clock.h> // clock conversions
#include <cutils/properties.h>
#include <statslog.h> // statsd
@@ -29,6 +30,7 @@
#include "AudioTypes.h" // string to int conversions
#include "MediaMetricsService.h" // package info
#include "StringUtils.h"
+#include "ValidateId.h"
#define PROP_AUDIO_ANALYTICS_CLOUD_ENABLED "persist.audio.analytics.cloud.enabled"
@@ -63,6 +65,59 @@
}
}
+// The status variable contains status_t codes which are used by
+// the core audio framework.
+//
+// We also consider AAudio status codes, which do not overlap with status_t
+// and are compile-time checked here.
+//
+// Caution: As AAUDIO_ERROR codes have a unique range (AAUDIO_ERROR_BASE = -900),
+// overlap with status_t should not present an issue.
+//
+// See: system/core/libutils/include/utils/Errors.h
+// frameworks/av/media/libaaudio/include/aaudio/AAudio.h
+//
+// Compare with mediametrics::statusToStatusString
+//
+inline constexpr const char* extendedStatusToStatusString(status_t status) {
+ switch (status) {
+ case BAD_VALUE: // status_t
+ case AAUDIO_ERROR_ILLEGAL_ARGUMENT:
+ case AAUDIO_ERROR_INVALID_FORMAT:
+ case AAUDIO_ERROR_INVALID_RATE:
+ case AAUDIO_ERROR_NULL:
+ case AAUDIO_ERROR_OUT_OF_RANGE:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT;
+ case DEAD_OBJECT: // status_t
+ case FAILED_TRANSACTION: // status_t
+ case AAUDIO_ERROR_DISCONNECTED:
+ case AAUDIO_ERROR_INVALID_HANDLE:
+ case AAUDIO_ERROR_NO_SERVICE:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_IO;
+ case NO_MEMORY: // status_t
+ case AAUDIO_ERROR_NO_FREE_HANDLES:
+ case AAUDIO_ERROR_NO_MEMORY:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_MEMORY;
+ case PERMISSION_DENIED: // status_t
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_SECURITY;
+ case INVALID_OPERATION: // status_t
+ case NO_INIT: // status_t
+ case AAUDIO_ERROR_INVALID_STATE:
+ case AAUDIO_ERROR_UNAVAILABLE:
+ case AAUDIO_ERROR_UNIMPLEMENTED:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_STATE;
+ case WOULD_BLOCK: // status_t
+ case AAUDIO_ERROR_TIMEOUT:
+ case AAUDIO_ERROR_WOULD_BLOCK:
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_TIMEOUT;
+ default:
+ if (status >= 0) return AMEDIAMETRICS_PROP_STATUS_VALUE_OK; // non-negative values "OK"
+ [[fallthrough]]; // negative values are errors.
+ case UNKNOWN_ERROR: // status_t
+ return AMEDIAMETRICS_PROP_STATUS_VALUE_UNKNOWN;
+ }
+}
+
static constexpr const auto LOG_LEVEL = android::base::VERBOSE;
static constexpr int PREVIOUS_STATE_EXPIRE_SEC = 60 * 60; // 1 hour.
@@ -391,11 +446,15 @@
{
if (!startsWith(item->getKey(), AMEDIAMETRICS_KEY_PREFIX_AUDIO)) return BAD_VALUE;
status_t status = mAnalyticsState->submit(item, isTrusted);
+
+ // Status is selectively authenticated.
+ processStatus(item);
+
if (status != NO_ERROR) return status; // may not be permitted.
// Only if the item was successfully submitted (permission)
// do we check triggered actions.
- checkActions(item);
+ processActions(item);
return NO_ERROR;
}
@@ -429,7 +488,7 @@
return { ss.str(), lines - ll };
}
-void AudioAnalytics::checkActions(const std::shared_ptr<const mediametrics::Item>& item)
+void AudioAnalytics::processActions(const std::shared_ptr<const mediametrics::Item>& item)
{
auto actions = mActions.getActionsForItem(item); // internally locked.
// Execute actions with no lock held.
@@ -438,6 +497,36 @@
}
}
+void AudioAnalytics::processStatus(const std::shared_ptr<const mediametrics::Item>& item)
+{
+ int32_t status;
+ if (!item->get(AMEDIAMETRICS_PROP_STATUS, &status)) return;
+
+ // Any record with a status will automatically be added to a heat map.
+ // Standard information.
+ const auto key = item->getKey();
+ const auto uid = item->getUid();
+
+ // from audio.track.10 -> prefix = audio.track, suffix = 10
+ // from audio.track.error -> prefix = audio.track, suffix = error
+ const auto [prefixKey, suffixKey] = stringutils::splitPrefixKey(key);
+
+ std::string message;
+ item->get(AMEDIAMETRICS_PROP_STATUSMESSAGE, &message); // optional
+
+ int32_t subCode = 0; // not used
+ (void)item->get(AMEDIAMETRICS_PROP_STATUSSUBCODE, &subCode); // optional
+
+ std::string eventStr; // optional
+ item->get(AMEDIAMETRICS_PROP_EVENT, &eventStr);
+
+ const std::string statusString = extendedStatusToStatusString(status);
+
+ // Add to the heat map - we automatically track every item's status to see
+ // the types of errors and the frequency of errors.
+ mHeatMap.add(prefixKey, suffixKey, eventStr, statusString, uid, message, subCode);
+}
+
// HELPER METHODS
std::string AudioAnalytics::getThreadFromTrack(const std::string& track) const
@@ -563,7 +652,7 @@
const auto flagsForStats = types::lookup<types::INPUT_FLAG, short_enum_type_t>(flags);
const auto sourceForStats = types::lookup<types::SOURCE_TYPE, short_enum_type_t>(source);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
@@ -718,7 +807,7 @@
types::lookup<types::TRACK_TRAITS, short_enum_type_t>(traits);
const auto usageForStats = types::lookup<types::USAGE, short_enum_type_t>(usage);
// Android S
- const auto logSessionIdForStats = stringutils::sanitizeLogSessionId(logSessionId);
+ const auto logSessionIdForStats = ValidateId::get()->validateId(logSessionId);
LOG(LOG_LEVEL) << "key:" << key
<< " id:" << id
@@ -967,10 +1056,10 @@
if (channelMask != 0) {
switch (direction) {
case 1: // Output, keep sync with AudioTypes#getAAudioDirection()
- channelCount = audio_channel_count_from_out_mask(channelMask);
+ channelCount = (int32_t)audio_channel_count_from_out_mask(channelMask);
break;
case 2: // Input, keep sync with AudioTypes#getAAudioDirection()
- channelCount = audio_channel_count_from_in_mask(channelMask);
+ channelCount = (int32_t)audio_channel_count_from_in_mask(channelMask);
break;
default:
ALOGW("Invalid direction %d", direction);
diff --git a/services/mediametrics/AudioAnalytics.h b/services/mediametrics/AudioAnalytics.h
index 2b41a95..9b54cf3 100644
--- a/services/mediametrics/AudioAnalytics.h
+++ b/services/mediametrics/AudioAnalytics.h
@@ -20,6 +20,7 @@
#include "AnalyticsActions.h"
#include "AnalyticsState.h"
#include "AudioPowerUsage.h"
+#include "HeatMap.h"
#include "StatsdLog.h"
#include "TimedAction.h"
#include "Wrap.h"
@@ -73,11 +74,23 @@
std::pair<std::string, int32_t> dump(
int32_t lines = INT32_MAX, int64_t sinceNs = 0, const char *prefix = nullptr) const;
+ /**
+ * Returns a pair consisting of the dump string and the number of lines in the string.
+ *
+ * HeatMap dump.
+ */
+ std::pair<std::string, int32_t> dumpHeatMap(int32_t lines = INT32_MAX) const {
+ return mHeatMap.dump(lines);
+ }
+
void clear() {
// underlying state is locked.
mPreviousAnalyticsState->clear();
mAnalyticsState->clear();
+ // Clears the status map
+ mHeatMap.clear();
+
// Clear power usage state.
mAudioPowerUsage.clear();
}
@@ -96,11 +109,18 @@
*/
/**
- * Checks for any pending actions for a particular item.
+ * Processes any pending actions for a particular item.
*
* \param item to check against the current AnalyticsActions.
*/
- void checkActions(const std::shared_ptr<const mediametrics::Item>& item);
+ void processActions(const std::shared_ptr<const mediametrics::Item>& item);
+
+ /**
+ * Processes status information contained in the item.
+ *
+ * \param item to check against for status handling
+ */
+ void processStatus(const std::shared_ptr<const mediametrics::Item>& item);
// HELPER METHODS
/**
@@ -124,6 +144,9 @@
TimedAction mTimedAction; // locked internally
const std::shared_ptr<StatsdLog> mStatsdLog; // locked internally, ok for multiple threads.
+ static constexpr size_t kHeatEntries = 100;
+ HeatMap mHeatMap{kHeatEntries}; // locked internally, ok for multiple threads.
+
// DeviceUse is a nested class which handles audio device usage accounting.
// We define this class at the end to ensure prior variables all properly constructed.
// TODO: Track / Thread interaction
diff --git a/services/mediametrics/AudioTypes.cpp b/services/mediametrics/AudioTypes.cpp
index 838cdd5..b67967b 100644
--- a/services/mediametrics/AudioTypes.cpp
+++ b/services/mediametrics/AudioTypes.cpp
@@ -15,8 +15,10 @@
*/
#include "AudioTypes.h"
+#include "MediaMetricsConstants.h"
#include "StringUtils.h"
#include <media/TypeConverter.h> // requires libmedia_helper to get the Audio code.
+#include <statslog.h> // statsd
namespace android::mediametrics::types {
diff --git a/services/mediametrics/HeatMap.h b/services/mediametrics/HeatMap.h
new file mode 100644
index 0000000..950501a
--- /dev/null
+++ b/services/mediametrics/HeatMap.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <iomanip>
+#include <map>
+#include <sstream>
+#include "MediaMetricsConstants.h"
+
+namespace android::mediametrics {
+
+/**
+ * HeatData accumulates statistics on the status reported for a given key.
+ *
+ * HeatData is a helper class used by HeatMap to represent statistics. We expose it
+ * here for testing purposes currently.
+ *
+ * Note: This class is not thread-safe, so mutual exclusion should be obtained by the caller,
+ * which in this case is HeatMap. HeatMap::getData() returns a local copy of HeatData, so use
+ * of that copy is thread-safe.
+ */
+class HeatData {
+ /* HeatData for a key is stored in a map based on the event (e.g. "start", "pause", "create")
+ * and then another map based on the status (e.g. "ok", "argument", "state").
+ */
+ std::map<std::string /* event */,
+ std::map<std::string /* status name */, size_t /* count, nonzero */>> mMap;
+
+public:
+ /**
+ * Add status data.
+ *
+ * \param suffix (ignored) the suffix to the key that was stripped, if any.
+ * \param event the event (e.g. "create", "start", "pause", "stop").
+ * \param status the status string (e.g. "ok", "argument", "state").
+ * \param uid (ignored) the uid associated with the error.
+ * \param message (ignored) the status message, if any.
+ * \param subCode (ignored) the status subcode, if any.
+ */
+ void add(const std::string& suffix, const std::string& event, const std::string& status,
+ uid_t uid, const std::string& message, int32_t subCode) {
+ // Perhaps there could be a more detailed print.
+ (void)suffix;
+ (void)uid;
+ (void)message;
+ (void)subCode;
+ ++mMap[event][status];
+ }
+
+ /** Returns the number of distinct events that have recorded statuses. */
+ size_t size() const {
+ return mMap.size();
+ }
+
+ /**
+ * Returns a deque with pairs indicating the count of Oks and Errors.
+ * The first pair is total, the other pairs are in order of mMap.
+ *
+ * Example return value of {ok, error} pairs:
+ * total key1 key2
+ * { { 2, 1 }, { 1, 0 }, { 1, 1 } }
+ */
+ std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heatCount() const {
+ size_t totalOk = 0;
+ size_t totalError = 0;
+ std::deque<std::pair<size_t /* oks */, size_t /* errors */>> heat;
+ for (const auto &eventPair : mMap) {
+ size_t ok = 0;
+ size_t error = 0;
+ for (const auto &[name, count] : eventPair.second) {
+ if (name == AMEDIAMETRICS_PROP_STATUS_VALUE_OK) {
+ ok += count;
+ } else {
+ error += count;
+ }
+ }
+ totalOk += ok;
+ totalError += error;
+ heat.emplace_back(ok, error);
+ }
+ heat.emplace_front(totalOk, totalError);
+ return heat;
+ }
+
+ /** Returns the error fraction from a pair <oks, errors>, a float between 0.f and 1.f. */
+ static float fraction(const std::pair<size_t, size_t>& count) {
+ return (float)count.second / (count.first + count.second);
+ }
+
+ /** Returns the HeatMap information in a single line string. */
+ std::string dump() const {
+ const auto heat = heatCount();
+ auto it = heat.begin();
+ std::stringstream ss;
+ ss << "{ ";
+ float errorFraction = fraction(*it++);
+ if (errorFraction > 0.f) {
+ ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+ }
+ for (const auto &eventPair : mMap) {
+ ss << eventPair.first << ": { ";
+ errorFraction = fraction(*it++);
+ if (errorFraction > 0.f) {
+ ss << std::fixed << std::setprecision(2) << errorFraction << " ";
+ }
+ for (const auto &[name, count]: eventPair.second) {
+ ss << "[ " << name << " : " << count << " ] ";
+ }
+ ss << "} ";
+ }
+ ss << " }";
+ return ss.str();
+ }
+};
+
+/**
+ * HeatMap is a thread-safe collection that counts activity of status errors per key.
+ *
+ * The classic heat map is a 2D picture with intensity shown by color.
+ * Here we accumulate the status results from keys to see if there are consistent
+ * failures in the system.
+ *
+ * TODO(b/210855555): Heatmap improvements.
+ * 1) Heat should decay in intensity over time for past events; currently we don't decay.
+ */
+
+class HeatMap {
+ const size_t mMaxSize;
+ mutable std::mutex mLock;
+ size_t mRejected GUARDED_BY(mLock) = 0;
+ std::map<std::string, HeatData> mMap GUARDED_BY(mLock);
+
+public:
+ /**
+ * Constructs a HeatMap.
+ *
+ * \param maxSize the maximum number of elements that are tracked.
+ */
+ explicit HeatMap(size_t maxSize) : mMaxSize(maxSize) {
+ }
+
+ /** Returns the number of keys. */
+ size_t size() const {
+ std::lock_guard l(mLock);
+ return mMap.size();
+ }
+
+ /** Clears error history. */
+ void clear() {
+ std::lock_guard l(mLock);
+ mMap.clear();
+ }
+
+ /** Returns number of keys rejected due to space. */
+ size_t rejected() const {
+ std::lock_guard l(mLock);
+ return mRejected;
+ }
+
+ /** Returns a copy of the heat data associated with key. */
+ HeatData getData(const std::string& key) const {
+ std::lock_guard l(mLock);
+ return mMap.count(key) == 0 ? HeatData{} : mMap.at(key);
+ }
+
+ /**
+ * Adds a new entry.
+ * \param key the key category (e.g. audio.track).
+ * \param suffix (ignored) the suffix to the key that was stripped, if any.
+ * \param event the event (e.g. "create", "start", "pause", "stop").
+ * \param status the status string (e.g. "ok", "argument", "state").
+ * \param uid (ignored) the uid associated with the error.
+ * \param message (ignored) the status message, if any.
+ * \param subCode (ignored) the status subcode, if any.
+ */
+ void add(const std::string& key, const std::string& suffix, const std::string& event,
+ const std::string& status, uid_t uid, const std::string& message, int32_t subCode) {
+ std::lock_guard l(mLock);
+
+ // Hard limit on heat map entries.
+ // TODO: have better GC.
+ if (mMap.size() == mMaxSize && mMap.count(key) == 0) {
+ ++mRejected;
+ return;
+ }
+ mMap[key].add(suffix, event, status, uid, message, subCode);
+ }
+
+ /**
+ * Returns a pair consisting of the dump string and the number of lines in the string.
+ */
+ std::pair<std::string, int32_t> dump(int32_t lines = INT32_MAX) const {
+ std::stringstream ss;
+ int32_t ll = lines;
+ std::lock_guard l(mLock);
+ if (ll > 0) {
+ ss << "Error Heat Map (rejected: " << mRejected << "):\n";
+ --ll;
+ }
+ // TODO: the line restriction is applied alphabetically, not by priority.
+ for (const auto& [name, data] : mMap) {
+ if (ll <= 0) break;
+ ss << name << ": " << data.dump() << "\n";
+ --ll;
+ }
+ return { ss.str(), lines - ll };
+ }
+};
+
+} // namespace android::mediametrics
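
A hypothetical usage sketch of the two classes above (the status string reuses the AMEDIAMETRICS_PROP_STATUS_VALUE_OK constant from MediaMetricsConstants.h):

    HeatMap heatMap{100};
    heatMap.add("audio.track", /*suffix*/ "10", /*event*/ "start",
            AMEDIAMETRICS_PROP_STATUS_VALUE_OK, /*uid*/ 1000,
            /*message*/ "", /*subCode*/ 0);
    HeatData data = heatMap.getData("audio.track");  // thread-safe local copy
    auto heat = data.heatCount();                    // heat.front() is the total {ok, error} pair
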
diff --git a/services/mediametrics/LruSet.h b/services/mediametrics/LruSet.h
new file mode 100644
index 0000000..1f0ab60
--- /dev/null
+++ b/services/mediametrics/LruSet.h
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <list>
+#include <sstream>
+#include <unordered_map>
+
+namespace android::mediametrics {
+
+/**
+ * LruSet keeps a set of the last "Size" elements added or accessed.
+ *
+ * (Lru stands for least-recently-used eviction policy).
+ *
+ * Runs in O(1) time for add, remove, and check. Internally implemented
+ * with an unordered_map and a list. In order to remove elements,
+ * a list iterator is stored in the unordered_map
+ * (noting that std::list::erase() contractually
+ * does not affect iterators other than the one erased).
+ */
+
+template <typename T>
+class LruSet {
+ const size_t mMaxSize;
+ std::list<T> mAccessOrder; // front is the most recent, back is the oldest.
+ // item T with its access order iterator.
+ std::unordered_map<T, typename std::list<T>::iterator> mMap;
+
+public:
+ /**
+ * Constructs a LruSet which checks whether the element was
+ * accessed or added recently.
+ *
+ * The parameter maxSize is used to cap growth of LruSet;
+ * eviction follows a least-recently-used (LRU) policy.
+ * If maxSize is zero, the LruSet contains no elements
+ * and check() always returns false.
+ *
+ * \param maxSize the maximum number of elements that are tracked.
+ */
+ explicit LruSet(size_t maxSize) : mMaxSize(maxSize) {}
+
+ /**
+ * Returns the number of entries in the LruSet.
+ *
+ * This is a number between 0 and maxSize.
+ */
+ size_t size() const {
+ return mMap.size();
+ }
+
+ /** Clears the container contents. */
+ void clear() {
+ mMap.clear();
+ mAccessOrder.clear();
+ }
+
+ /** Returns a string dump of the last n entries. */
+ std::string dump(size_t n) const {
+ std::stringstream ss;
+ auto it = mAccessOrder.cbegin();
+ for (size_t i = 0; i < n && it != mAccessOrder.cend(); ++i) {
+ ss << *it++ << "\n";
+ }
+ return ss.str();
+ }
+
+ /** Adds a new item to the set. */
+ void add(const T& t) {
+ if (mMaxSize == 0) return;
+ auto it = mMap.find(t);
+ if (it != mMap.end()) { // already exists.
+ mAccessOrder.erase(it->second); // remove from its current position; re-added at the front below.
+ } else if (mAccessOrder.size() >= mMaxSize) {
+ const T last = mAccessOrder.back();
+ mAccessOrder.pop_back();
+ mMap.erase(last);
+ }
+ mAccessOrder.push_front(t);
+ mMap[t] = mAccessOrder.begin();
+ }
+
+ /**
+ * Removes an item from the set.
+ *
+ * \param t item to be removed.
+ * \return false if the item doesn't exist.
+ */
+ bool remove(const T& t) {
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mMap.erase(it);
+ return true;
+ }
+
+ /** Returns true if t is present (and moves the access order of t to the front). */
+ bool check(const T& t) { // not const, as it adjusts the least-recently-used order.
+ auto it = mMap.find(t);
+ if (it == mMap.end()) return false;
+ mAccessOrder.erase(it->second);
+ mAccessOrder.push_front(it->first);
+ it->second = mAccessOrder.begin();
+ return true;
+ }
+};
+
+} // namespace android::mediametrics
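
A short usage sketch illustrating the eviction and refresh behavior:

    LruSet<std::string> lru(2);
    lru.add("a");
    lru.add("b");
    lru.add("c");     // evicts "a", the least recently used
    lru.check("a");   // false: already evicted
    lru.check("b");   // true, and "b" is refreshed to the front
    lru.add("d");     // evicts "c", since "b" was refreshed
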
diff --git a/services/mediametrics/MediaMetricsService.cpp b/services/mediametrics/MediaMetricsService.cpp
index 1d64878..636b343 100644
--- a/services/mediametrics/MediaMetricsService.cpp
+++ b/services/mediametrics/MediaMetricsService.cpp
@@ -19,6 +19,7 @@
#include <utils/Log.h>
#include "MediaMetricsService.h"
+#include "ValidateId.h"
#include "iface_statsd.h"
#include <pwd.h> //getpwuid
@@ -204,6 +205,15 @@
// now attach either the item or its dup to a const shared pointer
std::shared_ptr<const mediametrics::Item> sitem(release ? item : item->dup());
+ // register log session ids with singleton.
+ if (startsWith(item->getKey(), "metrics.manager")) {
+ std::string logSessionId;
+ if (item->get("logSessionId", &logSessionId)
+ && mediametrics::stringutils::isLogSessionId(logSessionId.c_str())) {
+ mediametrics::ValidateId::get()->registerId(logSessionId);
+ }
+ }
+
(void)mAudioAnalytics.submit(sitem, isTrusted);
(void)dump2Statsd(sitem, mStatsdLog); // failure should be logged in function.
@@ -309,8 +319,19 @@
result << "-- some lines may be truncated --\n";
}
+ const int32_t heatLinesToDump = all ? INT32_MAX : 20;
+ const auto [ heatDumpString, heatLines] =
+ mAudioAnalytics.dumpHeatMap(heatLinesToDump);
+ result << "\n" << heatDumpString;
+ if (heatLines == heatLinesToDump) {
+ result << "-- some lines may be truncated --\n";
+ }
+
+ result << "\nLogSessionId:\n"
+ << mediametrics::ValidateId::get()->dump();
+
// Dump the statsd atoms we sent out.
- result << "Statsd atoms:\n"
+ result << "\nStatsd atoms:\n"
<< mStatsdLog->dumpToString(" " /* prefix */,
all ? STATSD_LOG_LINES_MAX : STATSD_LOG_LINES_DUMP);
}
diff --git a/services/mediametrics/StringUtils.h b/services/mediametrics/StringUtils.h
index 01034d9..a56f5b8 100644
--- a/services/mediametrics/StringUtils.h
+++ b/services/mediametrics/StringUtils.h
@@ -167,4 +167,41 @@
return ss.str();
}
+/**
+ * Returns true if the string is non-null, not empty, and contains only digits.
+ */
+inline constexpr bool isNumeric(const char *s)
+{
+ if (s == nullptr || *s == 0) return false;
+ do {
+ if (!isdigit(*s)) return false;
+ } while (*++s != 0);
+ return true; // all digits
+}
+
+/**
+ * Extracts out the prefix from the key, returning a pair of prefix, suffix.
+ *
+ * Usually the key is something like:
+ * Prefix.(ID)
+ * where ID is an integer,
+ * or "error" if the id was not returned because of failure,
+ * or "status" if general status.
+ *
+ * Example: audio.track.10 -> prefix = audio.track, suffix = 10
+ * audio.track.error -> prefix = audio.track, suffix = error
+ * audio.track.status -> prefix = audio.track, suffix = status
+ * audio.mute -> prefix = audio.mute, suffix = ""
+ */
+inline std::pair<std::string /* prefix */,
+ std::string /* suffix */> splitPrefixKey(const std::string &key)
+{
+ const size_t split = key.rfind('.');
+ const char* suffix = key.c_str() + split + 1;
+ if (*suffix && (!strcmp(suffix, "error") || !strcmp(suffix, "status") || isNumeric(suffix))) {
+ return { key.substr(0, split), suffix };
+ }
+ return { key, "" };
+}
+
} // namespace android::mediametrics::stringutils
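
Examples matching the doc comment above; splitPrefixKey() is the helper AudioAnalytics::processStatus uses to fold per-instance keys into one heat map bucket:

    splitPrefixKey("audio.track.10");     // -> { "audio.track", "10" }
    splitPrefixKey("audio.track.error");  // -> { "audio.track", "error" }
    splitPrefixKey("audio.mute");         // -> { "audio.mute", "" }
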
diff --git a/services/mediametrics/ValidateId.cpp b/services/mediametrics/ValidateId.cpp
new file mode 100644
index 0000000..0cc8593
--- /dev/null
+++ b/services/mediametrics/ValidateId.cpp
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "MediaMetricsService" // not ValidateId
+#include <utils/Log.h>
+
+#include "ValidateId.h"
+
+namespace android::mediametrics {
+
+std::string ValidateId::dump() const
+{
+ std::stringstream ss;
+ ss << "Entries:" << mIdSet.size() << " InvalidIds:" << mInvalidIds << "\n";
+ ss << mIdSet.dump(10);
+ return ss.str();
+}
+
+void ValidateId::registerId(const std::string& id)
+{
+ if (id.empty()) return;
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ return;
+ }
+ ALOGV("%s: registering %s", __func__, id.c_str());
+ mIdSet.add(id);
+}
+
+const std::string& ValidateId::validateId(const std::string& id)
+{
+ static const std::string empty{};
+ if (id.empty()) return empty;
+
+ // reject because the id is malformed
+ if (!mediametrics::stringutils::isLogSessionId(id.c_str())) {
+ ALOGW("%s: rejecting malformed id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+
+ // reject because the id is unregistered
+ if (!mIdSet.check(id)) {
+ ALOGW("%s: rejecting unregistered id %s", __func__, id.c_str());
+ ++mInvalidIds;
+ return empty;
+ }
+ return id;
+}
+
+} // namespace android::mediametrics
diff --git a/services/mediametrics/ValidateId.h b/services/mediametrics/ValidateId.h
new file mode 100644
index 0000000..166b39a
--- /dev/null
+++ b/services/mediametrics/ValidateId.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include "LruSet.h"
+#include "StringUtils.h"
+#include "Wrap.h"
+
+namespace android::mediametrics {
+
+/*
+ * ValidateId is used to check whether the log session id is properly formed
+ * and has been registered (i.e. from the Java MediaMetricsManagerService).
+ *
+ * The default memory window to track registered ids is set to SINGLETON_LRU_SET_SIZE.
+ *
+ * This class is not thread-safe, but the singleton returned by get() uses LockWrap<>
+ * to ensure thread-safety.
+ */
+class ValidateId {
+ mediametrics::LruSet<std::string> mIdSet;
+ size_t mInvalidIds = 0; // count invalid ids encountered.
+public:
+ /** Creates a ValidateId object whose memory window holds up to size entries. */
+ explicit ValidateId(size_t size) : mIdSet{size} {}
+
+ /** Returns a string dump of recent contents and stats. */
+ std::string dump() const;
+
+ /**
+ * Registers the id string.
+ *
+ * If the id string is malformed (not 16 Base64Url chars), it is ignored.
+ * Once registered, calling validateId() will return id (instead of the empty string).
+ * ValidateId may "forget" the id after not encountering it within the past N ids,
+ * where N is the size set in the constructor.
+ *
+ * \param id the id string (from MediaMetricsManagerService).
+ */
+ void registerId(const std::string& id);
+
+ /**
+ * Returns the empty string if the id string is malformed (not 16 Base64Url chars)
+ * or has not been seen among the most recent size ids;
+ * otherwise it returns the same id parameter.
+ *
+ * \param id string (to be sent to statsd).
+ */
+ const std::string& validateId(const std::string& id);
+
+ /** Singleton set size */
+ static inline constexpr size_t SINGLETON_LRU_SET_SIZE = 2000;
+
+ using LockedValidateId = mediametrics::LockWrap<ValidateId>;
+ /**
+ * Returns a singleton locked ValidateId object that is thread-safe using LockWrap<>.
+ *
+ * The singleton ValidateId object is created with size SINGLETON_LRU_SET_SIZE (on first call).
+ */
+ static inline LockedValidateId& get() {
+ static LockedValidateId privateSet{SINGLETON_LRU_SET_SIZE};
+ return privateSet;
+ }
+};
+
+} // namespace android::mediametrics
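
A hypothetical end-to-end sketch of the register/validate flow above (the id below is 16 Base64Url characters, which isLogSessionId accepts):

    std::string sessionId = "0123456789abcdef";
    mediametrics::ValidateId::get()->registerId(sessionId);
    std::string ok  = mediametrics::ValidateId::get()->validateId(sessionId);   // == sessionId
    std::string bad = mediametrics::ValidateId::get()->validateId("garbage!");  // == "" (malformed)
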
diff --git a/services/mediametrics/statsd_audiorecord.cpp b/services/mediametrics/statsd_audiorecord.cpp
index 41efcaa..a7b045e 100644
--- a/services/mediametrics/statsd_audiorecord.cpp
+++ b/services/mediametrics/statsd_audiorecord.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -80,16 +80,20 @@
}
int64_t created_millis = -1;
+ // not currently sent from client.
if (item->getInt64("android.media.audiorecord.createdMs", &created_millis)) {
metrics_proto.set_created_millis(created_millis);
}
int64_t duration_millis = -1;
- if (item->getInt64("android.media.audiorecord.durationMs", &duration_millis)) {
+ double durationMs = 0.;
+ if (item->getDouble("android.media.audiorecord.durationMs", &durationMs)) {
+ duration_millis = (int64_t)durationMs;
metrics_proto.set_duration_millis(duration_millis);
}
int32_t count = -1;
+ // not currently sent from client (see start count instead).
if (item->getInt32("android.media.audiorecord.n", &count)) {
metrics_proto.set_count(count);
}
@@ -129,7 +133,7 @@
}
int64_t start_count = -1;
- if (item->getInt64("android.media.audiorecord.startcount", &start_count)) {
+ if (item->getInt64("android.media.audiorecord.startCount", &start_count)) {
metrics_proto.set_start_count(start_count);
}
@@ -143,8 +147,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiorecord.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIORECORD_REPORTED,
diff --git a/services/mediametrics/statsd_audiotrack.cpp b/services/mediametrics/statsd_audiotrack.cpp
index 59627ae..67514e9 100644
--- a/services/mediametrics/statsd_audiotrack.cpp
+++ b/services/mediametrics/statsd_audiotrack.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -56,52 +56,47 @@
// flesh out the protobuf we'll hand off with our data
//
- // static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
+ // Do not change this without changing AudioTrack.cpp collection.
+
// optional string streamType;
std::string stream_type;
if (item->getString("android.media.audiotrack.streamtype", &stream_type)) {
metrics_proto.set_stream_type(stream_type);
}
- // static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
// optional string contentType;
std::string content_type;
if (item->getString("android.media.audiotrack.type", &content_type)) {
metrics_proto.set_content_type(content_type);
}
- // static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
// optional string trackUsage;
std::string track_usage;
if (item->getString("android.media.audiotrack.usage", &track_usage)) {
metrics_proto.set_track_usage(track_usage);
}
- // static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
- // optional int32 samplerate;
+ // optional int32 sampleRate;
int32_t sample_rate = -1;
- if (item->getInt32("android.media.audiotrack.samplerate", &sample_rate)) {
+ if (item->getInt32("android.media.audiotrack.sampleRate", &sample_rate)) {
metrics_proto.set_sample_rate(sample_rate);
}
- // static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
// optional int64 channelMask;
int64_t channel_mask = -1;
- if (item->getInt64("android.media.audiotrack.channelmask", &channel_mask)) {
+ if (item->getInt64("android.media.audiotrack.channelMask", &channel_mask)) {
metrics_proto.set_channel_mask(channel_mask);
}
- // NB: These are not yet exposed as public Java API constants.
- // static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
- // optional int32 underrunframes;
+ // optional int32 underrunFrames;
int32_t underrun_frames = -1;
- if (item->getInt32("android.media.audiotrack.underrunframes", &underrun_frames)) {
+ if (item->getInt32("android.media.audiotrack.underrunFrames", &underrun_frames)) {
metrics_proto.set_underrun_frames(underrun_frames);
}
- // static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";
- // optional int32 startupglitch;
+ // optional int32 glitch.startup;
int32_t startup_glitch = -1;
+ // Not currently sent from client.
if (item->getInt32("android.media.audiotrack.glitch.startup", &startup_glitch)) {
metrics_proto.set_startup_glitch(startup_glitch);
}
@@ -137,8 +132,7 @@
// log_session_id (string)
std::string logSessionId;
(void)item->getString("android.media.audiotrack.logSessionId", &logSessionId);
- const auto log_session_id =
- mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ const auto log_session_id = mediametrics::ValidateId::get()->validateId(logSessionId);
android::util::BytesField bf_serialized( serialized.c_str(), serialized.size());
int result = android::util::stats_write(android::util::MEDIAMETRICS_AUDIOTRACK_REPORTED,
diff --git a/services/mediametrics/statsd_codec.cpp b/services/mediametrics/statsd_codec.cpp
index 46cbdc8..17a3a5f 100644
--- a/services/mediametrics/statsd_codec.cpp
+++ b/services/mediametrics/statsd_codec.cpp
@@ -34,7 +34,7 @@
#include "cleaner.h"
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -228,7 +228,7 @@
std::string sessionId;
if (item->getString("android.media.mediacodec.log-session-id", &sessionId)) {
- sessionId = mediametrics::stringutils::sanitizeLogSessionId(sessionId);
+ sessionId = mediametrics::ValidateId::get()->validateId(sessionId);
metrics_proto.set_log_session_id(sessionId);
}
AStatsEvent_writeString(event, codec.c_str());
@@ -390,6 +390,48 @@
}
AStatsEvent_writeInt32(event, qpBMaxOri);
+ // int32_t configColorStandard = -1;
+ // if (item->getInt32("android.media.mediacodec.config-color-standard", &configColorStandard)) {
+ // metrics_proto.set_config_color_standard(configColorStandard);
+ // }
+ // AStatsEvent_writeInt32(event, configColorStandard);
+
+ // int32_t configColorRange = -1;
+ // if (item->getInt32("android.media.mediacodec.config-color-range", &configColorRange)) {
+ // metrics_proto.set_config_color_range(configColorRange);
+ // }
+ // AStatsEvent_writeInt32(event, configColorRange);
+
+ // int32_t configColorTransfer = -1;
+ // if (item->getInt32("android.media.mediacodec.config-color-transfer", &configColorTransfer)) {
+ // metrics_proto.set_config_color_transfer(configColorTransfer);
+ // }
+ // AStatsEvent_writeInt32(event, configColorTransfer);
+
+ // int32_t parsedColorStandard = -1;
+ // if (item->getInt32("android.media.mediacodec.parsed-color-standard", &parsedColorStandard)) {
+ // metrics_proto.set_parsed_color_standard(parsedColorStandard);
+ // }
+ // AStatsEvent_writeInt32(event, parsedColorStandard);
+
+ // int32_t parsedColorRange = -1;
+ // if (item->getInt32("android.media.mediacodec.parsed-color-range", &parsedColorRange)) {
+ // metrics_proto.set_parsed_color_range(parsedColorRange);
+ // }
+ // AStatsEvent_writeInt32(event, parsedColorRange);
+
+ // int32_t parsedColorTransfer = -1;
+ // if (item->getInt32("android.media.mediacodec.parsed-color-transfer", &parsedColorTransfer)) {
+ // metrics_proto.set_parsed_color_transfer(parsedColorTransfer);
+ // }
+ // AStatsEvent_writeInt32(event, parsedColorTransfer);
+
+ // int32_t hdrMetadataFlags = -1;
+ // if (item->getInt32("android.media.mediacodec.hdr-metadata-flags", &hdrMetadataFlags)) {
+ // metrics_proto.set_hdr_metadata_flags(hdrMetadataFlags);
+ // }
+ // AStatsEvent_writeInt32(event, hdrMetadataFlags);
+
int err = AStatsEvent_write(event);
if (err < 0) {
ALOGE("Failed to write codec metrics to statsd (%d)", err);
diff --git a/services/mediametrics/statsd_drm.cpp b/services/mediametrics/statsd_drm.cpp
index 287fb8d..e06a605 100644
--- a/services/mediametrics/statsd_drm.cpp
+++ b/services/mediametrics/statsd_drm.cpp
@@ -171,7 +171,7 @@
std::vector<uint8_t> buf(str.length() / 4 * 3, 0);
size_t size = buf.size();
if (decodeBase64(buf.data(), &size, str.c_str()) && size <= buf.size()) {
- buf.erase(buf.begin() + size, buf.end());
+ buf.erase(buf.begin() + (ptrdiff_t)size, buf.end());
return buf;
}
return {};
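
The cast here is cosmetic but deliberate: vector iterator arithmetic is defined on the signed difference_type, so adding an unsigned size_t trips sign-conversion warnings. Making the conversion explicit keeps the erase-the-tail idiom warning-clean without changing behavior:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    int main() {
        std::vector<uint8_t> buf(12, 0);         // worst-case decoded size
        const size_t size = 9;                   // actual decoded length, <= buf.size()
        // ptrdiff_t is the iterator difference type; the cast documents the
        // signed/unsigned conversion instead of leaving it implicit.
        buf.erase(buf.begin() + (ptrdiff_t)size, buf.end());
        return buf.size() == 9 ? 0 : 1;
    }
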
diff --git a/services/mediametrics/statsd_extractor.cpp b/services/mediametrics/statsd_extractor.cpp
index bcf2e0a..a8bfeaa 100644
--- a/services/mediametrics/statsd_extractor.cpp
+++ b/services/mediametrics/statsd_extractor.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -86,7 +86,7 @@
std::string log_session_id;
if (item->getString("android.media.mediaextractor.logSessionId", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
diff --git a/services/mediametrics/statsd_mediaparser.cpp b/services/mediametrics/statsd_mediaparser.cpp
index 921b320..67ca874b 100644
--- a/services/mediametrics/statsd_mediaparser.cpp
+++ b/services/mediametrics/statsd_mediaparser.cpp
@@ -31,7 +31,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/enums/stats/mediametrics/mediametrics.pb.h"
#include "iface_statsd.h"
@@ -81,7 +81,7 @@
std::string logSessionId;
item->getString("android.media.mediaparser.logSessionId", &logSessionId);
- logSessionId = mediametrics::stringutils::sanitizeLogSessionId(logSessionId);
+ logSessionId = mediametrics::ValidateId::get()->validateId(logSessionId);
int result = android::util::stats_write(android::util::MEDIAMETRICS_MEDIAPARSER_REPORTED,
timestamp_nanos,
diff --git a/services/mediametrics/statsd_recorder.cpp b/services/mediametrics/statsd_recorder.cpp
index b29ad73..5f54a68 100644
--- a/services/mediametrics/statsd_recorder.cpp
+++ b/services/mediametrics/statsd_recorder.cpp
@@ -32,7 +32,7 @@
#include <statslog.h>
#include "MediaMetricsService.h"
-#include "StringUtils.h"
+#include "ValidateId.h"
#include "frameworks/proto_logging/stats/message/mediametrics_message.pb.h"
#include "iface_statsd.h"
@@ -59,7 +59,7 @@
// string kRecorderLogSessionId = "android.media.mediarecorder.log-session-id";
std::string log_session_id;
if (item->getString("android.media.mediarecorder.log-session-id", &log_session_id)) {
- log_session_id = mediametrics::stringutils::sanitizeLogSessionId(log_session_id);
+ log_session_id = mediametrics::ValidateId::get()->validateId(log_session_id);
metrics_proto.set_log_session_id(log_session_id);
}
// string kRecorderAudioMime = "android.media.mediarecorder.audio.mime";
diff --git a/services/mediametrics/tests/mediametrics_tests.cpp b/services/mediametrics/tests/mediametrics_tests.cpp
index 2336d6f..102700a 100644
--- a/services/mediametrics/tests/mediametrics_tests.cpp
+++ b/services/mediametrics/tests/mediametrics_tests.cpp
@@ -28,6 +28,7 @@
#include "AudioTypes.h"
#include "StringUtils.h"
+#include "ValidateId.h"
using namespace android;
@@ -1127,3 +1128,169 @@
validId2[3] = '!';
ASSERT_EQ("", mediametrics::stringutils::sanitizeLogSessionId(validId2));
}
+
+TEST(mediametrics_tests, LruSet) {
+ constexpr size_t LRU_SET_SIZE = 2;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ // test adding a couple strings.
+ lruSet.add("abc");
+ ASSERT_EQ(1u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("abc"));
+ lruSet.add("def");
+ ASSERT_EQ(2u, lruSet.size());
+
+ // now adding the third string causes eviction of the oldest.
+ lruSet.add("ghi");
+ ASSERT_FALSE(lruSet.check("abc"));
+ ASSERT_TRUE(lruSet.check("ghi"));
+ ASSERT_TRUE(lruSet.check("def")); // "def" is most recent.
+ ASSERT_EQ(2u, lruSet.size()); // "abc" is correctly discarded.
+
+ // adding another string will evict the oldest.
+ lruSet.add("foo");
+ ASSERT_FALSE(lruSet.check("ghi")); // note: "ghi" discarded when "foo" added.
+ ASSERT_TRUE(lruSet.check("foo"));
+ ASSERT_TRUE(lruSet.check("def"));
+
+ // manual removing of a string works, too.
+ ASSERT_TRUE(lruSet.remove("def"));
+ ASSERT_FALSE(lruSet.check("def")); // we manually removed "def".
+ ASSERT_TRUE(lruSet.check("foo")); // "foo" is still there.
+ ASSERT_EQ(1u, lruSet.size());
+
+ // you can't remove a string that has not been added.
+ ASSERT_FALSE(lruSet.remove("bar")); // Note: "bar" doesn't exist, so remove returns false.
+ ASSERT_EQ(1u, lruSet.size());
+
+ lruSet.add("foo"); // adding "foo" (which already exists) doesn't change size.
+ ASSERT_EQ(1u, lruSet.size());
+ lruSet.add("bar"); // add "bar"
+ ASSERT_EQ(2u, lruSet.size());
+ lruSet.add("glorp"); // add "glorp" evicts "foo".
+ ASSERT_EQ(2u, lruSet.size());
+ ASSERT_TRUE(lruSet.check("bar"));
+ ASSERT_TRUE(lruSet.check("glorp"));
+ ASSERT_FALSE(lruSet.check("foo"));
+}
+
+TEST(mediametrics_tests, LruSet0) {
+ constexpr size_t LRU_SET_SIZE = 0;
+ mediametrics::LruSet<std::string> lruSet(LRU_SET_SIZE);
+
+ lruSet.add("a");
+ ASSERT_EQ(0u, lruSet.size());
+ ASSERT_FALSE(lruSet.check("a"));
+ ASSERT_FALSE(lruSet.remove("a")); // never added.
+ ASSERT_EQ(0u, lruSet.size());
+}
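
These two tests fully pin down the container's contract: add() inserts at most maxSize elements and evicts the least recently used; check() is a membership test that also refreshes recency (which is why "def" is most recent after being checked above); remove() returns false for absent keys; capacity 0 stores nothing. A minimal sketch with those semantics, assuming the usual list-plus-hash-map layout (the real mediametrics::LruSet lives in the service headers and may differ):

    #include <cstddef>
    #include <list>
    #include <unordered_map>

    template <typename T>
    class LruSetSketch {
    public:
        explicit LruSetSketch(size_t maxSize) : mMaxSize(maxSize) {}
        void add(const T& key) {
            if (mMaxSize == 0) return;                       // capacity 0: store nothing
            auto it = mMap.find(key);
            if (it != mMap.end()) mList.erase(it->second);   // re-add refreshes recency
            mList.push_front(key);
            mMap[key] = mList.begin();
            if (mMap.size() > mMaxSize) {                    // evict least recently used
                mMap.erase(mList.back());
                mList.pop_back();
            }
        }
        bool check(const T& key) {
            auto it = mMap.find(key);
            if (it == mMap.end()) return false;
            mList.splice(mList.begin(), mList, it->second);  // a hit counts as a use
            return true;
        }
        bool remove(const T& key) {
            auto it = mMap.find(key);
            if (it == mMap.end()) return false;              // never added (or evicted)
            mList.erase(it->second);
            mMap.erase(it);
            return true;
        }
        size_t size() const { return mMap.size(); }
    private:
        const size_t mMaxSize;
        std::list<T> mList;                                  // front = most recently used
        std::unordered_map<T, typename std::list<T>::iterator> mMap;
    };
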
+
+// Returns a 16-character Base64Url string containing the decimal representation of value
+// (with leading 0s) e.g. 0000000000000000, 0000000000000001, 0000000000000002, ...
+static std::string generateId(size_t value)
+{
+ char id[16 + 1]; // to be filled with 16 Base64Url chars (and zero termination)
+ char *sptr = id + 16; // start at the end.
+ *sptr-- = 0; // zero terminate.
+ // output the digits from least significant to most significant.
+ while (value) {
+ *sptr-- = '0' + (value % 10); // store the ASCII digit, not the raw value
+ value /= 10;
+ }
+ // add leading 0's
+ while (sptr >= id) { // >= so id[0] is also filled; otherwise it is left uninitialized
+ *sptr-- = '0';
+ }
+ return std::string(id);
+}
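
A quick worked check of the generator above (using the generateId just defined): every output is exactly 16 characters, all decimal digits, which are a subset of the Base64Url alphabet:

    #include <cassert>

    static void generateIdSelfCheck() {
        assert(generateId(0) == "0000000000000000");   // all leading zeros
        assert(generateId(42) == "0000000000000042");  // zero-padded decimal
    }
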
+
+TEST(mediametrics_tests, ValidateId) {
+ constexpr size_t LRU_SET_SIZE = 3;
+ constexpr size_t IDS = 10;
+ static_assert(IDS > LRU_SET_SIZE); // IDS must be greater than LRU_SET_SIZE.
+ mediametrics::ValidateId validateId(LRU_SET_SIZE);
+
+ // register IDs as integer strings counting from 0.
+ for (size_t i = 0; i < IDS; ++i) {
+ validateId.registerId(generateId(i));
+ }
+
+ // only the last LRU_SET_SIZE exist.
+ for (size_t i = 0; i < IDS - LRU_SET_SIZE; ++i) {
+ ASSERT_EQ("", validateId.validateId(generateId(i)));
+ }
+ for (size_t i = IDS - LRU_SET_SIZE; i < IDS; ++i) {
+ const std::string id = generateId(i);
+ ASSERT_EQ(id, validateId.validateId(id));
+ }
+}
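
Given the LruSet semantics above, this test implies ValidateId is a thin, bounded registry: registerId() feeds the LRU window and validateId() is a checked pass-through that maps unknown or aged-out ids to "". An illustrative composition over the LruSetSketch shown earlier (the real class may also sanitize the id format):

    #include <string>

    class ValidateIdCompositionSketch {
    public:
        explicit ValidateIdCompositionSketch(size_t maxSize) : mIds(maxSize) {}
        void registerId(const std::string& id) {
            if (!id.empty()) mIds.add(id);      // only real ids occupy a window slot
        }
        std::string validateId(const std::string& id) {
            // Ids 0..6 above have aged out of a window of 3; only 7..9 survive.
            return mIds.check(id) ? id : std::string();
        }
    private:
        LruSetSketch<std::string> mIds;
    };
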
+
+TEST(mediametrics_tests, StatusConversion) {
+ constexpr status_t statuses[] = {
+ NO_ERROR,
+ BAD_VALUE,
+ DEAD_OBJECT,
+ NO_MEMORY,
+ PERMISSION_DENIED,
+ INVALID_OPERATION,
+ WOULD_BLOCK,
+ UNKNOWN_ERROR,
+ };
+
+ auto roundTrip = [](status_t status) {
+ return android::mediametrics::statusStringToStatus(
+ android::mediametrics::statusToStatusString(status));
+ };
+
+ // Primary status error categories.
+ for (const auto status : statuses) {
+ ASSERT_EQ(status, roundTrip(status));
+ }
+
+ // Status errors specially considered.
+ ASSERT_EQ(DEAD_OBJECT, roundTrip(FAILED_TRANSACTION));
+}
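
The round trip asserted here implies status_t values are serialized through a canonical string per category, with some codes folded into a neighbor before serialization (FAILED_TRANSACTION reports as DEAD_OBJECT, hence it round-trips to DEAD_OBJECT). A sketch of that shape with stand-in codes and strings (not the service's actual tables):

    #include <map>
    #include <string>

    using status_sketch_t = int;                       // stand-in for status_t
    enum : status_sketch_t {
        kOk = 0, kDeadObject = -32, kFailedTransaction = -33, kUnknown = -1000,
    };

    static std::string statusToStringSketch(status_sketch_t status) {
        switch (status) {
        case kOk:                return "ok";
        case kDeadObject:
        case kFailedTransaction: return "ioDead";      // folded into one category
        default:                 return "unknown";
        }
    }

    static status_sketch_t stringToStatusSketch(const std::string& s) {
        static const std::map<std::string, status_sketch_t> kMap{
            {"ok", kOk}, {"ioDead", kDeadObject}, {"unknown", kUnknown},
        };
        const auto it = kMap.find(s);
        return it == kMap.end() ? kUnknown : it->second;
    }
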
+
+TEST(mediametrics_tests, HeatMap) {
+ constexpr size_t SIZE = 2;
+ android::mediametrics::HeatMap heatMap{SIZE};
+ constexpr uid_t UID = 0;
+ constexpr int32_t SUBCODE = 1;
+
+ ASSERT_EQ((size_t)0, heatMap.size());
+ heatMap.add("someKey", "someSuffix", "someEvent",
+ AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+ ASSERT_EQ((size_t)1, heatMap.size());
+ heatMap.add("someKey", "someSuffix", "someEvent",
+ AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+ heatMap.add("someKey", "someSuffix", "anotherEvent",
+ AMEDIAMETRICS_PROP_STATUS_VALUE_ARGUMENT, UID, "message", SUBCODE);
+ ASSERT_EQ((size_t)1, heatMap.size());
+ heatMap.add("anotherKey", "someSuffix", "someEvent",
+ AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+ ASSERT_EQ((size_t)2, heatMap.size());
+ ASSERT_EQ((size_t)0, heatMap.rejected());
+
+ heatMap.add("thirdKey", "someSuffix", "someEvent",
+ AMEDIAMETRICS_PROP_STATUS_VALUE_OK, UID, "message", SUBCODE);
+ ASSERT_EQ((size_t)2, heatMap.size());
+ ASSERT_EQ((size_t)1, heatMap.rejected());
+
+ android::mediametrics::HeatData heatData = heatMap.getData("someKey");
+ ASSERT_EQ((size_t)2, heatData.size());
+ auto count = heatData.heatCount();
+ ASSERT_EQ((size_t)3, count.size()); // pairs in order { total, "anotherEvent", "someEvent" }
+ // check total value
+ ASSERT_EQ((size_t)2, count[0].first); // OK
+ ASSERT_EQ((size_t)1, count[0].second); // ERROR
+ // first key "anotherEvent"
+ ASSERT_EQ((size_t)0, count[1].first); // OK
+ ASSERT_EQ((size_t)1, count[1].second); // ERROR
+ // second key "someEvent"
+ ASSERT_EQ((size_t)2, count[2].first); // OK
+ ASSERT_EQ((size_t)0, count[2].second); // ERROR
+
+ heatMap.clear();
+ ASSERT_EQ((size_t)0, heatMap.size());
+}
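
What these assertions encode: the map holds at most SIZE distinct keys and counts overflow keys as rejected; per key, HeatData tracks {ok, error} tallies per event, and heatCount() emits the aggregate pair first followed by per-event pairs in key order ("anotherEvent" sorts before "someEvent"). A sketch of that aggregation, with the structure inferred from the test alone:

    #include <cstddef>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    struct HeatDataSketch {
        // Ordered map => per-event pairs come out in lexicographic key order.
        std::map<std::string, std::pair<size_t, size_t>> events;  // {okCount, errorCount}
        size_t size() const { return events.size(); }
        std::vector<std::pair<size_t, size_t>> heatCount() const {
            std::pair<size_t, size_t> total{0, 0};
            for (const auto& e : events) {
                total.first += e.second.first;
                total.second += e.second.second;
            }
            std::vector<std::pair<size_t, size_t>> out{total};    // aggregate first
            for (const auto& e : events) out.push_back(e.second);
            return out;
        }
    };
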
diff --git a/services/mediaresourcemanager/ResourceManagerService.cpp b/services/mediaresourcemanager/ResourceManagerService.cpp
index 953686b..0167cba 100644
--- a/services/mediaresourcemanager/ResourceManagerService.cpp
+++ b/services/mediaresourcemanager/ResourceManagerService.cpp
@@ -671,11 +671,11 @@
if (clients.size() == 0) {
// if we are here, run the fourth pass to free one codec with the different type.
if (secureCodec != NULL) {
- MediaResource temp(MediaResource::Type::kNonSecureCodec, 1);
+ MediaResource temp(MediaResource::Type::kNonSecureCodec, secureCodec->subType, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
if (nonSecureCodec != NULL) {
- MediaResource temp(MediaResource::Type::kSecureCodec, 1);
+ MediaResource temp(MediaResource::Type::kSecureCodec, nonSecureCodec->subType, 1);
getClientForResource_l(callingPid, &temp, &clients);
}
}
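
The fix threads the codec subtype into the fourth reclaim pass: when no client of the same secure flavor can be freed, the service now targets the opposite flavor but the same subtype, so reclaiming for a secure video codec considers non-secure video codecs rather than, say, audio. The construction shape in isolation (field names inferred from the call sites above; the subtype values are stand-ins):

    #include <cstdint>

    enum class TypeSketch { kSecureCodec, kNonSecureCodec };
    enum class SubTypeSketch { kAudioCodec, kVideoCodec, kImageCodec };

    struct MediaResourceSketch {
        TypeSketch type;
        SubTypeSketch subType;
        int64_t value;
    };

    // Fourth pass: flip the secure flavor, keep the subtype, ask for one instance.
    static MediaResourceSketch oppositeFlavor(const MediaResourceSketch& r) {
        return {r.type == TypeSketch::kSecureCodec ? TypeSketch::kNonSecureCodec
                                                   : TypeSketch::kSecureCodec,
                r.subType, 1};
    }
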
diff --git a/services/oboeservice/AAudioServiceEndpoint.cpp b/services/oboeservice/AAudioServiceEndpoint.cpp
index 13dd3d3..390cd5c 100644
--- a/services/oboeservice/AAudioServiceEndpoint.cpp
+++ b/services/oboeservice/AAudioServiceEndpoint.cpp
@@ -59,6 +59,7 @@
result << " Device Id: " << getDeviceId() << "\n";
result << " Sample Rate: " << getSampleRate() << "\n";
result << " Channel Count: " << getSamplesPerFrame() << "\n";
+ result << " Channel Mask: 0x" << std::hex << getChannelMask() << std::dec << "\n";
result << " Format: " << getFormat() << "\n";
result << " Frames Per Burst: " << mFramesPerBurst << "\n";
result << " Usage: " << getUsage() << "\n";
@@ -164,6 +165,10 @@
configuration.getSamplesPerFrame() != getSamplesPerFrame()) {
return false;
}
+ if (configuration.getChannelMask() != AAUDIO_UNSPECIFIED &&
+ configuration.getChannelMask() != getChannelMask()) {
+ return false;
+ }
return true;
}
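
The new clause follows the convention already used by the sample-rate and channel-count checks in matches(): AAUDIO_UNSPECIFIED in the requested configuration is a wildcard, so a match only fails when the caller pinned a concrete value that differs. The guard shape in isolation (a sketch, not the service code):

    #include <cstdint>

    constexpr int32_t kUnspecified = 0;  // AAUDIO_UNSPECIFIED is the zero value

    // True when the request is a wildcard or an exact match.
    static bool fieldMatches(int32_t requested, int32_t actual) {
        return requested == kUnspecified || requested == actual;
    }

    // matches() is then just the conjunction over every negotiated field.
    static bool configMatches(int32_t reqMask, int32_t actualMask,
                              int32_t reqRate, int32_t actualRate) {
        return fieldMatches(reqMask, actualMask) && fieldMatches(reqRate, actualRate);
    }
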
@@ -188,7 +193,9 @@
if (direction == AAUDIO_DIRECTION_OUTPUT) {
flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
| AAudioConvert_allowCapturePolicyToAudioFlagsMask(
- params->getAllowedCapturePolicy()));
+ params->getAllowedCapturePolicy(),
+ params->getSpatializationBehavior(),
+ params->isContentSpatialized()));
} else {
flags = static_cast<audio_flags_mask_t>(AUDIO_FLAG_LOW_LATENCY
| AAudioConvert_privacySensitiveToAudioFlagsMask(params->isPrivacySensitive()));
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index a08098c..35a0890 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -126,20 +126,15 @@
}
config.sample_rate = aaudioSampleRate;
- int32_t aaudioSamplesPerFrame = getSamplesPerFrame();
-
const aaudio_direction_t direction = getDirection();
+ config.channel_mask = AAudio_getChannelMaskForOpen(
+ getChannelMask(), getSamplesPerFrame(), direction == AAUDIO_DIRECTION_INPUT);
+
if (direction == AAUDIO_DIRECTION_OUTPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_OUT_STEREO
- : audio_channel_out_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = OUTPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at DAC later
} else if (direction == AAUDIO_DIRECTION_INPUT) {
- config.channel_mask = (aaudioSamplesPerFrame == AAUDIO_UNSPECIFIED)
- ? AUDIO_CHANNEL_IN_STEREO
- : audio_channel_in_mask_from_count(aaudioSamplesPerFrame);
mHardwareTimeOffsetNanos = INPUT_ESTIMATED_HARDWARE_OFFSET_NANOS; // frames at ADC earlier
} else {
@@ -225,9 +220,9 @@
}
// Get information about the stream and pass it back to the caller.
- setSamplesPerFrame((direction == AAUDIO_DIRECTION_OUTPUT)
- ? audio_channel_count_from_out_mask(config.channel_mask)
- : audio_channel_count_from_in_mask(config.channel_mask));
+ setChannelMask(AAudioConvert_androidToAAudioChannelMask(
+ config.channel_mask, getDirection() == AAUDIO_DIRECTION_INPUT,
+ AAudio_isChannelIndexMask(config.channel_mask)));
// AAudio creates a copy of this FD and retains ownership of the copy.
// Assume that AudioFlinger will close the original shared_memory_fd.
@@ -247,9 +242,9 @@
setFormat(config.format);
setSampleRate(config.sample_rate);
- ALOGD("%s() actual rate = %d, channels = %d"
- ", deviceId = %d, capacity = %d\n",
- __func__, getSampleRate(), getSamplesPerFrame(), deviceId, getBufferCapacity());
+ ALOGD("%s() actual rate = %d, channels = %d channelMask = %#x, deviceId = %d, capacity = %d\n",
+ __func__, getSampleRate(), getSamplesPerFrame(), getChannelMask(),
+ deviceId, getBufferCapacity());
ALOGD("%s() format = 0x%08x, frame size = %d, burst size = %d",
__func__, getFormat(), calculateBytesPerFrame(), mFramesPerBurst);
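
The deleted branches chose the HAL channel mask inline: stereo when the app left the count unspecified, otherwise a positional mask derived from the count, with separate input and output variants. AAudio_getChannelMaskForOpen() now owns that policy in one place (and additionally has to handle explicit masks and index masks). A sketch of just the default-selection part that moved (helper body assumed; only the call site above is from this change):

    #include <cstdint>

    using mask_sketch_t = uint32_t;
    constexpr mask_sketch_t kUnspecifiedMask = 0;
    constexpr mask_sketch_t kOutStereo = 0x3;   // stand-in constants, not the HAL values
    constexpr mask_sketch_t kInStereo = 0xC;

    static mask_sketch_t channelMaskForOpenSketch(mask_sketch_t requested, bool isInput) {
        if (requested != kUnspecifiedMask) return requested;  // honor explicit masks
        return isInput ? kInStereo : kOutStereo;              // per-direction default
    }
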
diff --git a/services/oboeservice/AAudioServiceEndpointShared.cpp b/services/oboeservice/AAudioServiceEndpointShared.cpp
index 5fbcadb..5af0a91 100644
--- a/services/oboeservice/AAudioServiceEndpointShared.cpp
+++ b/services/oboeservice/AAudioServiceEndpointShared.cpp
@@ -78,7 +78,7 @@
result = mStreamInternal->open(builder);
setSampleRate(mStreamInternal->getSampleRate());
- setSamplesPerFrame(mStreamInternal->getSamplesPerFrame());
+ setChannelMask(mStreamInternal->getChannelMask());
setDeviceId(mStreamInternal->getDeviceId());
setSessionId(mStreamInternal->getSessionId());
setFormat(AUDIO_FORMAT_PCM_FLOAT); // force for mixer
diff --git a/services/oboeservice/AAudioServiceStreamBase.cpp b/services/oboeservice/AAudioServiceStreamBase.cpp
index 34ddd4d..4ffc127 100644
--- a/services/oboeservice/AAudioServiceStreamBase.cpp
+++ b/services/oboeservice/AAudioServiceStreamBase.cpp
@@ -73,7 +73,8 @@
}
std::string AAudioServiceStreamBase::dumpHeader() {
- return std::string(" T Handle UId Port Run State Format Burst Chan Capacity");
+ return std::string(
+ " T Handle UId Port Run State Format Burst Chan Mask Capacity");
}
std::string AAudioServiceStreamBase::dump() const {
@@ -88,6 +89,7 @@
result << std::setw(7) << getFormat();
result << std::setw(6) << mFramesPerBurst;
result << std::setw(5) << getSamplesPerFrame();
+ result << std::setw(8) << std::hex << getChannelMask() << std::dec;
result << std::setw(9) << getBufferCapacity();
return result.str();
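
One stream-formatting detail both dump changes in this file rely on: std::hex is sticky on the stream while std::setw applies only to the next field, so the trailing std::dec is what keeps the capacity column decimal. Minimal demonstration:

    #include <iomanip>
    #include <sstream>

    int main() {
        std::stringstream result;
        result << std::setw(8) << std::hex << 255 << std::dec;  // "      ff", then reset
        result << std::setw(9) << 255;                          // "      255", decimal again
        return result.str() == "      ff      255" ? 0 : 1;
    }
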
diff --git a/services/oboeservice/AAudioServiceStreamShared.cpp b/services/oboeservice/AAudioServiceStreamShared.cpp
index c665cda..ad06d97 100644
--- a/services/oboeservice/AAudioServiceStreamShared.cpp
+++ b/services/oboeservice/AAudioServiceStreamShared.cpp
@@ -164,11 +164,11 @@
goto error;
}
- setSamplesPerFrame(configurationInput.getSamplesPerFrame());
- if (getSamplesPerFrame() == AAUDIO_UNSPECIFIED) {
- setSamplesPerFrame(endpoint->getSamplesPerFrame());
+ setChannelMask(configurationInput.getChannelMask());
+ if (getChannelMask() == AAUDIO_UNSPECIFIED) {
+ setChannelMask(endpoint->getChannelMask());
- } else if (getSamplesPerFrame() != endpoint->getSamplesPerFrame()) {
- ALOGD("%s() mSamplesPerFrame = %d, need %d",
- __func__, getSamplesPerFrame(), endpoint->getSamplesPerFrame());
+ } else if (getChannelMask() != endpoint->getChannelMask()) {
+ ALOGD("%s() mChannelMask = %#x, need %#x",
+ __func__, getChannelMask(), endpoint->getChannelMask());
result = AAUDIO_ERROR_OUT_OF_RANGE;
goto error;
diff --git a/services/oboeservice/fuzzer/README.md b/services/oboeservice/fuzzer/README.md
index 00b85df..ae7af3eb 100644
--- a/services/oboeservice/fuzzer/README.md
+++ b/services/oboeservice/fuzzer/README.md
@@ -15,7 +15,7 @@
4. InService
5. DeviceId
6. SampleRate
-7. SamplesPerFrame
+7. ChannelMask
8. Direction
9. SharingMode
10. Usage
@@ -31,7 +31,7 @@
| `InService` | `bool` | Value obtained from FuzzedDataProvider |
| `DeviceId` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
| `SampleRate` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
-| `SamplesPerFrame` | `INT32_MIN` to `INT32_MAX` | Value obtained from FuzzedDataProvider |
+| `ChannelMask` | `AAUDIO_UNSPECIFIED`, `AAUDIO_CHANNEL_INDEX_MASK_1`, `AAUDIO_CHANNEL_INDEX_MASK_2`, `AAUDIO_CHANNEL_INDEX_MASK_3`, `AAUDIO_CHANNEL_INDEX_MASK_4`, `AAUDIO_CHANNEL_INDEX_MASK_5`, `AAUDIO_CHANNEL_INDEX_MASK_6`, `AAUDIO_CHANNEL_INDEX_MASK_7`, `AAUDIO_CHANNEL_INDEX_MASK_8`, `AAUDIO_CHANNEL_INDEX_MASK_9`, `AAUDIO_CHANNEL_INDEX_MASK_10`, `AAUDIO_CHANNEL_INDEX_MASK_11`, `AAUDIO_CHANNEL_INDEX_MASK_12`, `AAUDIO_CHANNEL_INDEX_MASK_13`, `AAUDIO_CHANNEL_INDEX_MASK_14`, `AAUDIO_CHANNEL_INDEX_MASK_15`, `AAUDIO_CHANNEL_INDEX_MASK_16`, `AAUDIO_CHANNEL_INDEX_MASK_17`, `AAUDIO_CHANNEL_INDEX_MASK_18`, `AAUDIO_CHANNEL_INDEX_MASK_19`, `AAUDIO_CHANNEL_INDEX_MASK_20`, `AAUDIO_CHANNEL_INDEX_MASK_21`, `AAUDIO_CHANNEL_INDEX_MASK_22`, `AAUDIO_CHANNEL_INDEX_MASK_23`, `AAUDIO_CHANNEL_INDEX_MASK_24`, `AAUDIO_CHANNEL_MONO`, `AAUDIO_CHANNEL_STEREO`, `AAUDIO_CHANNEL_FRONT_BACK`, `AAUDIO_CHANNEL_2POINT1`, `AAUDIO_CHANNEL_TRI`, `AAUDIO_CHANNEL_TRI_BACK`, `AAUDIO_CHANNEL_3POINT1`, `AAUDIO_CHANNEL_2POINT0POINT2`, `AAUDIO_CHANNEL_2POINT1POINT2`, `AAUDIO_CHANNEL_3POINT0POINT2`, `AAUDIO_CHANNEL_3POINT1POINT2`, `AAUDIO_CHANNEL_QUAD`, `AAUDIO_CHANNEL_QUAD_SIDE`, `AAUDIO_CHANNEL_SURROUND`, `AAUDIO_CHANNEL_PENTA`, `AAUDIO_CHANNEL_5POINT1`, `AAUDIO_CHANNEL_5POINT1_SIDE`, `AAUDIO_CHANNEL_5POINT1POINT2`, `AAUDIO_CHANNEL_5POINT1POINT4`, `AAUDIO_CHANNEL_6POINT1`, `AAUDIO_CHANNEL_7POINT1`, `AAUDIO_CHANNEL_7POINT1POINT2`, `AAUDIO_CHANNEL_7POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT4`, `AAUDIO_CHANNEL_9POINT1POINT6` | Either a raw `int32` from FuzzedDataProvider or a value chosen from this list by obtaining index from FuzzedDataProvider |
| `Direction` | `AAUDIO_DIRECTION_OUTPUT`, `AAUDIO_DIRECTION_INPUT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `SharingMode` | `AAUDIO_SHARING_MODE_EXCLUSIVE`, `AAUDIO_SHARING_MODE_SHARED` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
| `Usage` | `AAUDIO_USAGE_MEDIA`, `AAUDIO_USAGE_VOICE_COMMUNICATION`, `AAUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING`, `AAUDIO_USAGE_ALARM`, `AAUDIO_USAGE_NOTIFICATION`, `AAUDIO_USAGE_NOTIFICATION_RINGTONE`, `AAUDIO_USAGE_NOTIFICATION_EVENT`, `AAUDIO_USAGE_ASSISTANCE_ACCESSIBILITY`, `AAUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE`, `AAUDIO_USAGE_ASSISTANCE_SONIFICATION`, `AAUDIO_USAGE_GAME`, `AAUDIO_USAGE_ASSISTANT`, `AAUDIO_SYSTEM_USAGE_EMERGENCY`, `AAUDIO_SYSTEM_USAGE_SAFETY`, `AAUDIO_SYSTEM_USAGE_VEHICLE_STATUS`, `AAUDIO_SYSTEM_USAGE_ANNOUNCEMENT` | Value chosen from valid values by obtaining index from FuzzedDataProvider |
diff --git a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
index 4bc661c..17e8d36 100644
--- a/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
+++ b/services/oboeservice/fuzzer/oboeservice_fuzzer.cpp
@@ -68,10 +68,71 @@
AAUDIO_INPUT_PRESET_UNPROCESSED, AAUDIO_INPUT_PRESET_VOICE_PERFORMANCE,
};
+aaudio_channel_mask_t kAAudioChannelMasks[] = {
+ AAUDIO_UNSPECIFIED,
+ AAUDIO_CHANNEL_INDEX_MASK_1,
+ AAUDIO_CHANNEL_INDEX_MASK_2,
+ AAUDIO_CHANNEL_INDEX_MASK_3,
+ AAUDIO_CHANNEL_INDEX_MASK_4,
+ AAUDIO_CHANNEL_INDEX_MASK_5,
+ AAUDIO_CHANNEL_INDEX_MASK_6,
+ AAUDIO_CHANNEL_INDEX_MASK_7,
+ AAUDIO_CHANNEL_INDEX_MASK_8,
+ AAUDIO_CHANNEL_INDEX_MASK_9,
+ AAUDIO_CHANNEL_INDEX_MASK_10,
+ AAUDIO_CHANNEL_INDEX_MASK_11,
+ AAUDIO_CHANNEL_INDEX_MASK_12,
+ AAUDIO_CHANNEL_INDEX_MASK_13,
+ AAUDIO_CHANNEL_INDEX_MASK_14,
+ AAUDIO_CHANNEL_INDEX_MASK_15,
+ AAUDIO_CHANNEL_INDEX_MASK_16,
+ AAUDIO_CHANNEL_INDEX_MASK_17,
+ AAUDIO_CHANNEL_INDEX_MASK_18,
+ AAUDIO_CHANNEL_INDEX_MASK_19,
+ AAUDIO_CHANNEL_INDEX_MASK_20,
+ AAUDIO_CHANNEL_INDEX_MASK_21,
+ AAUDIO_CHANNEL_INDEX_MASK_22,
+ AAUDIO_CHANNEL_INDEX_MASK_23,
+ AAUDIO_CHANNEL_INDEX_MASK_24,
+ AAUDIO_CHANNEL_MONO,
+ AAUDIO_CHANNEL_STEREO,
+ AAUDIO_CHANNEL_FRONT_BACK,
+ AAUDIO_CHANNEL_2POINT1,
+ AAUDIO_CHANNEL_TRI,
+ AAUDIO_CHANNEL_TRI_BACK,
+ AAUDIO_CHANNEL_3POINT1,
+ AAUDIO_CHANNEL_2POINT0POINT2,
+ AAUDIO_CHANNEL_2POINT1POINT2,
+ AAUDIO_CHANNEL_3POINT0POINT2,
+ AAUDIO_CHANNEL_3POINT1POINT2,
+ AAUDIO_CHANNEL_QUAD,
+ AAUDIO_CHANNEL_QUAD_SIDE,
+ AAUDIO_CHANNEL_SURROUND,
+ AAUDIO_CHANNEL_PENTA,
+ AAUDIO_CHANNEL_5POINT1,
+ AAUDIO_CHANNEL_5POINT1_SIDE,
+ AAUDIO_CHANNEL_5POINT1POINT2,
+ AAUDIO_CHANNEL_5POINT1POINT4,
+ AAUDIO_CHANNEL_6POINT1,
+ AAUDIO_CHANNEL_7POINT1,
+ AAUDIO_CHANNEL_7POINT1POINT2,
+ AAUDIO_CHANNEL_7POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT4,
+ AAUDIO_CHANNEL_9POINT1POINT6,
+};
+
const size_t kNumAAudioFormats = std::size(kAAudioFormats);
const size_t kNumAAudioUsages = std::size(kAAudioUsages);
const size_t kNumAAudioContentTypes = std::size(kAAudioContentTypes);
const size_t kNumAAudioInputPresets = std::size(kAAudioInputPresets);
+const size_t kNumAAudioChannelMasks = std::size(kAAudioChannelMasks);
class FuzzAAudioClient : public virtual RefBase, public AAudioServiceInterface {
public:
@@ -305,7 +366,11 @@
request.getConfiguration().setDeviceId(fdp.ConsumeIntegral<int32_t>());
request.getConfiguration().setSampleRate(fdp.ConsumeIntegral<int32_t>());
- request.getConfiguration().setSamplesPerFrame(fdp.ConsumeIntegral<int32_t>());
+ request.getConfiguration().setChannelMask((aaudio_channel_mask_t)(
+ fdp.ConsumeBool()
+ ? fdp.ConsumeIntegral<int32_t>()
+ : kAAudioChannelMasks[fdp.ConsumeIntegralInRange<int32_t>(
+ 0, kNumAAudioChannelMasks - 1)]));
request.getConfiguration().setDirection(
fdp.ConsumeBool() ? fdp.ConsumeIntegral<int32_t>()
: (fdp.ConsumeBool() ? AAUDIO_DIRECTION_OUTPUT : AAUDIO_DIRECTION_INPUT));
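
The new setter keeps the fuzzer's established split, visible again in the direction field right below: half the time feed a completely arbitrary int32 to exercise parameter validation, half the time pick from the table of known-good masks so calls get past the checks and reach deeper service code. The pattern in isolation (FuzzedDataProvider is the real libFuzzer helper; the value table here is a stand-in):

    #include <fuzzer/FuzzedDataProvider.h>

    #include <cstddef>
    #include <cstdint>

    static const int32_t kValidValues[] = {0, 1, 2, 4};  // stand-in "known good" table
    static const size_t kNumValidValues = sizeof(kValidValues) / sizeof(kValidValues[0]);

    // Mix raw garbage with known-good values: both the rejection path and the
    // accept path of the service stay covered.
    static int32_t pickValue(FuzzedDataProvider& fdp) {
        return fdp.ConsumeBool()
                ? fdp.ConsumeIntegral<int32_t>()
                : kValidValues[fdp.ConsumeIntegralInRange<size_t>(0, kNumValidValues - 1)];
    }

    extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
        FuzzedDataProvider fdp(data, size);
        (void)pickValue(fdp);
        return 0;
    }
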
diff --git a/services/tuner/TunerFilter.cpp b/services/tuner/TunerFilter.cpp
index 039fd31..ca82526 100644
--- a/services/tuner/TunerFilter.cpp
+++ b/services/tuner/TunerFilter.cpp
@@ -301,7 +301,7 @@
break;
}
case TunerFilterSettings::isPassthrough: {
- ip.filterSettings.bPassthrough(tunerSettings.isPassthrough);
+ ip.filterSettings.bPassthrough(tunerSettings.get<TunerFilterSettings::isPassthrough>());
break;
}
default: {
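
This hunk and the next fix the same bug: TunerFilterSettings is an AIDL union, so TunerFilterSettings::isPassthrough names a tag, not a data member, and the generated way to read the active value is the typed accessor get<Tag>(). A simplified stand-in for the generated shape (real AIDL unions generate more, including a getTag() query):

    #include <variant>

    struct TunerFilterSettingsSketch {
        enum Tag { isPassthrough };                 // tag constants, usable in switch/case
        template <Tag T> bool get() const { return std::get<bool>(mValue); }
        template <Tag T> void set(bool v) { mValue = v; }
        std::variant<std::monostate, bool> mValue;  // monostate = no field set yet
    };

    int main() {
        TunerFilterSettingsSketch s;
        s.set<TunerFilterSettingsSketch::isPassthrough>(true);
        // In the case TunerFilterSettings::isPassthrough branch, read via get<...>():
        return s.get<TunerFilterSettingsSketch::isPassthrough>() ? 0 : 1;
    }
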
@@ -345,7 +345,8 @@
break;
}
case TunerFilterSettings::isPassthrough: {
- tlv.filterSettings.bPassthrough(tunerSettings.isPassthrough);
+ tlv.filterSettings.bPassthrough(
+ tunerSettings.get<TunerFilterSettings::isPassthrough>());
break;
}
default: {