Merge "CCodecBuffers: fix null buffer PCM conversion" into tm-dev am: 866b66391d am: c653c68195
Original change: https://googleplex-android-review.googlesource.com/c/platform/frameworks/av/+/18566311
Change-Id: Id884d93a34686df068e12bf75569bd6ba23d4d0a
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
diff --git a/PREUPLOAD.cfg b/PREUPLOAD.cfg
index 716b550..1f7083b 100644
--- a/PREUPLOAD.cfg
+++ b/PREUPLOAD.cfg
@@ -11,3 +11,4 @@
clang_format = --commit ${PREUPLOAD_COMMIT} --style file --extensions c,h,cc,cpp
media/libmediatranscoding/
services/mediatranscoding/
+ media/libaudioclient/tests/
diff --git a/apex/manifest.json b/apex/manifest.json
index 4b75b04..932d28e 100644
--- a/apex/manifest.json
+++ b/apex/manifest.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media",
- "version": 339990000,
+ "version": 990090000,
"requireNativeLibs": [
"libandroid.so",
"libbinder_ndk.so",
diff --git a/apex/manifest_codec.json b/apex/manifest_codec.json
index fbcbb69..f42f5a4 100644
--- a/apex/manifest_codec.json
+++ b/apex/manifest_codec.json
@@ -1,6 +1,6 @@
{
"name": "com.android.media.swcodec",
- "version": 339990000,
+ "version": 990090000,
"requireNativeLibs": [
":sphal"
]
diff --git a/camera/CaptureResult.cpp b/camera/CaptureResult.cpp
index be47898..bb880d1 100644
--- a/camera/CaptureResult.cpp
+++ b/camera/CaptureResult.cpp
@@ -52,7 +52,10 @@
parcel->readInt64(&lastCompletedRegularFrameNumber);
parcel->readInt64(&lastCompletedReprocessFrameNumber);
parcel->readInt64(&lastCompletedZslFrameNumber);
-
+ parcel->readBool(&hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->readInt64(&readoutTimestamp);
+ }
return OK;
}
@@ -82,6 +85,10 @@
parcel->writeInt64(lastCompletedRegularFrameNumber);
parcel->writeInt64(lastCompletedReprocessFrameNumber);
parcel->writeInt64(lastCompletedZslFrameNumber);
+ parcel->writeBool(hasReadoutTimestamp);
+ if (hasReadoutTimestamp) {
+ parcel->writeInt64(readoutTimestamp);
+ }
return OK;
}
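
The readout timestamp above is serialized behind a presence flag so the value only crosses the parcel when it is valid. A minimal sketch of that optional-field pattern, with the return-code checks the snippet above elides (helper names are illustrative, not part of the framework):

    #include <binder/Parcel.h>
    #include <utils/Errors.h>

    using android::OK;
    using android::Parcel;
    using android::status_t;

    // Same presence-flag pattern as hasReadoutTimestamp/readoutTimestamp above,
    // but propagating the status of every Parcel call.
    static status_t writeOptionalInt64(Parcel* parcel, bool hasValue, int64_t value) {
        status_t res = parcel->writeBool(hasValue);
        if (res != OK) return res;
        return hasValue ? parcel->writeInt64(value) : OK;
    }

    static status_t readOptionalInt64(const Parcel* parcel, bool* hasValue, int64_t* value) {
        status_t res = parcel->readBool(hasValue);
        if (res != OK) return res;
        *value = 0;
        return *hasValue ? parcel->readInt64(value) : OK;
    }
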
diff --git a/camera/VendorTagDescriptor.cpp b/camera/VendorTagDescriptor.cpp
index b37803a..151b653 100644
--- a/camera/VendorTagDescriptor.cpp
+++ b/camera/VendorTagDescriptor.cpp
@@ -598,7 +598,6 @@
status_t VendorTagDescriptor::setAsGlobalVendorTagDescriptor(const sp<VendorTagDescriptor>& desc) {
status_t res = OK;
Mutex::Autolock al(sLock);
- sGlobalVendorTagDescriptor = desc;
vendor_tag_ops_t* opsPtr = NULL;
if (desc != NULL) {
@@ -613,6 +612,9 @@
ALOGE("%s: Could not set vendor tag descriptor, received error %s (%d)."
, __FUNCTION__, strerror(-res), res);
}
+
+ sGlobalVendorTagDescriptor = desc;
+
return res;
}
@@ -631,7 +633,6 @@
const sp<VendorTagDescriptorCache>& cache) {
status_t res = OK;
Mutex::Autolock al(sLock);
- sGlobalVendorTagDescriptorCache = cache;
struct vendor_tag_cache_ops* opsPtr = NULL;
if (cache != NULL) {
@@ -646,6 +647,9 @@
ALOGE("%s: Could not set vendor tag cache, received error %s (%d)."
, __FUNCTION__, strerror(-res), res);
}
+
+ sGlobalVendorTagDescriptorCache = cache;
+
return res;
}
diff --git a/camera/cameraserver/Android.bp b/camera/cameraserver/Android.bp
index 22bb234..094a3c1 100644
--- a/camera/cameraserver/Android.bp
+++ b/camera/cameraserver/Android.bp
@@ -47,7 +47,6 @@
"android.hardware.camera.device@1.0",
"android.hardware.camera.device@3.2",
"android.hardware.camera.device@3.4",
- "android.hardware.camera.device@3.8",
],
compile_multilib: "first",
cflags: [
diff --git a/camera/include/camera/CaptureResult.h b/camera/include/camera/CaptureResult.h
index f163c1e..de534ab 100644
--- a/camera/include/camera/CaptureResult.h
+++ b/camera/include/camera/CaptureResult.h
@@ -103,6 +103,17 @@
*/
int64_t lastCompletedZslFrameNumber;
+ /**
+ * Whether the readoutTimestamp variable is valid and should be used.
+ */
+ bool hasReadoutTimestamp;
+
+ /**
+ * The readout timestamp of the capture. Its value is equal to the
+ * start-of-exposure timestamp plus the exposure time (and a possible fixed
+ * offset due to sensor crop).
+ */
+ int64_t readoutTimestamp;
/**
* Constructor initializes object as invalid by setting requestId to be -1.
@@ -118,7 +129,9 @@
errorPhysicalCameraId(),
lastCompletedRegularFrameNumber(-1),
lastCompletedReprocessFrameNumber(-1),
- lastCompletedZslFrameNumber(-1) {
+ lastCompletedZslFrameNumber(-1),
+ hasReadoutTimestamp(false),
+ readoutTimestamp(0) {
}
/**
diff --git a/camera/include/camera/camera2/OutputConfiguration.h b/camera/include/camera/camera2/OutputConfiguration.h
index b842885..b7c7f7f 100644
--- a/camera/include/camera/camera2/OutputConfiguration.h
+++ b/camera/include/camera/camera2/OutputConfiguration.h
@@ -43,7 +43,9 @@
TIMESTAMP_BASE_SENSOR = 1,
TIMESTAMP_BASE_MONOTONIC = 2,
TIMESTAMP_BASE_REALTIME = 3,
- TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4
+ TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED = 4,
+ TIMESTAMP_BASE_READOUT_SENSOR = 5,
+ TIMESTAMP_BASE_MAX = TIMESTAMP_BASE_READOUT_SENSOR,
};
enum MirrorModeType {
MIRROR_MODE_AUTO = 0,
diff --git a/drm/libmediadrm/DrmMetricsConsumer.cpp b/drm/libmediadrm/DrmMetricsConsumer.cpp
index c06f09b..fd095b7 100644
--- a/drm/libmediadrm/DrmMetricsConsumer.cpp
+++ b/drm/libmediadrm/DrmMetricsConsumer.cpp
@@ -42,7 +42,7 @@
}
return type_names[attribute];
}
-
+
static const char *type_names[] = {"PROVISION_REQUIRED", "KEY_NEEDED",
"KEY_EXPIRED", "VENDOR_DEFINED",
"SESSION_RECLAIMED"};
diff --git a/drm/mediadrm/plugins/clearkey/aidl/Android.bp b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
index 2d1f741..397e4c6 100644
--- a/drm/mediadrm/plugins/clearkey/aidl/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/aidl/Android.bp
@@ -69,3 +69,78 @@
"android.hardware.drm-service.clearkey",
],
}
+
+cc_defaults {
+ name: "fuzz_aidl_clearkey_service_defaults",
+
+ srcs: [
+ "CreatePluginFactories.cpp",
+ "CryptoPlugin.cpp",
+ "DrmFactory.cpp",
+ "DrmPlugin.cpp",
+ ],
+
+ relative_install_path: "hw",
+
+ cflags: ["-Wall", "-Werror", "-Wthread-safety"],
+
+ include_dirs: ["frameworks/av/include"],
+
+ shared_libs: [
+ "libbase",
+ "libbinder_ndk",
+ "libcrypto",
+ "liblog",
+ "libprotobuf-cpp-lite",
+ "libutils",
+ "android.hardware.drm-V1-ndk",
+ ],
+
+ static_libs: [
+ "android.hardware.common-V2-ndk",
+ "libclearkeybase_fuzz",
+ ],
+
+ local_include_dirs: ["include"],
+
+ sanitize: {
+ integer_overflow: true,
+ },
+}
+
+cc_fuzz {
+ name: "android.hardware.drm-service.clearkey.aidl_fuzzer",
+ defaults: [
+ "fuzz_aidl_clearkey_service_defaults",
+ ],
+ static_libs: [
+ "libbase",
+ "libbinder_random_parcel",
+ "libcutils",
+ "liblog",
+ "libutils",
+ ],
+ target: {
+ android: {
+ shared_libs: [
+ "libbinder_ndk",
+ "libbinder",
+ ],
+ },
+ host: {
+ static_libs: [
+ "libbinder_ndk",
+ "libbinder",
+ ],
+ },
+ darwin: {
+ enabled: false,
+ },
+ },
+ srcs: ["fuzzer.cpp"],
+ fuzz_config: {
+ cc: [
+ "hamzeh@google.com",
+ ],
+ },
+}
\ No newline at end of file
diff --git a/drm/mediadrm/plugins/clearkey/aidl/fuzzer.cpp b/drm/mediadrm/plugins/clearkey/aidl/fuzzer.cpp
new file mode 100644
index 0000000..9ef331f
--- /dev/null
+++ b/drm/mediadrm/plugins/clearkey/aidl/fuzzer.cpp
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#include <fuzzbinder/libbinder_ndk_driver.h>
+#include <fuzzer/FuzzedDataProvider.h>
+
+#include "CreatePluginFactories.h"
+
+#include <android/binder_manager.h>
+#include <android/binder_process.h>
+
+using ::aidl::android::hardware::drm::clearkey::createDrmFactory;
+using ::aidl::android::hardware::drm::clearkey::DrmFactory;
+
+using android::fuzzService;
+using ndk::SharedRefBase;
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
+ std::shared_ptr<DrmFactory> drmFactory = createDrmFactory();
+ fuzzService(drmFactory->asBinder().get(), FuzzedDataProvider(data, size));
+
+ return 0;
+}
diff --git a/drm/mediadrm/plugins/clearkey/common/Android.bp b/drm/mediadrm/plugins/clearkey/common/Android.bp
index a6a5b28..6913df4 100644
--- a/drm/mediadrm/plugins/clearkey/common/Android.bp
+++ b/drm/mediadrm/plugins/clearkey/common/Android.bp
@@ -97,3 +97,54 @@
integer_overflow: true,
},
}
+
+cc_library_static {
+ name: "libclearkeydevicefiles-protos.common_fuzz",
+
+ proto: {
+ export_proto_headers: true,
+ type: "lite",
+ },
+ srcs: ["protos/DeviceFiles.proto"],
+}
+
+cc_library_static {
+ name: "libclearkeybase_fuzz",
+
+ srcs: [
+ "AesCtrDecryptor.cpp",
+ "Base64.cpp",
+ "Buffer.cpp",
+ "ClearKeyUUID.cpp",
+ "DeviceFiles.cpp",
+ "InitDataParser.cpp",
+ "JsonWebKey.cpp",
+ "MemoryFileSystem.cpp",
+ "Session.cpp",
+ "SessionLibrary.cpp",
+ "Utils.cpp",
+ ],
+
+ cflags: ["-Wall", "-Werror"],
+
+ include_dirs: ["frameworks/av/include"],
+
+ shared_libs: [
+ "libutils",
+ "libcrypto",
+ ],
+
+ whole_static_libs: [
+ "libjsmn",
+ "libclearkeydevicefiles-protos.common_fuzz",
+ ],
+
+ export_include_dirs: [
+ "include",
+ "include/clearkeydrm",
+ ],
+
+ sanitize: {
+ integer_overflow: true,
+ },
+}
diff --git a/include/media/MmapStreamCallback.h b/include/media/MmapStreamCallback.h
index 31b8eb5..76ee6d7 100644
--- a/include/media/MmapStreamCallback.h
+++ b/include/media/MmapStreamCallback.h
@@ -37,12 +37,9 @@
/**
* The volume to be applied to the use case specified when opening the stream has changed
- * \param[in] channels a channel mask containing all channels the volume should be applied to.
- * \param[in] values the volume values to be applied to each channel. The size of the vector
- * should correspond to the channel count retrieved with
- * audio_channel_count_from_in_mask() or audio_channel_count_from_out_mask()
+ * \param[in] volume the new target volume
*/
- virtual void onVolumeChanged(audio_channel_mask_t channels, Vector<float> values) = 0;
+ virtual void onVolumeChanged(float volume) = 0;
/**
* The device the stream is routed to/from has changed
diff --git a/include/private/media/VideoFrame.h b/include/private/media/VideoFrame.h
index d4025e5..97e0b1d 100644
--- a/include/private/media/VideoFrame.h
+++ b/include/private/media/VideoFrame.h
@@ -85,8 +85,6 @@
uint32_t mSize; // Number of bytes of frame data
uint32_t mIccSize; // Number of bytes of ICC data
uint32_t mBitDepth; // number of bits per R / G / B channel
-
- // Adding new items must be 64-bit aligned.
};
}; // namespace android
diff --git a/media/TEST_MAPPING b/media/TEST_MAPPING
index 41fe080..3b7a314 100644
--- a/media/TEST_MAPPING
+++ b/media/TEST_MAPPING
@@ -45,6 +45,32 @@
"platinum-postsubmit": [
// runs regularly, independent of changes in this tree.
// signals if changes elsewhere break media functionality
+ // @FlakyTest: in staged-postsubmit, but not postsubmit
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.EncodeDecodeTest"
+ },
+ {
+ "exclude-annotation": "androidx.test.filters.FlakyTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsMediaCodecTestCases",
+ "options": [
+ {
+ "include-filter": "android.media.codec.cts.DecodeEditEncodeTest"
+ },
+ {
+ "exclude-annotation": "androidx.test.filters.FlakyTest"
+ }
+ ]
+ }
+ ],
+ "staged-platinum-postsubmit": [
+ // runs every four hours
{
"name": "CtsMediaCodecTestCases",
"options": [
diff --git a/media/audioserver/audioserver.rc b/media/audioserver/audioserver.rc
index c4a6601..0bd0d88 100644
--- a/media/audioserver/audioserver.rc
+++ b/media/audioserver/audioserver.rc
@@ -7,11 +7,9 @@
ioprio rt 4
task_profiles ProcessCapacityHigh HighPerformance
onrestart restart vendor.audio-hal
+ onrestart restart vendor.audio-hal-aidl
onrestart restart vendor.audio-hal-4-0-msd
onrestart restart audio_proxy_service
- # Keep the original service names for backward compatibility
- onrestart restart vendor.audio-hal-2-0
- onrestart restart audio-hal-2-0
on property:vts.native_server.on=1
stop audioserver
@@ -20,42 +18,34 @@
on property:init.svc.audioserver=stopped
stop vendor.audio-hal
+ stop vendor.audio-hal-aidl
stop vendor.audio-hal-4-0-msd
stop audio_proxy_service
- # Keep the original service names for backward compatibility
- stop vendor.audio-hal-2-0
- stop audio-hal-2-0
# See b/155364397. Need to have HAL service running for VTS.
# Can't use 'restart' because then HAL service would restart
# audioserver bringing it back into running state.
start vendor.audio-hal
+ start vendor.audio-hal-aidl
start vendor.audio-hal-4-0-msd
start audio_proxy_service
- # Keep the original service names for backward compatibility
- start vendor.audio-hal-2-0
- start audio-hal-2-0
on property:init.svc.audioserver=running
start vendor.audio-hal
+ start vendor.audio-hal-aidl
start vendor.audio-hal-4-0-msd
start audio_proxy_service
- # Keep the original service names for backward compatibility
- start vendor.audio-hal-2-0
- start audio-hal-2-0
on property:sys.audio.restart.hal=1
# See b/159966243. Avoid restart loop between audioserver and HAL.
# Keep the original service names for backward compatibility
stop vendor.audio-hal
+ stop vendor.audio-hal-aidl
stop vendor.audio-hal-4-0-msd
stop audio_proxy_service
- stop vendor.audio-hal-2-0
- stop audio-hal-2-0
start vendor.audio-hal
+ start vendor.audio-hal-aidl
start vendor.audio-hal-4-0-msd
start audio_proxy_service
- start vendor.audio-hal-2-0
- start audio-hal-2-0
# reset the property
setprop sys.audio.restart.hal 0
diff --git a/media/codec2/components/aom/C2SoftAomDec.cpp b/media/codec2/components/aom/C2SoftAomDec.cpp
index 3b0e949..d65488e 100644
--- a/media/codec2/components/aom/C2SoftAomDec.cpp
+++ b/media/codec2/components/aom/C2SoftAomDec.cpp
@@ -585,10 +585,9 @@
const uint16_t *srcV = (const uint16_t *)img->planes[AOM_PLANE_V];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2,
- dstYStride / sizeof(uint32_t),
- mWidth, mHeight);
+ convertYUV420Planar16ToY410OrRGBA1010102(
+ (uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2, srcUStride / 2,
+ srcVStride / 2, dstYStride / sizeof(uint32_t), mWidth, mHeight);
} else {
convertYUV420Planar16ToYV12(dstY, dstU, dstV, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride, dstUVStride,
diff --git a/media/codec2/components/base/SimpleC2Component.cpp b/media/codec2/components/base/SimpleC2Component.cpp
index b30eed5..678c269 100644
--- a/media/codec2/components/base/SimpleC2Component.cpp
+++ b/media/codec2/components/base/SimpleC2Component.cpp
@@ -137,7 +137,118 @@
dst += dstStride * 2;
}
}
+#define CLIP3(min, v, max) (((v) < (min)) ? (min) : (((max) > (v)) ? (v) : (max)))
+void convertYUV420Planar16ToRGBA1010102(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
+ const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width,
+ size_t height) {
+ // Converting two lines at a time, slightly faster
+ for (size_t y = 0; y < height; y += 2) {
+ uint32_t *dstTop = (uint32_t *)dst;
+ uint32_t *dstBot = (uint32_t *)(dst + dstStride);
+ uint16_t *ySrcTop = (uint16_t *)srcY;
+ uint16_t *ySrcBot = (uint16_t *)(srcY + srcYStride);
+ uint16_t *uSrc = (uint16_t *)srcU;
+ uint16_t *vSrc = (uint16_t *)srcV;
+ // BT.2020 Limited Range conversion
+
+ // B = 1.168 *(Y - 64) + 2.148 *(U - 512)
+ // G = 1.168 *(Y - 64) - 0.652 *(V - 512) - 0.188 *(U - 512)
+ // R = 1.168 *(Y - 64) + 1.683 *(V - 512)
+
+ // B = 1196/1024 *(Y - 64) + 2200/1024 *(U - 512)
+ // G = .................... - 668/1024 *(V - 512) - 192/1024 *(U - 512)
+ // R = .................... + 1723/1024 *(V - 512)
+
+ // min_B = (1196 *(- 64) + 2200 *(- 512)) / 1024 = -1175
+ // min_G = (1196 *(- 64) - 668 *(1023 - 512) - 192 *(1023 - 512)) / 1024 = -504
+ // min_R = (1196 *(- 64) + 1723 *(- 512)) / 1024 = -937
+
+ // max_B = (1196 *(1023 - 64) + 2200 *(1023 - 512)) / 1024 = 2218
+ // max_G = (1196 *(1023 - 64) - 668 *(- 512) - 192 *(- 512)) / 1024 = 1551
+ // max_R = (1196 *(1023 - 64) + 1723 *(1023 - 512)) / 1024 = 1980
+
+ int32_t mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
+ for (size_t x = 0; x < width; x += 2) {
+ int32_t u, v, y00, y01, y10, y11;
+ u = *uSrc - 512;
+ uSrc += 1;
+ v = *vSrc - 512;
+ vSrc += 1;
+
+ y00 = *ySrcTop - 64;
+ ySrcTop += 1;
+ y01 = *ySrcTop - 64;
+ ySrcTop += 1;
+ y10 = *ySrcBot - 64;
+ ySrcBot += 1;
+ y11 = *ySrcBot - 64;
+ ySrcBot += 1;
+
+ int32_t u_b = u * mU_B;
+ int32_t u_g = u * mU_G;
+ int32_t v_g = v * mV_G;
+ int32_t v_r = v * mV_R;
+
+ int32_t yMult, b, g, r;
+ yMult = y00 * mY;
+ b = (yMult + u_b) / 1024;
+ g = (yMult + v_g + u_g) / 1024;
+ r = (yMult + v_r) / 1024;
+ b = CLIP3(0, b, 1023);
+ g = CLIP3(0, g, 1023);
+ r = CLIP3(0, r, 1023);
+ *dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
+
+ yMult = y01 * mY;
+ b = (yMult + u_b) / 1024;
+ g = (yMult + v_g + u_g) / 1024;
+ r = (yMult + v_r) / 1024;
+ b = CLIP3(0, b, 1023);
+ g = CLIP3(0, g, 1023);
+ r = CLIP3(0, r, 1023);
+ *dstTop++ = 3 << 30 | (b << 20) | (g << 10) | r;
+
+ yMult = y10 * mY;
+ b = (yMult + u_b) / 1024;
+ g = (yMult + v_g + u_g) / 1024;
+ r = (yMult + v_r) / 1024;
+ b = CLIP3(0, b, 1023);
+ g = CLIP3(0, g, 1023);
+ r = CLIP3(0, r, 1023);
+ *dstBot++ = 3 << 30 | (b << 20) | (g << 10) | r;
+
+ yMult = y11 * mY;
+ b = (yMult + u_b) / 1024;
+ g = (yMult + v_g + u_g) / 1024;
+ r = (yMult + v_r) / 1024;
+ b = CLIP3(0, b, 1023);
+ g = CLIP3(0, g, 1023);
+ r = CLIP3(0, r, 1023);
+ *dstBot++ = 3 << 30 | (b << 20) | (g << 10) | r;
+ }
+
+ srcY += srcYStride * 2;
+ srcU += srcUStride;
+ srcV += srcVStride;
+ dst += dstStride * 2;
+ }
+}
+
+void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width,
+ size_t height) {
+ if (isAtLeastT()) {
+ convertYUV420Planar16ToRGBA1010102(dst, srcY, srcU, srcV, srcYStride, srcUStride,
+ srcVStride, dstStride, width, height);
+ } else {
+ convertYUV420Planar16ToY410(dst, srcY, srcU, srcV, srcYStride, srcUStride, srcVStride,
+ dstStride, width, height);
+ }
+}
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
@@ -799,10 +910,15 @@
// Add YV12 in the end as a fall-back option
mBitDepth10HalPixelFormats.push_back(HAL_PIXEL_FORMAT_YV12);
}
- // When RGBA1010102 is not allowed and if the first supported hal pixel is format is
- // HAL_PIXEL_FORMAT_RGBA_1010102, then return HAL_PIXEL_FORMAT_YV12
- if (!allowRGBA1010102 && mBitDepth10HalPixelFormats[0] == HAL_PIXEL_FORMAT_RGBA_1010102) {
- return HAL_PIXEL_FORMAT_YV12;
+ // From Android T onwards, HAL_PIXEL_FORMAT_RGBA_1010102 corresponds to true
+ // RGBA 1010102 format unlike earlier versions where it was used to represent
+ // YUVA 1010102 data
+ if (!isAtLeastT()) {
+        // When RGBA1010102 is not allowed and the first supported hal pixel format is
+        // HAL_PIXEL_FORMAT_RGBA_1010102, then return HAL_PIXEL_FORMAT_YV12
+ if (!allowRGBA1010102 && mBitDepth10HalPixelFormats[0] == HAL_PIXEL_FORMAT_RGBA_1010102) {
+ return HAL_PIXEL_FORMAT_YV12;
+ }
}
// Return the first entry from supported formats
return mBitDepth10HalPixelFormats[0];
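
As a quick sanity check of the fixed-point BT.2020 coefficients used in convertYUV420Planar16ToRGBA1010102() above: for a 10-bit limited-range white pixel (Y = 940, U = V = 512) all three channels clip to 1023. A standalone single-pixel sketch of that computation (illustrative only, not part of the patch):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const int32_t mY = 1196, mU_B = 2200, mV_G = -668, mV_R = 1723, mU_G = -192;
        const int32_t yVal = 940, uVal = 512, vVal = 512;  // 10-bit limited-range white
        const int32_t y = yVal - 64, u = uVal - 512, v = vVal - 512;
        const int32_t b = std::clamp((y * mY + u * mU_B) / 1024, 0, 1023);
        const int32_t g = std::clamp((y * mY + v * mV_G + u * mU_G) / 1024, 0, 1023);
        const int32_t r = std::clamp((y * mY + v * mV_R) / 1024, 0, 1023);
        const uint32_t packed = (3u << 30) | (b << 20) | (g << 10) | r;
        printf("R=%d G=%d B=%d packed=0x%08x\n", r, g, b, packed);  // 1023 1023 1023
        return 0;
    }
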
diff --git a/media/codec2/components/base/include/SimpleC2Component.h b/media/codec2/components/base/include/SimpleC2Component.h
index 52ae3b8..3172f29 100644
--- a/media/codec2/components/base/include/SimpleC2Component.h
+++ b/media/codec2/components/base/include/SimpleC2Component.h
@@ -33,9 +33,11 @@
size_t srcUStride, size_t srcVStride, size_t dstYStride,
size_t dstUVStride, uint32_t width, uint32_t height,
bool isMonochrome = false);
-void convertYUV420Planar16ToY410(uint32_t *dst, const uint16_t *srcY, const uint16_t *srcU,
- const uint16_t *srcV, size_t srcYStride, size_t srcUStride,
- size_t srcVStride, size_t dstStride, size_t width, size_t height);
+void convertYUV420Planar16ToY410OrRGBA1010102(uint32_t *dst, const uint16_t *srcY,
+ const uint16_t *srcU, const uint16_t *srcV,
+ size_t srcYStride, size_t srcUStride,
+ size_t srcVStride, size_t dstStride, size_t width,
+ size_t height);
void convertYUV420Planar16ToYV12(uint8_t *dstY, uint8_t *dstU, uint8_t *dstV, const uint16_t *srcY,
const uint16_t *srcU, const uint16_t *srcV, size_t srcYStride,
size_t srcUStride, size_t srcVStride, size_t dstYStride,
diff --git a/media/codec2/components/gav1/C2SoftGav1Dec.cpp b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
index f5c8138..a22c750 100644
--- a/media/codec2/components/gav1/C2SoftGav1Dec.cpp
+++ b/media/codec2/components/gav1/C2SoftGav1Dec.cpp
@@ -714,9 +714,9 @@
const uint16_t *srcV = (const uint16_t *)buffer->plane[2];
if (format == HAL_PIXEL_FORMAT_RGBA_1010102) {
- convertYUV420Planar16ToY410((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
- srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
- mWidth, mHeight);
+ convertYUV420Planar16ToY410OrRGBA1010102((uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
+ srcUStride / 2, srcVStride / 2,
+ dstYStride / sizeof(uint32_t), mWidth, mHeight);
} else if (format == HAL_PIXEL_FORMAT_YCBCR_P010) {
convertYUV420Planar16ToP010((uint16_t *)dstY, (uint16_t *)dstU, srcY, srcU, srcV,
srcYStride / 2, srcUStride / 2, srcVStride / 2, dstYStride / 2,
diff --git a/media/codec2/components/vpx/C2SoftVpxDec.cpp b/media/codec2/components/vpx/C2SoftVpxDec.cpp
index e81f044..5d7ef5c 100644
--- a/media/codec2/components/vpx/C2SoftVpxDec.cpp
+++ b/media/codec2/components/vpx/C2SoftVpxDec.cpp
@@ -68,8 +68,8 @@
DefineParam(mSize, C2_PARAMKEY_PICTURE_SIZE)
.withDefault(new C2StreamPictureSizeInfo::output(0u, 320, 240))
.withFields({
- C2F(mSize, width).inRange(2, 2048, 2),
- C2F(mSize, height).inRange(2, 2048, 2),
+ C2F(mSize, width).inRange(2, 2048),
+ C2F(mSize, height).inRange(2, 2048),
})
.withSetter(SizeSetter)
.build());
@@ -717,7 +717,12 @@
}
C2MemoryUsage usage = { C2MemoryUsage::CPU_READ, C2MemoryUsage::CPU_WRITE };
- c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16), mHeight, format, usage, &block);
+    // We always create a graphic block that is width aligned to 16 and height
+    // aligned to 2. The actual image dimensions are then passed to
+    // createGraphicBuffer(), which sets the correct "crop" for the image.
+ c2_status_t err = pool->fetchGraphicBlock(align(mWidth, 16),
+ align(mHeight, 2), format, usage,
+ &block);
if (err != C2_OK) {
ALOGE("fetchGraphicBlock for Output failed with status %d", err);
work->result = err;
@@ -760,7 +765,7 @@
[dstY, srcY, srcU, srcV,
srcYStride, srcUStride, srcVStride, dstYStride,
width = mWidth, height = std::min(mHeight - i, kHeight)] {
- convertYUV420Planar16ToY410(
+ convertYUV420Planar16ToY410OrRGBA1010102(
(uint32_t *)dstY, srcY, srcU, srcV, srcYStride / 2,
srcUStride / 2, srcVStride / 2, dstYStride / sizeof(uint32_t),
width, height);
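
For reference, the width/height alignment used in fetchGraphicBlock() above rounds a dimension up to the next multiple of the alignment. A minimal sketch of that computation, assuming the usual power-of-two round-up definition rather than the exact C2 helper:

    #include <cstdint>

    // Round 'value' up to the next multiple of a power-of-two 'alignment'.
    static constexpr uint32_t alignUp(uint32_t value, uint32_t alignment) {
        return (value + alignment - 1) & ~(alignment - 1);
    }

    static_assert(alignUp(1080, 2) == 1080, "even heights are unchanged");
    static_assert(alignUp(1079, 2) == 1080, "odd heights round up to the next even value");
    static_assert(alignUp(1080, 16) == 1088, "widths round up to a 16-pixel multiple");
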
diff --git a/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp b/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
index b942be7..da62111 100644
--- a/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
+++ b/media/codec2/hidl/plugin/samples/SampleFilterPlugin.cpp
@@ -807,7 +807,6 @@
// affectedParams
{
C2StreamHdrStaticInfo::output::PARAM_TYPE,
- C2StreamHdr10PlusInfo::output::PARAM_TYPE,
C2StreamColorAspectsInfo::output::PARAM_TYPE,
},
};
diff --git a/media/codec2/sfplugin/CCodecConfig.cpp b/media/codec2/sfplugin/CCodecConfig.cpp
index 2643290..69efc5f 100644
--- a/media/codec2/sfplugin/CCodecConfig.cpp
+++ b/media/codec2/sfplugin/CCodecConfig.cpp
@@ -386,7 +386,7 @@
// read back always as int
float value;
if (v.get(&value)) {
- return (int32_t)value;
+ return (int32_t) (value + 0.5);
}
return C2Value();
}));
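
The cast change above switches the read-back from truncation to round-to-nearest for the non-negative values handled here. A tiny standalone illustration (not framework code):

    #include <cassert>
    #include <cstdint>

    int main() {
        const float value = 28.9f;
        assert((int32_t)value == 28);          // plain cast truncates
        assert((int32_t)(value + 0.5) == 29);  // adding 0.5 first rounds to nearest
        return 0;
    }
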
@@ -932,7 +932,7 @@
.limitTo(D::ENCODER & (D::CONFIG | D::PARAM)));
add(ConfigMapper(KEY_FLAC_COMPRESSION_LEVEL, C2_PARAMKEY_COMPLEXITY, "value")
.limitTo(D::AUDIO & D::ENCODER));
- add(ConfigMapper("complexity", C2_PARAMKEY_COMPLEXITY, "value")
+ add(ConfigMapper(KEY_COMPLEXITY, C2_PARAMKEY_COMPLEXITY, "value")
.limitTo(D::ENCODER & (D::CONFIG | D::PARAM)));
add(ConfigMapper(KEY_GRID_COLUMNS, C2_PARAMKEY_TILE_LAYOUT, "columns")
diff --git a/media/codec2/tests/Android.bp b/media/codec2/tests/Android.bp
index 9c3ba4d..2217235 100644
--- a/media/codec2/tests/Android.bp
+++ b/media/codec2/tests/Android.bp
@@ -36,6 +36,8 @@
cc_test {
name: "codec2_vndk_test",
test_suites: ["device-tests"],
+    // This test doesn't seem to support running isolated under the current assumptions
+ isolated: false,
srcs: [
"C2_test.cpp",
diff --git a/media/extractors/mp4/MPEG4Extractor.cpp b/media/extractors/mp4/MPEG4Extractor.cpp
index eccbf46..5a03992 100644
--- a/media/extractors/mp4/MPEG4Extractor.cpp
+++ b/media/extractors/mp4/MPEG4Extractor.cpp
@@ -1969,26 +1969,8 @@
}
if (chunk_type == FOURCC("fLaC")) {
-
- // From https://github.com/xiph/flac/blob/master/doc/isoflac.txt
- // 4 for mime, 4 for blockType and BlockLen, 34 for metadata
- uint8_t flacInfo[4 + 4 + 34];
- // skipping dFla, version
- data_offset += sizeof(buffer) + 12;
- size_t flacOffset = 4;
- // Add flaC header mime type to CSD
- strncpy((char *)flacInfo, "fLaC", 4);
- if (mDataSource->readAt(
- data_offset, flacInfo + flacOffset, sizeof(flacInfo) - flacOffset) <
- (ssize_t)sizeof(flacInfo) - flacOffset) {
- return ERROR_IO;
- }
- data_offset += sizeof(flacInfo) - flacOffset;
-
- AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, flacInfo,
- sizeof(flacInfo));
+ data_offset += sizeof(buffer);
*offset = data_offset;
- CHECK_EQ(*offset, stop_offset);
}
while (*offset < stop_offset) {
@@ -2521,6 +2503,35 @@
break;
}
+ case FOURCC("dfLa"):
+ {
+ *offset += chunk_size;
+
+ // From https://github.com/xiph/flac/blob/master/doc/isoflac.txt
+ // 4 for mediaType, 4 for blockType and BlockLen, 34 for metadata
+ uint8_t flacInfo[4 + 4 + 34];
+
+ if (chunk_data_size != sizeof(flacInfo)) {
+ return ERROR_MALFORMED;
+ }
+
+ data_offset += 4;
+ size_t flacOffset = 4;
+ // Add flaC header mediaType to CSD
+ strncpy((char *)flacInfo, "fLaC", 4);
+
+ ssize_t bytesToRead = sizeof(flacInfo) - flacOffset;
+ if (mDataSource->readAt(
+ data_offset, flacInfo + flacOffset, bytesToRead) < bytesToRead) {
+ return ERROR_IO;
+ }
+
+ data_offset += bytesToRead;
+ AMediaFormat_setBuffer(mLastTrack->meta, AMEDIAFORMAT_KEY_CSD_0, flacInfo,
+ sizeof(flacInfo));
+ break;
+ }
+
case FOURCC("avcC"):
{
*offset += chunk_size;
@@ -4775,7 +4786,7 @@
if (len2 == 0) {
return ERROR_MALFORMED;
}
- if (offset >= csd_size || csd[offset] != 0x01) {
+ if (offset + len1 > csd_size || csd[offset] != 0x01) {
return ERROR_MALFORMED;
}
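
For orientation, the FLAC codec-specific data assembled in the new dfLa handler above is a fixed 42-byte buffer. A sketch of its layout, assuming the dfLa box carries a single 38-byte STREAMINFO metadata block as required by the isoflac spec referenced in the comment (struct name is illustrative):

    #include <cstdint>

    struct FlacCsdLayout {
        char    mediaType[4];    // "fLaC" marker prepended by the extractor
        uint8_t blockHeader[4];  // METADATA_BLOCK header: type + 24-bit length
        uint8_t streamInfo[34];  // STREAMINFO body (sample rate, channels, ...)
    };
    static_assert(sizeof(FlacCsdLayout) == 42, "matches uint8_t flacInfo[4 + 4 + 34]");
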
diff --git a/media/libaaudio/include/aaudio/AAudio.h b/media/libaaudio/include/aaudio/AAudio.h
index 2ff9f5a..03930ac 100644
--- a/media/libaaudio/include/aaudio/AAudio.h
+++ b/media/libaaudio/include/aaudio/AAudio.h
@@ -40,7 +40,7 @@
/**
* This is used to represent a value that has not been specified.
* For example, an application could use {@link #AAUDIO_UNSPECIFIED} to indicate
- * that is did not not care what the specific value of a parameter was
+ * that it did not care what the specific value of a parameter was
* and would accept whatever it was given.
*/
#define AAUDIO_UNSPECIFIED 0
@@ -1441,7 +1441,10 @@
/**
* Asynchronous request for the stream to flush.
* Flushing will discard any pending data.
- * This call only works if the stream is pausing or paused. TODO review
+ * This call only works if the stream is OPEN, PAUSED, STOPPED, or FLUSHED.
+ * Calling this function when in other states,
+ * or calling from an AAudio callback function,
+ * will have no effect and an error will be returned.
* Frame counters are not reset by a flush. They may be advanced.
* After this call the state will be in {@link #AAUDIO_STREAM_STATE_FLUSHING} or
* {@link #AAUDIO_STREAM_STATE_FLUSHED}.
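
A short usage sketch of the rule documented above, checking the stream state before flushing (illustrative only, not part of the header; the calls and state constants are the public AAudio NDK API):

    #include <aaudio/AAudio.h>

    // Flush only from a state where it is allowed, and never from an AAudio callback.
    static aaudio_result_t flushIfAllowed(AAudioStream* stream) {
        switch (AAudioStream_getState(stream)) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_PAUSED:
            case AAUDIO_STREAM_STATE_STOPPED:
            case AAUDIO_STREAM_STATE_FLUSHED:
                return AAudioStream_requestFlush(stream);
            default:
                return AAUDIO_ERROR_INVALID_STATE;
        }
    }
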
diff --git a/media/libaaudio/src/client/AudioStreamInternal.cpp b/media/libaaudio/src/client/AudioStreamInternal.cpp
index 9f0564f..34ecd25 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.cpp
+++ b/media/libaaudio/src/client/AudioStreamInternal.cpp
@@ -605,13 +605,6 @@
return AAUDIO_ERROR_INVALID_STATE;
}
-aaudio_result_t AudioStreamInternal::updateStateMachine() {
- if (isDataCallbackActive()) {
- return AAUDIO_OK; // state is getting updated by the callback thread read/write call
- }
- return processCommands();
-}
-
void AudioStreamInternal::logTimestamp(AAudioServiceMessage &command) {
static int64_t oldPosition = 0;
static int64_t oldTime = 0;
diff --git a/media/libaaudio/src/client/AudioStreamInternal.h b/media/libaaudio/src/client/AudioStreamInternal.h
index 2367572..4ea61d2 100644
--- a/media/libaaudio/src/client/AudioStreamInternal.h
+++ b/media/libaaudio/src/client/AudioStreamInternal.h
@@ -48,7 +48,7 @@
int64_t *framePosition,
int64_t *timeNanoseconds) override;
- virtual aaudio_result_t updateStateMachine() override;
+ virtual aaudio_result_t processCommands() override;
aaudio_result_t open(const AudioStreamBuilder &builder) override;
@@ -110,8 +110,6 @@
aaudio_result_t drainTimestampsFromService();
- aaudio_result_t processCommands();
-
aaudio_result_t stopCallback_l();
virtual void prepareBuffersForStart() {}
diff --git a/media/libaaudio/src/core/AudioStream.cpp b/media/libaaudio/src/core/AudioStream.cpp
index 06f05b0..8a5186a 100644
--- a/media/libaaudio/src/core/AudioStream.cpp
+++ b/media/libaaudio/src/core/AudioStream.cpp
@@ -21,7 +21,9 @@
#include <atomic>
#include <stdint.h>
+#include <linux/futex.h>
#include <media/MediaMetricsItem.h>
+#include <sys/syscall.h>
#include <aaudio/AAudio.h>
@@ -362,34 +364,37 @@
}
void AudioStream::setState(aaudio_stream_state_t state) {
- ALOGD("%s(s#%d) from %d to %d", __func__, getId(), mState, state);
- if (state == mState) {
+ aaudio_stream_state_t oldState = mState.load();
+ ALOGD("%s(s#%d) from %d to %d", __func__, getId(), oldState, state);
+ if (state == oldState) {
return; // no change
}
// Track transition to DISCONNECTED state.
if (state == AAUDIO_STREAM_STATE_DISCONNECTED) {
android::mediametrics::LogItem(mMetricsId)
.set(AMEDIAMETRICS_PROP_EVENT, AMEDIAMETRICS_PROP_EVENT_VALUE_DISCONNECT)
- .set(AMEDIAMETRICS_PROP_STATE, AudioGlobal_convertStreamStateToText(getState()))
+ .set(AMEDIAMETRICS_PROP_STATE, AudioGlobal_convertStreamStateToText(oldState))
.record();
}
// CLOSED is a final state
- if (mState == AAUDIO_STREAM_STATE_CLOSED) {
+ if (oldState == AAUDIO_STREAM_STATE_CLOSED) {
ALOGW("%s(%d) tried to set to %d but already CLOSED", __func__, getId(), state);
// Once CLOSING, we can only move to CLOSED state.
- } else if (mState == AAUDIO_STREAM_STATE_CLOSING
+ } else if (oldState == AAUDIO_STREAM_STATE_CLOSING
&& state != AAUDIO_STREAM_STATE_CLOSED) {
ALOGW("%s(%d) tried to set to %d but already CLOSING", __func__, getId(), state);
// Once DISCONNECTED, we can only move to CLOSING or CLOSED state.
- } else if (mState == AAUDIO_STREAM_STATE_DISCONNECTED
+ } else if (oldState == AAUDIO_STREAM_STATE_DISCONNECTED
&& !(state == AAUDIO_STREAM_STATE_CLOSING
|| state == AAUDIO_STREAM_STATE_CLOSED)) {
ALOGW("%s(%d) tried to set to %d but already DISCONNECTED", __func__, getId(), state);
} else {
- mState = state;
+ mState.store(state);
+ // Wake up a wakeForStateChange thread if it exists.
+ syscall(SYS_futex, &mState, FUTEX_WAKE_PRIVATE, INT_MAX, NULL, NULL, 0);
}
}
@@ -408,9 +413,15 @@
if (durationNanos > timeoutNanoseconds) {
durationNanos = timeoutNanoseconds;
}
- AudioClock::sleepForNanos(durationNanos);
- timeoutNanoseconds -= durationNanos;
+ struct timespec time;
+ time.tv_sec = durationNanos / AAUDIO_NANOS_PER_SECOND;
+ // Add the fractional nanoseconds.
+ time.tv_nsec = durationNanos - (time.tv_sec * AAUDIO_NANOS_PER_SECOND);
+ // Sleep for durationNanos. If mState changes from the callback
+ // thread, this thread will wake up earlier.
+ syscall(SYS_futex, &mState, FUTEX_WAIT_PRIVATE, currentState, &time, NULL, 0);
+ timeoutNanoseconds -= durationNanos;
aaudio_result_t result = updateStateMachine();
if (result != AAUDIO_OK) {
return result;
diff --git a/media/libaaudio/src/core/AudioStream.h b/media/libaaudio/src/core/AudioStream.h
index 5fb4528..8dd5538 100644
--- a/media/libaaudio/src/core/AudioStream.h
+++ b/media/libaaudio/src/core/AudioStream.h
@@ -100,10 +100,17 @@
int64_t *timeNanoseconds) = 0;
/**
- * Update state machine.()
- * @return
+ * Update state machine.
+ * @return result of the operation.
*/
- virtual aaudio_result_t updateStateMachine() = 0;
+ aaudio_result_t updateStateMachine() {
+ if (isDataCallbackActive()) {
+ return AAUDIO_OK; // state is getting updated by the callback thread read/write call
+ }
+ return processCommands();
+ };
+
+ virtual aaudio_result_t processCommands() = 0;
// =========== End ABSTRACT methods ===========================
@@ -184,7 +191,7 @@
// ============== Queries ===========================
aaudio_stream_state_t getState() const {
- return mState;
+ return mState.load();
}
virtual int32_t getBufferSize() const {
@@ -674,6 +681,8 @@
const android::sp<MyPlayerBase> mPlayerBase;
+ std::atomic<aaudio_stream_state_t> mState{AAUDIO_STREAM_STATE_UNINITIALIZED};
+
// These do not change after open().
int32_t mSamplesPerFrame = AAUDIO_UNSPECIFIED;
aaudio_channel_mask_t mChannelMask = AAUDIO_UNSPECIFIED;
@@ -682,7 +691,6 @@
aaudio_sharing_mode_t mSharingMode = AAUDIO_SHARING_MODE_SHARED;
bool mSharingModeMatchRequired = false; // must match sharing mode requested
audio_format_t mFormat = AUDIO_FORMAT_DEFAULT;
- aaudio_stream_state_t mState = AAUDIO_STREAM_STATE_UNINITIALIZED;
aaudio_performance_mode_t mPerformanceMode = AAUDIO_PERFORMANCE_MODE_NONE;
int32_t mFramesPerBurst = 0;
int32_t mBufferCapacity = 0;
diff --git a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
index dd11169..f32ef65 100644
--- a/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamLegacy.cpp
@@ -127,7 +127,7 @@
mCallbackEnabled.store(false);
}
- if (updateStateMachine() != AAUDIO_OK) {
+ if (processCommands() != AAUDIO_OK) {
forceDisconnect();
mCallbackEnabled.store(false);
}
@@ -192,7 +192,7 @@
mCallbackEnabled.store(false);
}
- if (updateStateMachine() != AAUDIO_OK) {
+ if (processCommands() != AAUDIO_OK) {
forceDisconnect();
mCallbackEnabled.store(false);
}
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.cpp b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
index 1e39e0f..9a136a7 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.cpp
@@ -364,8 +364,7 @@
return checkForDisconnectRequest(false);
}
-aaudio_result_t AudioStreamRecord::updateStateMachine()
-{
+aaudio_result_t AudioStreamRecord::processCommands() {
aaudio_result_t result = AAUDIO_OK;
aaudio_wrapping_frames_t position;
status_t err;
diff --git a/media/libaaudio/src/legacy/AudioStreamRecord.h b/media/libaaudio/src/legacy/AudioStreamRecord.h
index 5ce73f9..252ff3c 100644
--- a/media/libaaudio/src/legacy/AudioStreamRecord.h
+++ b/media/libaaudio/src/legacy/AudioStreamRecord.h
@@ -58,7 +58,7 @@
int64_t getFramesWritten() override;
- aaudio_result_t updateStateMachine() override;
+ aaudio_result_t processCommands() override;
aaudio_direction_t getDirection() const override {
return AAUDIO_DIRECTION_INPUT;
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.cpp b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
index 6f1dc92..09caa5c 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.cpp
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.cpp
@@ -378,8 +378,7 @@
return checkForDisconnectRequest(false);;
}
-aaudio_result_t AudioStreamTrack::updateStateMachine()
-{
+aaudio_result_t AudioStreamTrack::processCommands() {
status_t err;
aaudio_wrapping_frames_t position;
switch (getState()) {
diff --git a/media/libaaudio/src/legacy/AudioStreamTrack.h b/media/libaaudio/src/legacy/AudioStreamTrack.h
index 0f4d72b..1f877b5 100644
--- a/media/libaaudio/src/legacy/AudioStreamTrack.h
+++ b/media/libaaudio/src/legacy/AudioStreamTrack.h
@@ -79,7 +79,7 @@
return AAUDIO_DIRECTION_OUTPUT;
}
- aaudio_result_t updateStateMachine() override;
+ aaudio_result_t processCommands() override;
int64_t incrementClientFrameCounter(int32_t frames) override {
return incrementFramesWritten(frames);
diff --git a/media/libaudioclient/AidlConversion.cpp b/media/libaudioclient/AidlConversion.cpp
index 11724e0..b0c9a0c 100644
--- a/media/libaudioclient/AidlConversion.cpp
+++ b/media/libaudioclient/AidlConversion.cpp
@@ -1932,7 +1932,7 @@
case media::AudioPortType::SESSION:
legacy.session = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_port_config_session_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, session))));
+ VALUE_OR_RETURN(UNION_GET(aidlSys, session))));
return legacy;
}
@@ -1966,9 +1966,9 @@
return OK;
}
case AUDIO_PORT_TYPE_SESSION:
- UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+ UNION_SET(*aidl, unspecified, false);
+ UNION_SET(*aidlSys, session, VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_config_session_ext_int32_t(legacy.session)));
- UNION_SET(*aidlSys, unspecified, false);
return OK;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
@@ -2816,7 +2816,7 @@
case media::AudioPortType::SESSION:
legacy.session = VALUE_OR_RETURN(
aidl2legacy_int32_t_audio_port_session_ext(
- VALUE_OR_RETURN(UNION_GET(aidl, session))));
+ VALUE_OR_RETURN(UNION_GET(aidlSys, session))));
return legacy;
}
@@ -2852,9 +2852,9 @@
return OK;
}
case AUDIO_PORT_TYPE_SESSION:
- UNION_SET(*aidl, session, VALUE_OR_RETURN_STATUS(
+ UNION_SET(*aidl, unspecified, false);
+ UNION_SET(*aidlSys, session, VALUE_OR_RETURN_STATUS(
legacy2aidl_audio_port_session_ext_int32_t(legacy.session)));
- UNION_SET(*aidlSys, unspecified, false);
return OK;
}
LOG_ALWAYS_FATAL("Shouldn't get here"); // with -Werror,-Wswitch may compile-time fail
diff --git a/media/libaudioclient/Android.bp b/media/libaudioclient/Android.bp
index 69a9c68..4c27d52 100644
--- a/media/libaudioclient/Android.bp
+++ b/media/libaudioclient/Android.bp
@@ -302,12 +302,26 @@
double_loadable: true,
vendor_available: true,
srcs: [
+ "aidl/android/media/EffectConfig.aidl",
"aidl/android/media/IEffect.aidl",
"aidl/android/media/IEffectClient.aidl",
],
imports: [
+ "android.media.audio.common.types-V1",
"shared-file-region-aidl",
],
+ backend: {
+ cpp: {
+ min_sdk_version: "29",
+ apex_available: [
+ "//apex_available:platform",
+ "com.android.media",
+ ],
+ },
+ java: {
+ sdk_version: "module_current",
+ },
+ },
}
aidl_interface {
diff --git a/media/libaudioclient/AudioEffect.cpp b/media/libaudioclient/AudioEffect.cpp
index 7b273ec..2870c4c 100644
--- a/media/libaudioclient/AudioEffect.cpp
+++ b/media/libaudioclient/AudioEffect.cpp
@@ -89,7 +89,7 @@
return NO_INIT;
}
- if (type == NULL && uuid == NULL) {
+ if (type == nullptr && uuid == nullptr) {
ALOGW("Must specify at least type or uuid");
return BAD_VALUE;
}
@@ -99,8 +99,8 @@
mCallback = callback;
memset(&mDescriptor, 0, sizeof(effect_descriptor_t));
- mDescriptor.type = *(type != NULL ? type : EFFECT_UUID_NULL);
- mDescriptor.uuid = *(uuid != NULL ? uuid : EFFECT_UUID_NULL);
+ mDescriptor.type = *(type != nullptr ? type : EFFECT_UUID_NULL);
+ mDescriptor.uuid = *(uuid != nullptr ? uuid : EFFECT_UUID_NULL);
// TODO b/182392769: use attribution source util
mIEffectClient = new EffectClient(this);
@@ -292,7 +292,7 @@
AudioSystem::releaseAudioSessionId(mSessionId,
VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid)));
}
- if (mIEffect != NULL) {
+ if (mIEffect != nullptr) {
mIEffect->disconnect();
IInterface::asBinder(mIEffect)->unlinkToDeath(mIEffectClient);
}
@@ -370,7 +370,7 @@
if (mEnabled == (cmdCode == EFFECT_CMD_ENABLE)) {
return NO_ERROR;
}
- if (replySize == NULL || *replySize != sizeof(status_t) || replyData == NULL) {
+ if (replySize == nullptr || *replySize != sizeof(status_t) || replyData == nullptr) {
return BAD_VALUE;
}
mLock.lock();
@@ -413,7 +413,7 @@
return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
}
- if (param == NULL || param->psize == 0 || param->vsize == 0) {
+ if (param == nullptr || param->psize == 0 || param->vsize == 0) {
return BAD_VALUE;
}
@@ -448,8 +448,7 @@
if (mStatus != NO_ERROR) {
return (mStatus == ALREADY_EXISTS) ? (status_t) INVALID_OPERATION : mStatus;
}
-
- if (param == NULL || param->psize == 0 || param->vsize == 0) {
+ if (param == nullptr || param->psize == 0 || param->vsize == 0) {
return BAD_VALUE;
}
@@ -504,8 +503,7 @@
if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
return mStatus;
}
-
- if (param == NULL || param->psize == 0 || param->vsize == 0) {
+ if (param == nullptr || param->psize == 0 || param->vsize == 0) {
return BAD_VALUE;
}
@@ -529,6 +527,36 @@
return status;
}
+status_t AudioEffect::getConfigs(
+ audio_config_base_t *inputCfg, audio_config_base_t *outputCfg)
+{
+ if (mProbe) {
+ return INVALID_OPERATION;
+ }
+ if (mStatus != NO_ERROR && mStatus != ALREADY_EXISTS) {
+ return mStatus;
+ }
+ if (inputCfg == NULL || outputCfg == NULL) {
+ return BAD_VALUE;
+ }
+ status_t status;
+ media::EffectConfig cfg;
+ Status bs = mIEffect->getConfig(&cfg, &status);
+ if (!bs.isOk()) {
+ status = statusTFromBinderStatus(bs);
+ ALOGW("%s received status %d from binder transaction", __func__, status);
+ return status;
+ }
+ if (status == NO_ERROR) {
+ *inputCfg = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfigBase_audio_config_base_t(
+ cfg.inputCfg, cfg.isOnInputStream));
+ *outputCfg = VALUE_OR_RETURN_STATUS(aidl2legacy_AudioConfigBase_audio_config_base_t(
+ cfg.outputCfg, cfg.isOnInputStream));
+ } else {
+ ALOGW("%s received status %d from the effect", __func__, status);
+ }
+ return status;
+}
// -------------------------------------------------------------------------
@@ -540,7 +568,6 @@
if (cb != nullptr) {
cb->onError(mStatus);
}
- mIEffect.clear();
}
// -------------------------------------------------------------------------
@@ -603,6 +630,9 @@
status_t AudioEffect::queryNumberEffects(uint32_t *numEffects)
{
+ if (numEffects == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->queryNumberEffects(numEffects);
@@ -610,6 +640,9 @@
status_t AudioEffect::queryEffect(uint32_t index, effect_descriptor_t *descriptor)
{
+ if (descriptor == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->queryEffect(index, descriptor);
@@ -620,6 +653,9 @@
uint32_t preferredTypeFlag,
effect_descriptor_t *descriptor)
{
+ if (uuid == nullptr || type == nullptr || descriptor == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
return af->getEffectDescriptor(uuid, type, preferredTypeFlag, descriptor);
@@ -650,6 +686,9 @@
status_t AudioEffect::newEffectUniqueId(audio_unique_id_t* id)
{
+ if (id == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
*id = af->newAudioUniqueId(AUDIO_UNIQUE_ID_USE_EFFECT);
@@ -663,14 +702,15 @@
audio_source_t source,
audio_unique_id_t *id)
{
+ if ((typeStr == nullptr && uuidStr == nullptr) || id == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
-
// Convert type & uuid from string to effect_uuid_t.
effect_uuid_t type;
- if (typeStr != NULL) {
+ if (typeStr != nullptr) {
status_t res = stringToGuid(typeStr, &type);
if (res != OK) return res;
} else {
@@ -678,7 +718,7 @@
}
effect_uuid_t uuid;
- if (uuidStr != NULL) {
+ if (uuidStr != nullptr) {
status_t res = stringToGuid(uuidStr, &uuid);
if (res != OK) return res;
} else {
@@ -706,14 +746,15 @@
audio_usage_t usage,
audio_unique_id_t *id)
{
+ if ((typeStr == nullptr && uuidStr == nullptr) || id == nullptr) {
+ return BAD_VALUE;
+ }
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return PERMISSION_DENIED;
- if (typeStr == NULL && uuidStr == NULL) return BAD_VALUE;
-
// Convert type & uuid from string to effect_uuid_t.
effect_uuid_t type;
- if (typeStr != NULL) {
+ if (typeStr != nullptr) {
status_t res = stringToGuid(typeStr, &type);
if (res != OK) return res;
} else {
@@ -721,7 +762,7 @@
}
effect_uuid_t uuid;
- if (uuidStr != NULL) {
+ if (uuidStr != nullptr) {
status_t res = stringToGuid(uuidStr, &uuid);
if (res != OK) return res;
} else {
@@ -764,7 +805,7 @@
status_t AudioEffect::stringToGuid(const char *str, effect_uuid_t *guid)
{
- if (str == NULL || guid == NULL) {
+ if (str == nullptr || guid == nullptr) {
return BAD_VALUE;
}
@@ -790,7 +831,7 @@
status_t AudioEffect::guidToString(const effect_uuid_t *guid, char *str, size_t maxLen)
{
- if (guid == NULL || str == NULL) {
+ if (guid == nullptr || str == nullptr) {
return BAD_VALUE;
}
diff --git a/media/libaudioclient/AudioRecord.cpp b/media/libaudioclient/AudioRecord.cpp
index 15203d6..69d73ad 100644
--- a/media/libaudioclient/AudioRecord.cpp
+++ b/media/libaudioclient/AudioRecord.cpp
@@ -146,39 +146,6 @@
audio_channel_mask_t channelMask,
const AttributionSourceState& client,
size_t frameCount,
- legacy_callback_t callback,
- void* user,
- uint32_t notificationFrames,
- audio_session_t sessionId,
- transfer_type transferType,
- audio_input_flags_t flags,
- const audio_attributes_t* pAttributes,
- audio_port_handle_t selectedDeviceId,
- audio_microphone_direction_t selectedMicDirection,
- float microphoneFieldDimension)
- : mActive(false),
- mStatus(NO_INIT),
- mClientAttributionSource(client),
- mSessionId(AUDIO_SESSION_ALLOCATE),
- mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT),
- mProxy(NULL)
-{
- uid_t uid = VALUE_OR_FATAL(aidl2legacy_int32_t_uid_t(mClientAttributionSource.uid));
- pid_t pid = VALUE_OR_FATAL(aidl2legacy_int32_t_pid_t(mClientAttributionSource.pid));
- (void)set(inputSource, sampleRate, format, channelMask, frameCount, callback, user,
- notificationFrames, false /*threadCanCallJava*/, sessionId, transferType, flags,
- uid, pid, pAttributes, selectedDeviceId, selectedMicDirection,
- microphoneFieldDimension);
-}
-
-AudioRecord::AudioRecord(
- audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const AttributionSourceState& client,
- size_t frameCount,
const wp<IAudioRecordCallback>& callback,
uint32_t notificationFrames,
audio_session_t sessionId,
@@ -255,37 +222,6 @@
mDeviceCallback.clear();
}
}
-namespace {
-class LegacyCallbackWrapper : public AudioRecord::IAudioRecordCallback {
- const AudioRecord::legacy_callback_t mCallback;
- void* const mData;
-
- public:
- LegacyCallbackWrapper(AudioRecord::legacy_callback_t callback, void* user)
- : mCallback(callback), mData(user) {}
-
- size_t onMoreData(const AudioRecord::Buffer& buffer) override {
- AudioRecord::Buffer copy = buffer;
- mCallback(AudioRecord::EVENT_MORE_DATA, mData, &copy);
- return copy.size();
- }
-
- void onOverrun() override { mCallback(AudioRecord::EVENT_OVERRUN, mData, nullptr); }
-
- void onMarker(uint32_t markerPosition) override {
- mCallback(AudioRecord::EVENT_MARKER, mData, &markerPosition);
- }
-
- void onNewPos(uint32_t newPos) override {
- mCallback(AudioRecord::EVENT_NEW_POS, mData, &newPos);
- }
-
- void onNewIAudioRecord() override {
- mCallback(AudioRecord::EVENT_NEW_IAUDIORECORD, mData, nullptr);
- }
-};
-} // namespace
-
status_t AudioRecord::set(
audio_source_t inputSource,
uint32_t sampleRate,
@@ -479,37 +415,6 @@
return status;
}
-status_t AudioRecord::set(
- audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- legacy_callback_t callback,
- void* user,
- uint32_t notificationFrames,
- bool threadCanCallJava,
- audio_session_t sessionId,
- transfer_type transferType,
- audio_input_flags_t flags,
- uid_t uid,
- pid_t pid,
- const audio_attributes_t* pAttributes,
- audio_port_handle_t selectedDeviceId,
- audio_microphone_direction_t selectedMicDirection,
- float microphoneFieldDimension,
- int32_t maxSharedAudioHistoryMs)
-{
- if (callback != nullptr) {
- mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
- } else if (user) {
- LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
- }
- return set(inputSource, sampleRate, format, channelMask, frameCount, mLegacyCallbackWrapper,
- notificationFrames, threadCanCallJava, sessionId, transferType, flags, uid, pid,
- pAttributes, selectedDeviceId, selectedMicDirection, microphoneFieldDimension,
- maxSharedAudioHistoryMs);
-}
// -------------------------------------------------------------------------
status_t AudioRecord::start(AudioSystem::sync_event_t event, audio_session_t triggerSession)
diff --git a/media/libaudioclient/AudioTrack.cpp b/media/libaudioclient/AudioTrack.cpp
index 6ab8339..ffbb32f 100644
--- a/media/libaudioclient/AudioTrack.cpp
+++ b/media/libaudioclient/AudioTrack.cpp
@@ -325,45 +325,6 @@
}
};
}
-
-AudioTrack::AudioTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- audio_output_flags_t flags,
- legacy_callback_t callback,
- void* user,
- int32_t notificationFrames,
- audio_session_t sessionId,
- transfer_type transferType,
- const audio_offload_info_t *offloadInfo,
- const AttributionSourceState& attributionSource,
- const audio_attributes_t* pAttributes,
- bool doNotReconnect,
- float maxRequiredSpeed,
- audio_port_handle_t selectedDeviceId)
- : mStatus(NO_INIT),
- mState(STATE_STOPPED),
- mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT),
- mPausedPosition(0),
- mAudioTrackCallback(new AudioTrackCallback())
-{
- mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
- if (callback != nullptr) {
- mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
- } else if (user) {
- LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
- }
- mSetParams = std::unique_ptr<SetParams>{new SetParams{
- streamType, sampleRate, format, channelMask, frameCount, flags, mLegacyCallbackWrapper,
- notificationFrames, 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId,
- transferType, offloadInfo, attributionSource, pAttributes, doNotReconnect,
- maxRequiredSpeed, selectedDeviceId}};
-}
-
AudioTrack::AudioTrack(
audio_stream_type_t streamType,
uint32_t sampleRate,
@@ -397,44 +358,6 @@
doNotReconnect, maxRequiredSpeed, AUDIO_PORT_HANDLE_NONE}};
}
-AudioTrack::AudioTrack(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const sp<IMemory>& sharedBuffer,
- audio_output_flags_t flags,
- legacy_callback_t callback,
- void* user,
- int32_t notificationFrames,
- audio_session_t sessionId,
- transfer_type transferType,
- const audio_offload_info_t *offloadInfo,
- const AttributionSourceState& attributionSource,
- const audio_attributes_t* pAttributes,
- bool doNotReconnect,
- float maxRequiredSpeed)
- : mStatus(NO_INIT),
- mState(STATE_STOPPED),
- mPreviousPriority(ANDROID_PRIORITY_NORMAL),
- mPreviousSchedulingGroup(SP_DEFAULT),
- mPausedPosition(0),
- mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
- mAudioTrackCallback(new AudioTrackCallback())
-{
- mAttributes = AUDIO_ATTRIBUTES_INITIALIZER;
- if (callback) {
- mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
- } else if (user) {
- LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
- }
- mSetParams = std::unique_ptr<SetParams>{new SetParams{
- streamType, sampleRate, format, channelMask, 0 /*frameCount*/, flags,
- mLegacyCallbackWrapper, notificationFrames, sharedBuffer, false /*threadCanCallJava*/,
- sessionId, transferType, offloadInfo, attributionSource, pAttributes, doNotReconnect,
- maxRequiredSpeed, AUDIO_PORT_HANDLE_NONE}};
-}
-
void AudioTrack::onFirstRef() {
if (mSetParams) {
set(*mSetParams);
@@ -496,38 +419,6 @@
mDeviceCallback.clear();
}
}
-
-status_t AudioTrack::set(
- audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- audio_output_flags_t flags,
- legacy_callback_t callback,
- void * user,
- int32_t notificationFrames,
- const sp<IMemory>& sharedBuffer,
- bool threadCanCallJava,
- audio_session_t sessionId,
- transfer_type transferType,
- const audio_offload_info_t *offloadInfo,
- const AttributionSourceState& attributionSource,
- const audio_attributes_t* pAttributes,
- bool doNotReconnect,
- float maxRequiredSpeed,
- audio_port_handle_t selectedDeviceId)
-{
- if (callback) {
- mLegacyCallbackWrapper = sp<LegacyCallbackWrapper>::make(callback, user);
- } else if (user) {
- LOG_ALWAYS_FATAL("Callback data provided without callback pointer!");
- }
- return set(streamType, sampleRate,format, channelMask, frameCount, flags,
- mLegacyCallbackWrapper, notificationFrames, sharedBuffer, threadCanCallJava,
- sessionId, transferType, offloadInfo, attributionSource, pAttributes,
- doNotReconnect, maxRequiredSpeed, selectedDeviceId);
-}
status_t AudioTrack::set(
audio_stream_type_t streamType,
uint32_t sampleRate,
diff --git a/media/libaudioclient/TEST_MAPPING b/media/libaudioclient/TEST_MAPPING
index 3751f80..9fab870 100644
--- a/media/libaudioclient/TEST_MAPPING
+++ b/media/libaudioclient/TEST_MAPPING
@@ -4,6 +4,9 @@
"name": "audio_aidl_conversion_tests"
},
{
+ "name": "audio_aidl_status_tests"
+ },
+ {
"name": "CtsNativeMediaAAudioTestCases",
"options" : [
{
@@ -11,5 +14,22 @@
}
]
}
+ ],
+ "postsubmit": [
+ {
+ "name": "audiorecord_tests"
+ },
+ {
+ "name": "audioeffect_tests"
+ },
+ {
+ "name": "audiorouting_tests"
+ },
+ {
+ "name": "audioclient_serialization_tests"
+ },
+ {
+ "name": "trackplayerbase_tests"
+ }
]
}
diff --git a/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
index 2cdf4f6..d9c6df4 100644
--- a/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
+++ b/media/libaudioclient/aidl/android/media/AudioPortExtSys.aidl
@@ -31,4 +31,6 @@
AudioPortDeviceExtSys device;
/** System-only parameters when the port is an audio mix. */
AudioPortMixExtSys mix;
+ /** Framework audio session identifier. */
+ int session;
}
diff --git a/media/libaudioclient/aidl/android/media/EffectConfig.aidl b/media/libaudioclient/aidl/android/media/EffectConfig.aidl
new file mode 100644
index 0000000..5f62b73
--- /dev/null
+++ b/media/libaudioclient/aidl/android/media/EffectConfig.aidl
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package android.media;
+
+import android.media.audio.common.AudioConfigBase;
+
+/**
+ * Describes configuration of an audio effect. Input and output
+ * audio configurations are described separately because the effect
+ * can perform transformations on channel layouts, for example.
+ *
+ * {@hide}
+ */
+parcelable EffectConfig {
+ /** Configuration of the audio input of the effect. */
+ AudioConfigBase inputCfg;
+ /** Configuration of the audio output of the effect. */
+ AudioConfigBase outputCfg;
+ /**
+ * Specifies whether the effect is instantiated on an input stream,
+ * e.g. on the input from a microphone.
+ */
+ boolean isOnInputStream;
+}
diff --git a/media/libaudioclient/aidl/android/media/IEffect.aidl b/media/libaudioclient/aidl/android/media/IEffect.aidl
index 6ec0405..b7f70a6 100644
--- a/media/libaudioclient/aidl/android/media/IEffect.aidl
+++ b/media/libaudioclient/aidl/android/media/IEffect.aidl
@@ -16,6 +16,7 @@
package android.media;
+import android.media.EffectConfig;
import android.media.SharedFileRegion;
/**
@@ -63,6 +64,14 @@
*/
SharedFileRegion getCblk();
+ /**
+ * Provides audio configurations for effect's input and output,
+ * see EffectConfig parcelable for more details.
+ *
+ * @return a status_t code.
+ */
+ int getConfig(out EffectConfig config);
+
// When adding a new method, please review and update
// Effects.cpp AudioFlinger::EffectHandle::onTransact()
// Effects.cpp IEFFECT_BINDER_METHOD_MACRO_LIST
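A minimal client-side sketch of how the new getConfig() method and the EffectConfig parcelable above could be exercised from C++. It assumes the AIDL cpp backend's usual mapping (the `out` parcelable becomes a pointer parameter and the `int` result is returned through a trailing int32_t pointer) and an already-connected sp<android::media::IEffect> named `effect`; it is illustrative only and not part of this change.

    // Sketch under the assumptions stated above.
    android::media::EffectConfig config;
    int32_t ret = 0;  // status_t code reported by the effect
    android::binder::Status txn = effect->getConfig(&config, &ret);
    if (txn.isOk() && ret == android::OK) {
        ALOGD("effect instantiated on an %s stream",
              config.isOnInputStream ? "input" : "output");
    }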
diff --git a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
index 036e72e..5536bcb 100644
--- a/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
+++ b/media/libaudioclient/fuzzer/audioflinger_fuzzer.cpp
@@ -84,13 +84,15 @@
};
template <typename T, typename X, typename FUNC>
-std::vector<T> getFlags(const xsdc_enum_range<X> &range, const FUNC &func,
- const std::string &findString = {}) {
+std::vector<T> getFlags(const xsdc_enum_range<X>& range, const FUNC& func,
+ const std::string& findString = {},
+ const std::set<X>& excludedValues = {}) {
std::vector<T> vec;
for (const auto &xsdEnumVal : range) {
T enumVal;
std::string enumString = toString(xsdEnumVal);
if (enumString.find(findString) != std::string::npos &&
+ (excludedValues.find(xsdEnumVal) == excludedValues.end()) &&
func(enumString.c_str(), &enumVal)) {
vec.push_back(enumVal);
}
@@ -102,13 +104,29 @@
getFlags<audio_stream_type_t, xsd::AudioStreamType, decltype(audio_stream_type_from_string)>(
xsdc_enum_range<xsd::AudioStreamType>{}, audio_stream_type_from_string);
+/**
+ * AudioFormat - AUDIO_FORMAT_HE_AAC_V1 and AUDIO_FORMAT_HE_AAC_V2
+ * are excluded from kFormats[] in order to avoid the abort triggered
+ * for these two types of AudioFormat in
+ * AidlConversion::legacy2aidl_audio_format_t_AudioFormatDescription()
+ */
static const std::vector<audio_format_t> kFormats =
- getFlags<audio_format_t, xsd::AudioFormat, decltype(audio_format_from_string)>(
- xsdc_enum_range<xsd::AudioFormat>{}, audio_format_from_string);
+ getFlags<audio_format_t, xsd::AudioFormat, decltype(audio_format_from_string)>(
+ xsdc_enum_range<xsd::AudioFormat>{}, audio_format_from_string, {},
+ {xsd::AudioFormat::AUDIO_FORMAT_HE_AAC_V1,
+ xsd::AudioFormat::AUDIO_FORMAT_HE_AAC_V2});
+/**
+ * AudioChannelMask - AUDIO_CHANNEL_IN_6
+ * is excluded from kChannelMasks[] in order to avoid the abort triggered
+ * for this type of AudioChannelMask in
+ * AidlConversion::legacy2aidl_audio_channel_mask_t_AudioChannelLayout()
+ */
static const std::vector<audio_channel_mask_t> kChannelMasks =
- getFlags<audio_channel_mask_t, xsd::AudioChannelMask, decltype(audio_channel_mask_from_string)>(
- xsdc_enum_range<xsd::AudioChannelMask>{}, audio_channel_mask_from_string);
+ getFlags<audio_channel_mask_t, xsd::AudioChannelMask,
+ decltype(audio_channel_mask_from_string)>(
+ xsdc_enum_range<xsd::AudioChannelMask>{}, audio_channel_mask_from_string, {},
+ {xsd::AudioChannelMask::AUDIO_CHANNEL_IN_6});
static const std::vector<audio_usage_t> kUsages =
getFlags<audio_usage_t, xsd::AudioUsage, decltype(audio_usage_from_string)>(
@@ -126,9 +144,17 @@
getFlags<audio_gain_mode_t, xsd::AudioGainMode, decltype(audio_gain_mode_from_string)>(
xsdc_enum_range<xsd::AudioGainMode>{}, audio_gain_mode_from_string);
+/**
+ * AudioDevice - AUDIO_DEVICE_IN_AMBIENT and AUDIO_DEVICE_IN_COMMUNICATION
+ * are excluded from kDevices[] in order to avoid the abort triggered
+ * for these two types of AudioDevice in
+ * AidlConversion::aidl2legacy_AudioDeviceDescription_audio_devices_t()
+ */
static const std::vector<audio_devices_t> kDevices =
- getFlags<audio_devices_t, xsd::AudioDevice, decltype(audio_device_from_string)>(
- xsdc_enum_range<xsd::AudioDevice>{}, audio_device_from_string);
+ getFlags<audio_devices_t, xsd::AudioDevice, decltype(audio_device_from_string)>(
+ xsdc_enum_range<xsd::AudioDevice>{}, audio_device_from_string, {},
+ {xsd::AudioDevice::AUDIO_DEVICE_IN_AMBIENT,
+ xsd::AudioDevice::AUDIO_DEVICE_IN_COMMUNICATION});
static const std::vector<audio_input_flags_t> kInputFlags =
getFlags<audio_input_flags_t, xsd::AudioInOutFlag, decltype(audio_input_flag_from_string)>(
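The excludedValues parameter added to getFlags() above is a plain filter over the enum range: values in the set are skipped before the string-to-legacy conversion runs. A self-contained sketch of the same pattern, using a hypothetical enum and converter purely for illustration:

    #include <set>
    #include <vector>

    enum class Fruit { APPLE, PEAR, PLUM };  // hypothetical stand-in for an xsdc enum range
    static bool fruitToLegacy(Fruit f, int* out) { *out = static_cast<int>(f); return true; }

    // Same shape as getFlags(): convert every value except the excluded ones.
    static std::vector<int> collect(const std::vector<Fruit>& range,
                                    const std::set<Fruit>& excluded) {
        std::vector<int> vec;
        for (Fruit f : range) {
            int legacy;
            if (excluded.find(f) == excluded.end() && fruitToLegacy(f, &legacy)) {
                vec.push_back(legacy);
            }
        }
        return vec;
    }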
diff --git a/media/libaudioclient/include/media/AudioEffect.h b/media/libaudioclient/include/media/AudioEffect.h
index 56884a3..72c050b 100644
--- a/media/libaudioclient/include/media/AudioEffect.h
+++ b/media/libaudioclient/include/media/AudioEffect.h
@@ -136,7 +136,7 @@
* indicated by count.
* PERMISSION_DENIED could not get AudioFlinger interface
* NO_INIT effect library failed to initialize
- * BAD_VALUE invalid audio session or descriptor pointers
+ * BAD_VALUE invalid audio session, or invalid descriptor or count pointers
*
* Returned value
* *descriptor updated with descriptors of pre processings enabled by default
@@ -160,6 +160,7 @@
* NO_ERROR successful operation.
* PERMISSION_DENIED could not get AudioFlinger interface
* or caller lacks required permissions.
+ * BAD_VALUE invalid pointer to id
* Returned value
* *id: The new unique system-wide effect id.
*/
@@ -194,7 +195,7 @@
* PERMISSION_DENIED could not get AudioFlinger interface
* or caller lacks required permissions.
* NO_INIT effect library failed to initialize.
- * BAD_VALUE invalid source, type uuid or implementation uuid.
+ * BAD_VALUE invalid source, type uuid or implementation uuid, or id pointer
* NAME_NOT_FOUND no effect with this uuid or type found.
*
* Returned value
@@ -233,7 +234,7 @@
* PERMISSION_DENIED could not get AudioFlinger interface
* or caller lacks required permissions.
* NO_INIT effect library failed to initialize.
- * BAD_VALUE invalid type uuid or implementation uuid.
+ * BAD_VALUE invalid type uuid or implementation uuid, or id pointer
* NAME_NOT_FOUND no effect with this uuid or type found.
*
* Returned value
@@ -517,7 +518,7 @@
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation.
* - INVALID_OPERATION: the application does not have control of the effect engine.
- * - BAD_VALUE: invalid parameter identifier or value.
+ * - BAD_VALUE: invalid parameter structure pointer, or invalid identifier or value.
* - DEAD_OBJECT: the effect engine has been deleted.
*/
virtual status_t setParameter(effect_param_t *param);
@@ -562,7 +563,7 @@
* Returned status (from utils/Errors.h) can be:
* - NO_ERROR: successful operation.
* - INVALID_OPERATION: the AudioEffect was not successfully initialized.
- * - BAD_VALUE: invalid parameter identifier.
+ * - BAD_VALUE: invalid parameter structure pointer, or invalid parameter identifier.
* - DEAD_OBJECT: the effect engine has been deleted.
*/
virtual status_t getParameter(effect_param_t *param);
@@ -577,6 +578,25 @@
uint32_t *replySize,
void *replyData);
+ /* Retrieves the configuration of the effect.
+ *
+ * Parameters:
+ * inputCfg: pointer to audio_config_base_t structure receiving input
+ * configuration of the effect
+ * outputCfg: pointer to audio_config_base_t structure receiving output
+ * configuration of the effect
+ *
+ * Channel masks of the returned configs are "input" or "output" depending
+ * on the direction of the stream that the effect is attached to.
+ *
+ * Returned status (from utils/Errors.h) can be:
+ * - NO_ERROR: successful operation.
+ * - INVALID_OPERATION: the AudioEffect was not successfully initialized.
+ * - BAD_VALUE: null config pointers
+ * - DEAD_OBJECT: the effect engine has been deleted.
+ */
+ virtual status_t getConfigs(audio_config_base_t *inputCfg,
+ audio_config_base_t *outputCfg);
/*
* Utility functions.
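A hypothetical caller of the getConfigs() method declared above, assuming `effect` is a successfully initialized sp<AudioEffect> and using the AUDIO_CONFIG_BASE_INITIALIZER macro from system/audio.h; a sketch only, not part of this change.

    audio_config_base_t inputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
    audio_config_base_t outputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
    const status_t status = effect->getConfigs(&inputCfg, &outputCfg);
    if (status == NO_ERROR) {
        ALOGD("effect input %u Hz, output %u Hz",
              inputCfg.sample_rate, outputCfg.sample_rate);
    }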
diff --git a/media/libaudioclient/include/media/AudioRecord.h b/media/libaudioclient/include/media/AudioRecord.h
index cb05dd9..5a1ff65 100644
--- a/media/libaudioclient/include/media/AudioRecord.h
+++ b/media/libaudioclient/include/media/AudioRecord.h
@@ -46,27 +46,6 @@
{
public:
- /* Events used by AudioRecord callback function (legacy_callback_t).
- * Keep in sync with frameworks/base/media/java/android/media/AudioRecord.java NATIVE_EVENT_*.
- */
- enum event_type {
- EVENT_MORE_DATA = 0, // Request to read available data from buffer.
- // If this event is delivered but the callback handler
- // does not want to read the available data, the handler must
- // explicitly ignore the event by setting frameCount to zero.
- EVENT_OVERRUN = 1, // Buffer overrun occurred.
- EVENT_MARKER = 2, // Record head is at the specified marker position
- // (See setMarkerPosition()).
- EVENT_NEW_POS = 3, // Record head is at a new position
- // (See setPositionUpdatePeriod()).
- EVENT_NEW_IAUDIORECORD = 4, // IAudioRecord was re-created, either due to re-routing and
- // voluntary invalidation by mediaserver, or mediaserver crash.
- };
-
- /* Client should declare a Buffer and pass address to obtainBuffer()
- * and releaseBuffer(). See also legacy_callback_t for EVENT_MORE_DATA.
- */
-
class Buffer
{
friend AudioRecord;
@@ -122,7 +101,6 @@
* - EVENT_NEW_IAUDIORECORD: unused.
*/
- typedef void (*legacy_callback_t)(int event, void* user, void *info);
class IAudioRecordCallback : public virtual RefBase {
friend AudioRecord;
@@ -226,24 +204,6 @@
float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
- AudioRecord(audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const android::content::AttributionSourceState& client,
- size_t frameCount,
- legacy_callback_t callback,
- void* user,
- uint32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- const audio_attributes_t* pAttributes = nullptr,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- audio_microphone_direction_t
- selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
- float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT);
-
/* Terminates the AudioRecord and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioRecord.
*/
@@ -286,27 +246,6 @@
float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
int32_t maxSharedAudioHistoryMs = 0);
- status_t set(audio_source_t inputSource,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- legacy_callback_t callback,
- void* user,
- uint32_t notificationFrames = 0,
- bool threadCanCallJava = false,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
- uid_t uid = AUDIO_UID_INVALID,
- pid_t pid = -1,
- const audio_attributes_t* pAttributes = nullptr,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE,
- audio_microphone_direction_t
- selectedMicDirection = MIC_DIRECTION_UNSPECIFIED,
- float selectedMicFieldDimension = MIC_FIELD_DIMENSION_DEFAULT,
- int32_t maxSharedAudioHistoryMs = 0);
-
/* Result of constructing the AudioRecord. This must be checked for successful initialization
* before using any AudioRecord API (except for set()), because using
* an uninitialized AudioRecord produces undefined results.
diff --git a/media/libaudioclient/include/media/AudioTrack.h b/media/libaudioclient/include/media/AudioTrack.h
index 9f540e6..b6ee483 100644
--- a/media/libaudioclient/include/media/AudioTrack.h
+++ b/media/libaudioclient/include/media/AudioTrack.h
@@ -148,7 +148,6 @@
* - EVENT_NEW_TIMESTAMP: pointer to const AudioTimestamp.
*/
- typedef void (*legacy_callback_t)(int event, void* user, void* info);
class IAudioTrackCallback : public virtual RefBase {
friend AudioTrack;
protected:
@@ -343,26 +342,6 @@
float maxRequiredSpeed = 1.0f,
audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
-
- AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- audio_output_flags_t flags,
- legacy_callback_t cbf,
- void* user = nullptr,
- int32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = nullptr,
- const AttributionSourceState& attributionSource =
- AttributionSourceState(),
- const audio_attributes_t* pAttributes = nullptr,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
-
/* Creates an audio track and registers it with AudioFlinger.
* With this constructor, the track is configured for static buffer mode.
* Data to be rendered is passed in a shared memory buffer
@@ -391,25 +370,6 @@
bool doNotReconnect = false,
float maxRequiredSpeed = 1.0f);
-
- AudioTrack( audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- const sp<IMemory>& sharedBuffer,
- audio_output_flags_t flags,
- legacy_callback_t cbf,
- void* user = nullptr,
- int32_t notificationFrames = 0,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = nullptr,
- const AttributionSourceState& attributionSource =
- AttributionSourceState(),
- const audio_attributes_t* pAttributes = nullptr,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f);
-
/* Terminates the AudioTrack and unregisters it from AudioFlinger.
* Also destroys all resources associated with the AudioTrack.
*/
@@ -490,28 +450,8 @@
}
void onFirstRef() override;
public:
- status_t set(audio_stream_type_t streamType,
- uint32_t sampleRate,
- audio_format_t format,
- audio_channel_mask_t channelMask,
- size_t frameCount,
- audio_output_flags_t flags,
- legacy_callback_t callback,
- void * user = nullptr,
- int32_t notificationFrames = 0,
- const sp<IMemory>& sharedBuffer = 0,
- bool threadCanCallJava = false,
- audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
- transfer_type transferType = TRANSFER_DEFAULT,
- const audio_offload_info_t *offloadInfo = nullptr,
- const AttributionSourceState& attributionSource =
- AttributionSourceState(),
- const audio_attributes_t* pAttributes = nullptr,
- bool doNotReconnect = false,
- float maxRequiredSpeed = 1.0f,
- audio_port_handle_t selectedDeviceId = AUDIO_PORT_HANDLE_NONE);
-
- // FIXME(b/169889714): Vendor code depends on the old method signature at link time
+ typedef void (*legacy_callback_t)(int event, void* user, void* info);
+ // FIXME(b/169889714): Vendor code depends on the old method signature at link time
status_t set(audio_stream_type_t streamType,
uint32_t sampleRate,
audio_format_t format,
diff --git a/media/libaudioclient/tests/Android.bp b/media/libaudioclient/tests/Android.bp
index 891293e..6535b5b 100644
--- a/media/libaudioclient/tests/Android.bp
+++ b/media/libaudioclient/tests/Android.bp
@@ -93,3 +93,107 @@
],
data: ["record_test_input_*.txt"],
}
+
+cc_defaults {
+ name: "libaudioclient_gtests_defaults",
+ cflags: [
+ "-Wall",
+ "-Werror",
+ ],
+ shared_libs: [
+ "capture_state_listener-aidl-cpp",
+ "framework-permission-aidl-cpp",
+ "libbase",
+ "libbinder",
+ "libcgrouprc",
+ "libcutils",
+ "libdl",
+ "liblog",
+ "libmedia",
+ "libmediametrics",
+ "libmediautils",
+ "libmedia_helper",
+ "libnblog",
+ "libprocessgroup",
+ "libshmemcompat",
+ "libstagefright_foundation",
+ "libutils",
+ "libvibrator",
+ "mediametricsservice-aidl-cpp",
+ "packagemanager_aidl-cpp",
+ "shared-file-region-aidl-cpp",
+ ],
+ static_libs: [
+ "android.hardware.audio.common@7.0-enums",
+ "android.media.audio.common.types-V1-cpp",
+ "audioclient-types-aidl-cpp",
+ "audioflinger-aidl-cpp",
+ "audiopolicy-aidl-cpp",
+ "audiopolicy-types-aidl-cpp",
+ "av-types-aidl-cpp",
+ "effect-aidl-cpp",
+ "libaudioclient",
+ "libaudioclient_aidl_conversion",
+ "libaudiofoundation",
+ "libaudiomanager",
+ "libaudiopolicy",
+ "libaudioutils",
+ ],
+ data: ["bbb*.raw"],
+ test_config_template: "audio_test_template.xml",
+ test_suites: ["device-tests"],
+}
+
+cc_test {
+ name: "audiorecord_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audiorecord_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+}
+
+cc_test {
+ name: "audiotrack_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audiotrack_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+}
+
+cc_test {
+ name: "audioeffect_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audioeffect_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+}
+
+cc_test {
+ name: "audiorouting_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audiorouting_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+ shared_libs: [
+ "libxml2",
+ ],
+}
+
+cc_test {
+ name: "audioclient_serialization_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: [
+ "audioclient_serialization_tests.cpp",
+ "audio_test_utils.cpp",
+ ],
+}
+
+cc_test {
+ name: "trackplayerbase_tests",
+ defaults: ["libaudioclient_gtests_defaults"],
+ srcs: ["trackplayerbase_tests.cpp"],
+}
diff --git a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
index 997f62a..f3361c1 100644
--- a/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_legacy_conversion_tests.cpp
@@ -16,8 +16,8 @@
#include <gtest/gtest.h>
-#include <media/AudioCommonTypes.h>
#include <media/AidlConversion.h>
+#include <media/AudioCommonTypes.h>
using namespace android;
using namespace android::aidl_utils;
@@ -31,7 +31,8 @@
namespace {
-template<typename T> size_t hash(const T& t) {
+template <typename T>
+size_t hash(const T& t) {
return std::hash<T>{}(t);
}
@@ -52,10 +53,8 @@
return AudioChannelLayout::make<AudioChannelLayout::Tag::layoutMask>(
// Use channels that exist both for input and output,
// but doesn't form a known layout mask.
- AudioChannelLayout::CHANNEL_FRONT_LEFT |
- AudioChannelLayout::CHANNEL_FRONT_RIGHT |
- AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT |
- AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
+ AudioChannelLayout::CHANNEL_FRONT_LEFT | AudioChannelLayout::CHANNEL_FRONT_RIGHT |
+ AudioChannelLayout::CHANNEL_TOP_SIDE_LEFT | AudioChannelLayout::CHANNEL_TOP_SIDE_RIGHT);
}
AudioChannelLayout make_ACL_ChannelIndex2() {
@@ -74,7 +73,7 @@
}
AudioDeviceDescription make_AudioDeviceDescription(AudioDeviceType type,
- const std::string& connection = "") {
+ const std::string& connection = "") {
AudioDeviceDescription result;
result.type = type;
result.connection = connection;
@@ -95,12 +94,12 @@
AudioDeviceDescription make_ADD_WiredHeadset() {
return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_ANALOG());
+ AudioDeviceDescription::CONNECTION_ANALOG());
}
AudioDeviceDescription make_ADD_BtScoHeadset() {
return make_AudioDeviceDescription(AudioDeviceType::OUT_HEADSET,
- AudioDeviceDescription::CONNECTION_BT_SCO());
+ AudioDeviceDescription::CONNECTION_BT_SCO());
}
AudioFormatDescription make_AudioFormatDescription(AudioFormatType type) {
@@ -121,8 +120,7 @@
return result;
}
-AudioFormatDescription make_AudioFormatDescription(PcmType transport,
- const std::string& encoding) {
+AudioFormatDescription make_AudioFormatDescription(PcmType transport, const std::string& encoding) {
auto result = make_AudioFormatDescription(encoding);
result.pcm = transport;
return result;
@@ -163,7 +161,8 @@
// is identical to the same format description constructed by the framework.
class HashIdentityTest : public ::testing::Test {
public:
- template<typename T> void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
+ template <typename T>
+ void verifyHashIdentity(const std::vector<std::function<T()>>& valueGens) {
for (size_t i = 0; i < valueGens.size(); ++i) {
for (size_t j = 0; j < valueGens.size(); ++j) {
if (i == j) {
@@ -177,27 +176,25 @@
};
TEST_F(HashIdentityTest, AudioChannelLayoutHashIdentity) {
- verifyHashIdentity<AudioChannelLayout>({
- make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
- make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
- make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
+ verifyHashIdentity<AudioChannelLayout>({make_ACL_None, make_ACL_Invalid, make_ACL_Stereo,
+ make_ACL_LayoutArbitrary, make_ACL_ChannelIndex2,
+ make_ACL_ChannelIndexArbitrary, make_ACL_VoiceCall});
}
TEST_F(HashIdentityTest, AudioDeviceDescriptionHashIdentity) {
- verifyHashIdentity<AudioDeviceDescription>({
- make_ADD_None, make_ADD_DefaultIn, make_ADD_DefaultOut, make_ADD_WiredHeadset,
- make_ADD_BtScoHeadset});
+ verifyHashIdentity<AudioDeviceDescription>({make_ADD_None, make_ADD_DefaultIn,
+ make_ADD_DefaultOut, make_ADD_WiredHeadset,
+ make_ADD_BtScoHeadset});
}
TEST_F(HashIdentityTest, AudioFormatDescriptionHashIdentity) {
- verifyHashIdentity<AudioFormatDescription>({
- make_AFD_Default, make_AFD_Invalid, make_AFD_Pcm16Bit, make_AFD_Bitstream,
- make_AFD_Encap, make_AFD_Encap_with_Enc});
+ verifyHashIdentity<AudioFormatDescription>({make_AFD_Default, make_AFD_Invalid,
+ make_AFD_Pcm16Bit, make_AFD_Bitstream,
+ make_AFD_Encap, make_AFD_Encap_with_Enc});
}
using ChannelLayoutParam = std::tuple<AudioChannelLayout, bool /*isInput*/>;
-class AudioChannelLayoutRoundTripTest :
- public testing::TestWithParam<ChannelLayoutParam> {};
+class AudioChannelLayoutRoundTripTest : public testing::TestWithParam<ChannelLayoutParam> {};
TEST_P(AudioChannelLayoutRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = std::get<0>(GetParam());
const bool isInput = std::get<1>(GetParam());
@@ -207,21 +204,20 @@
ASSERT_TRUE(convBack.ok());
EXPECT_EQ(initial, convBack.value());
}
-INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip,
- AudioChannelLayoutRoundTripTest,
- testing::Combine(
- testing::Values(AudioChannelLayout{}, make_ACL_Invalid(), make_ACL_Stereo(),
- make_ACL_LayoutArbitrary(), make_ACL_ChannelIndex2(),
- make_ACL_ChannelIndexArbitrary()),
- testing::Values(false, true)));
-INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip,
- AudioChannelLayoutRoundTripTest,
- // In legacy constants the voice call is only defined for input.
- testing::Combine(testing::Values(make_ACL_VoiceCall()), testing::Values(true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutRoundTrip, AudioChannelLayoutRoundTripTest,
+ testing::Combine(testing::Values(AudioChannelLayout{}, make_ACL_Invalid(),
+ make_ACL_Stereo(),
+ make_ACL_LayoutArbitrary(),
+ make_ACL_ChannelIndex2(),
+ make_ACL_ChannelIndexArbitrary()),
+ testing::Values(false, true)));
+INSTANTIATE_TEST_SUITE_P(AudioChannelVoiceRoundTrip, AudioChannelLayoutRoundTripTest,
+ // In legacy constants the voice call is only defined for input.
+ testing::Combine(testing::Values(make_ACL_VoiceCall()),
+ testing::Values(true)));
using ChannelLayoutEdgeCaseParam = std::tuple<int /*legacy*/, bool /*isInput*/, bool /*isValid*/>;
-class AudioChannelLayoutEdgeCaseTest :
- public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
+class AudioChannelLayoutEdgeCaseTest : public testing::TestWithParam<ChannelLayoutEdgeCaseParam> {};
TEST_P(AudioChannelLayoutEdgeCaseTest, Legacy2Aidl) {
const audio_channel_mask_t legacy = static_cast<audio_channel_mask_t>(std::get<0>(GetParam()));
const bool isInput = std::get<1>(GetParam());
@@ -229,8 +225,8 @@
auto conv = legacy2aidl_audio_channel_mask_t_AudioChannelLayout(legacy, isInput);
EXPECT_EQ(isValid, conv.ok());
}
-INSTANTIATE_TEST_SUITE_P(AudioChannelLayoutEdgeCase,
- AudioChannelLayoutEdgeCaseTest,
+INSTANTIATE_TEST_SUITE_P(
+ AudioChannelLayoutEdgeCase, AudioChannelLayoutEdgeCaseTest,
testing::Values(
// Valid legacy input masks.
std::make_tuple(AUDIO_CHANNEL_IN_VOICE_UPLINK_MONO, true, true),
@@ -240,25 +236,26 @@
std::make_tuple(
// This has the same numerical representation as Mask 'A' below
AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
- AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT, false, true),
+ AUDIO_CHANNEL_OUT_TOP_FRONT_RIGHT,
+ false, true),
std::make_tuple(
// This has the same numerical representation as Mask 'B' below
AUDIO_CHANNEL_OUT_FRONT_CENTER | AUDIO_CHANNEL_OUT_LOW_FREQUENCY |
- AUDIO_CHANNEL_OUT_TOP_BACK_LEFT, false, true),
+ AUDIO_CHANNEL_OUT_TOP_BACK_LEFT,
+ false, true),
// Invalid legacy input masks.
std::make_tuple(AUDIO_CHANNEL_IN_6, true, false),
- std::make_tuple(
- AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
- std::make_tuple(
- AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
- AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS, true, false),
+ std::make_tuple(AUDIO_CHANNEL_IN_6 | AUDIO_CHANNEL_IN_FRONT_PROCESSED, true, false),
+ std::make_tuple(AUDIO_CHANNEL_IN_PRESSURE | AUDIO_CHANNEL_IN_X_AXIS |
+ AUDIO_CHANNEL_IN_Y_AXIS | AUDIO_CHANNEL_IN_Z_AXIS,
+ true, false),
std::make_tuple( // Mask 'A'
AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_UPLINK, true, false),
std::make_tuple( // Mask 'B'
AUDIO_CHANNEL_IN_STEREO | AUDIO_CHANNEL_IN_VOICE_DNLINK, true, false)));
-class AudioDeviceDescriptionRoundTripTest :
- public testing::TestWithParam<AudioDeviceDescription> {};
+class AudioDeviceDescriptionRoundTripTest : public testing::TestWithParam<AudioDeviceDescription> {
+};
TEST_P(AudioDeviceDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = GetParam();
auto conv = aidl2legacy_AudioDeviceDescription_audio_devices_t(initial);
@@ -267,13 +264,13 @@
ASSERT_TRUE(convBack.ok());
EXPECT_EQ(initial, convBack.value());
}
-INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip,
- AudioDeviceDescriptionRoundTripTest,
- testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
- make_ADD_DefaultOut(), make_ADD_WiredHeadset(), make_ADD_BtScoHeadset()));
+INSTANTIATE_TEST_SUITE_P(AudioDeviceDescriptionRoundTrip, AudioDeviceDescriptionRoundTripTest,
+ testing::Values(AudioDeviceDescription{}, make_ADD_DefaultIn(),
+ make_ADD_DefaultOut(), make_ADD_WiredHeadset(),
+ make_ADD_BtScoHeadset()));
-class AudioFormatDescriptionRoundTripTest :
- public testing::TestWithParam<AudioFormatDescription> {};
+class AudioFormatDescriptionRoundTripTest : public testing::TestWithParam<AudioFormatDescription> {
+};
TEST_P(AudioFormatDescriptionRoundTripTest, Aidl2Legacy2Aidl) {
const auto initial = GetParam();
auto conv = aidl2legacy_AudioFormatDescription_audio_format_t(initial);
@@ -282,6 +279,6 @@
ASSERT_TRUE(convBack.ok());
EXPECT_EQ(initial, convBack.value());
}
-INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip,
- AudioFormatDescriptionRoundTripTest,
- testing::Values(make_AFD_Invalid(), AudioFormatDescription{}, make_AFD_Pcm16Bit()));
+INSTANTIATE_TEST_SUITE_P(AudioFormatDescriptionRoundTrip, AudioFormatDescriptionRoundTripTest,
+ testing::Values(make_AFD_Invalid(), AudioFormatDescription{},
+ make_AFD_Pcm16Bit()));
diff --git a/media/libaudioclient/tests/audio_aidl_status_tests.cpp b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
index 5517091..8a7e6c1 100644
--- a/media/libaudioclient/tests/audio_aidl_status_tests.cpp
+++ b/media/libaudioclient/tests/audio_aidl_status_tests.cpp
@@ -37,25 +37,10 @@
// Special status values are preserved on round trip.
TEST(audio_aidl_status_tests, statusRoundTripSpecialValues) {
- for (status_t status : {
- OK,
- UNKNOWN_ERROR,
- NO_MEMORY,
- INVALID_OPERATION,
- BAD_VALUE,
- BAD_TYPE,
- NAME_NOT_FOUND,
- PERMISSION_DENIED,
- NO_INIT,
- ALREADY_EXISTS,
- DEAD_OBJECT,
- FAILED_TRANSACTION,
- BAD_INDEX,
- NOT_ENOUGH_DATA,
- WOULD_BLOCK,
- TIMED_OUT,
- UNKNOWN_TRANSACTION,
- FDS_NOT_ALLOWED}) {
+ for (status_t status :
+ {OK, UNKNOWN_ERROR, NO_MEMORY, INVALID_OPERATION, BAD_VALUE, BAD_TYPE, NAME_NOT_FOUND,
+ PERMISSION_DENIED, NO_INIT, ALREADY_EXISTS, DEAD_OBJECT, FAILED_TRANSACTION, BAD_INDEX,
+ NOT_ENOUGH_DATA, WOULD_BLOCK, TIMED_OUT, UNKNOWN_TRANSACTION, FDS_NOT_ALLOWED}) {
ASSERT_EQ(status, statusTFromBinderStatus(binderStatusFromStatusT(status)));
}
}
@@ -63,47 +48,29 @@
// Binder exceptions show as an error (not fixed at this time); these come fromExceptionCode().
TEST(audio_aidl_status_tests, binderStatusExceptions) {
for (int exceptionCode : {
- //Status::EX_NONE,
- Status::EX_SECURITY,
- Status::EX_BAD_PARCELABLE,
- Status::EX_ILLEGAL_ARGUMENT,
- Status::EX_NULL_POINTER,
- Status::EX_ILLEGAL_STATE,
- Status::EX_NETWORK_MAIN_THREAD,
- Status::EX_UNSUPPORTED_OPERATION,
- //Status::EX_SERVICE_SPECIFIC, -- tested fromServiceSpecificError()
- Status::EX_PARCELABLE,
- // This is special and Java specific; see Parcel.java.
- Status::EX_HAS_REPLY_HEADER,
- // This is special, and indicates to C++ binder proxies that the
- // transaction has failed at a low level.
- //Status::EX_TRANSACTION_FAILED, -- tested fromStatusT().
- }) {
+ // Status::EX_NONE,
+ Status::EX_SECURITY, Status::EX_BAD_PARCELABLE, Status::EX_ILLEGAL_ARGUMENT,
+ Status::EX_NULL_POINTER, Status::EX_ILLEGAL_STATE, Status::EX_NETWORK_MAIN_THREAD,
+ Status::EX_UNSUPPORTED_OPERATION,
+ // Status::EX_SERVICE_SPECIFIC, -- tested fromServiceSpecificError()
+ Status::EX_PARCELABLE,
+ // This is special and Java specific; see Parcel.java.
+ Status::EX_HAS_REPLY_HEADER,
+ // This is special, and indicates to C++ binder proxies that the
+ // transaction has failed at a low level.
+ // Status::EX_TRANSACTION_FAILED, -- tested fromStatusT().
+ }) {
ASSERT_NE(OK, statusTFromBinderStatus(Status::fromExceptionCode(exceptionCode)));
}
}
// Binder transaction errors show exactly in status_t; these come fromStatusT().
TEST(audio_aidl_status_tests, binderStatusTransactionError) {
- for (status_t status : {
- OK, // Note: fromStatusT does check if this is 0, so this is no error.
- UNKNOWN_ERROR,
- NO_MEMORY,
- INVALID_OPERATION,
- BAD_VALUE,
- BAD_TYPE,
- NAME_NOT_FOUND,
- PERMISSION_DENIED,
- NO_INIT,
- ALREADY_EXISTS,
- DEAD_OBJECT,
- FAILED_TRANSACTION,
- BAD_INDEX,
- NOT_ENOUGH_DATA,
- WOULD_BLOCK,
- TIMED_OUT,
- UNKNOWN_TRANSACTION,
- FDS_NOT_ALLOWED}) {
+ for (status_t status :
+ {OK, // Note: fromStatusT does check if this is 0, so this is no error.
+ UNKNOWN_ERROR, NO_MEMORY, INVALID_OPERATION, BAD_VALUE, BAD_TYPE, NAME_NOT_FOUND,
+ PERMISSION_DENIED, NO_INIT, ALREADY_EXISTS, DEAD_OBJECT, FAILED_TRANSACTION, BAD_INDEX,
+ NOT_ENOUGH_DATA, WOULD_BLOCK, TIMED_OUT, UNKNOWN_TRANSACTION, FDS_NOT_ALLOWED}) {
ASSERT_EQ(status, statusTFromBinderStatus(Status::fromStatusT(status)));
}
}
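The property these tests pin down can be summarized in a couple of lines; the following sketch reuses the same conversion helpers and is illustrative only:

    // status_t values survive a round trip through binder::Status and back.
    const status_t original = DEAD_OBJECT;
    const binder::Status wrapped = binderStatusFromStatusT(original);
    assert(statusTFromBinderStatus(wrapped) == original);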
diff --git a/media/libaudioclient/tests/audio_test_template.xml b/media/libaudioclient/tests/audio_test_template.xml
new file mode 100644
index 0000000..ed0cb21
--- /dev/null
+++ b/media/libaudioclient/tests/audio_test_template.xml
@@ -0,0 +1,32 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2022 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<configuration description="Unit test configuration for {MODULE}">
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="{MODULE}" value="/data/local/tmp/{MODULE}" />
+
+ <!-- Files used for audio testing -->
+ <option name="push-file" key="bbb_1ch_8kHz_s16le.raw" value="/data/local/tmp/bbb_1ch_8kHz_s16le.raw" />
+ <option name="push-file" key="bbb_2ch_24kHz_s16le.raw" value="/data/local/tmp/bbb_2ch_24kHz_s16le.raw" />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.GTest" >
+ <option name="native-test-device-path" value="/data/local/tmp" />
+ <option name="module-name" value="{MODULE}" />
+ </test>
+</configuration>
diff --git a/media/libaudioclient/tests/audio_test_utils.cpp b/media/libaudioclient/tests/audio_test_utils.cpp
new file mode 100644
index 0000000..d7ce014
--- /dev/null
+++ b/media/libaudioclient/tests/audio_test_utils.cpp
@@ -0,0 +1,787 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AudioTestUtils"
+
+#include <utils/Log.h>
+
+#include "audio_test_utils.h"
+
+// Creates a random temp file under /data/local/tmp and returns its descriptor via fd.
+void CreateRandomFile(int& fd) {
+ std::string filename = "/data/local/tmp/record-XXXXXX";
+ fd = mkstemp(filename.data());
+}
+
+void OnAudioDeviceUpdateNotifier::onAudioDeviceUpdate(audio_io_handle_t audioIo,
+ audio_port_handle_t deviceId) {
+ std::unique_lock<std::mutex> lock{mMutex};
+ ALOGD("%s audioIo=%d deviceId=%d", __func__, audioIo, deviceId);
+ mAudioIo = audioIo;
+ mDeviceId = deviceId;
+ mCondition.notify_all();
+}
+
+status_t OnAudioDeviceUpdateNotifier::waitForAudioDeviceCb() {
+ std::unique_lock<std::mutex> lock{mMutex};
+ if (mAudioIo == AUDIO_IO_HANDLE_NONE) {
+ mCondition.wait_for(lock, std::chrono::milliseconds(500));
+ if (mAudioIo == AUDIO_IO_HANDLE_NONE) return TIMED_OUT;
+ }
+ return OK;
+}
+
+AudioPlayback::AudioPlayback(uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask, audio_output_flags_t flags,
+ audio_session_t sessionId, AudioTrack::transfer_type transferType,
+ audio_attributes_t* attributes)
+ : mSampleRate(sampleRate),
+ mFormat(format),
+ mChannelMask(channelMask),
+ mFlags(flags),
+ mSessionId(sessionId),
+ mTransferType(transferType),
+ mAttributes(attributes) {
+ mStopPlaying = false;
+ mBytesUsedSoFar = 0;
+ mState = PLAY_NO_INIT;
+ mMemCapacity = 0;
+ mMemoryDealer = nullptr;
+ mMemory = nullptr;
+}
+
+AudioPlayback::~AudioPlayback() {
+ stop();
+}
+
+status_t AudioPlayback::create() {
+ if (mState != PLAY_NO_INIT) return INVALID_OPERATION;
+ std::string packageName{"AudioPlayback"};
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageName;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
+ if (mTransferType == AudioTrack::TRANSFER_OBTAIN) {
+ mTrack = new AudioTrack(attributionSource);
+ mTrack->set(AUDIO_STREAM_MUSIC, mSampleRate, mFormat, mChannelMask, 0 /* frameCount */,
+ mFlags, nullptr /* callback */, 0 /* notificationFrames */,
+ nullptr /* sharedBuffer */, false /*canCallJava */, mSessionId, mTransferType,
+ nullptr /* offloadInfo */, attributionSource, mAttributes);
+ } else if (mTransferType == AudioTrack::TRANSFER_SHARED) {
+ mTrack = new AudioTrack(AUDIO_STREAM_MUSIC, mSampleRate, mFormat, mChannelMask, mMemory,
+ mFlags, wp<AudioTrack::IAudioTrackCallback>::fromExisting(this), 0,
+ mSessionId, mTransferType, nullptr, attributionSource, mAttributes);
+ } else {
+ ALOGE("Required Transfer type not existed");
+ return INVALID_OPERATION;
+ }
+ mTrack->setCallerName(packageName);
+ status_t status = mTrack->initCheck();
+ if (NO_ERROR == status) mState = PLAY_READY;
+ return status;
+}
+
+status_t AudioPlayback::loadResource(const char* name) {
+ status_t status = OK;
+ FILE* fp = fopen(name, "rbe");
+ struct stat buf {};
+ if (fp && !fstat(fileno(fp), &buf)) {
+ mMemCapacity = buf.st_size;
+ mMemoryDealer = new MemoryDealer(mMemCapacity, "AudioPlayback");
+ if (nullptr == mMemoryDealer.get()) {
+ ALOGE("couldn't get MemoryDealer!");
+ fclose(fp);
+ return NO_MEMORY;
+ }
+ mMemory = mMemoryDealer->allocate(mMemCapacity);
+ if (nullptr == mMemory.get()) {
+ ALOGE("couldn't get IMemory!");
+ fclose(fp);
+ return NO_MEMORY;
+ }
+ uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mMemory->unsecurePointer()));
+ fread(ipBuffer, sizeof(uint8_t), mMemCapacity, fp);
+ } else {
+ ALOGE("unable to open input file %s", name);
+ status = NAME_NOT_FOUND;
+ }
+ if (fp) fclose(fp);
+ return status;
+}
+
+sp<AudioTrack> AudioPlayback::getAudioTrackHandle() {
+ return (PLAY_NO_INIT != mState) ? mTrack : nullptr;
+}
+
+status_t AudioPlayback::start() {
+ status_t status;
+ if (PLAY_READY != mState) {
+ return INVALID_OPERATION;
+ } else {
+ status = mTrack->start();
+ if (OK == status) {
+ mState = PLAY_STARTED;
+ LOG_FATAL_IF(false != mTrack->stopped());
+ }
+ }
+ return status;
+}
+
+void AudioPlayback::onBufferEnd() {
+ std::unique_lock<std::mutex> lock{mMutex};
+ mStopPlaying = true;
+ mCondition.notify_all();
+}
+
+status_t AudioPlayback::fillBuffer() {
+ if (PLAY_STARTED != mState && PLAY_STOPPED != mState) return INVALID_OPERATION;
+ int retry = 25;
+ uint8_t* ipBuffer = static_cast<uint8_t*>(static_cast<void*>(mMemory->unsecurePointer()));
+ size_t nonContig = 0;
+ size_t bytesAvailable = mMemCapacity - mBytesUsedSoFar;
+ while (bytesAvailable > 0) {
+ AudioTrack::Buffer trackBuffer;
+ trackBuffer.frameCount = mTrack->frameCount() * 2;
+ status_t status = mTrack->obtainBuffer(&trackBuffer, retry, &nonContig);
+ if (OK == status) {
+ size_t bytesToCopy = std::min(bytesAvailable, trackBuffer.size());
+ if (bytesToCopy > 0) {
+ memcpy(trackBuffer.data(), ipBuffer + mBytesUsedSoFar, bytesToCopy);
+ }
+ mTrack->releaseBuffer(&trackBuffer);
+ mBytesUsedSoFar += bytesToCopy;
+ bytesAvailable = mMemCapacity - mBytesUsedSoFar;
+ if (bytesAvailable == 0) {
+ stop();
+ }
+ } else if (WOULD_BLOCK == status) {
+ if (mStopPlaying)
+ return OK;
+ else
+ return TIMED_OUT;
+ }
+ }
+ return OK;
+}
+
+status_t AudioPlayback::waitForConsumption(bool testSeek) {
+ if (PLAY_STARTED != mState) return INVALID_OPERATION;
+ // in static buffer mode, let's not play clips with duration > 30 sec
+ int retry = 30;
+ // Total number of frames in the input file.
+ size_t totalFrameCount = mMemCapacity / mTrack->frameSize();
+ while (!mStopPlaying && retry > 0) {
+ // Get the total numbers of frames played.
+ uint32_t currPosition;
+ mTrack->getPosition(&currPosition);
+ if (testSeek && (currPosition > totalFrameCount * 0.6)) {
+ testSeek = false;
+ if (!mTrack->hasStarted()) return BAD_VALUE;
+ mTrack->pauseAndWait(std::chrono::seconds(2));
+ if (mTrack->hasStarted()) return BAD_VALUE;
+ mTrack->reload();
+ mTrack->getPosition(&currPosition);
+ if (currPosition != 0) return BAD_VALUE;
+ mTrack->start();
+ while (currPosition < totalFrameCount * 0.3) {
+ mTrack->getPosition(&currPosition);
+ }
+ mTrack->pauseAndWait(std::chrono::seconds(2));
+ uint32_t setPosition = totalFrameCount * 0.9;
+ mTrack->setPosition(setPosition);
+ uint32_t bufferPosition;
+ mTrack->getBufferPosition(&bufferPosition);
+ if (bufferPosition != setPosition) return BAD_VALUE;
+ mTrack->start();
+ }
+ std::this_thread::sleep_for(std::chrono::milliseconds(300));
+ retry--;
+ }
+ if (!mStopPlaying) return TIMED_OUT;
+ return OK;
+}
+
+status_t AudioPlayback::onProcess(bool testSeek) {
+ if (mTransferType == AudioTrack::TRANSFER_SHARED)
+ return waitForConsumption(testSeek);
+ else if (mTransferType == AudioTrack::TRANSFER_OBTAIN)
+ return fillBuffer();
+ else
+ return INVALID_OPERATION;
+}
+
+void AudioPlayback::stop() {
+ std::unique_lock<std::mutex> lock{mMutex};
+ mStopPlaying = true;
+ if (mState != PLAY_STOPPED) {
+ mTrack->stopAndJoinCallbacks();
+ LOG_FATAL_IF(true != mTrack->stopped());
+ mState = PLAY_STOPPED;
+ }
+}
+
+// Holds PCM data delivered by AudioRecord.
+RawBuffer::RawBuffer(int64_t ptsPipeline, int64_t ptsManual, int32_t capacity)
+ : mData(capacity > 0 ? new uint8_t[capacity] : nullptr),
+ mPtsPipeline(ptsPipeline),
+ mPtsManual(ptsManual),
+ mCapacity(capacity) {}
+
+// Simple AudioCapture
+size_t AudioCapture::onMoreData(const AudioRecord::Buffer& buffer) {
+ if (mState != REC_STARTED) {
+ ALOGE("Unexpected Callback from audiorecord, not reading data");
+ return 0;
+ }
+
+ // no more frames to read
+ if (mNumFramesReceived > mNumFramesToRecord || mStopRecording) {
+ mStopRecording = true;
+ return 0;
+ }
+
+ int64_t timeUs = 0, position = 0, timeNs = 0;
+ ExtendedTimestamp ts;
+ ExtendedTimestamp::Location location;
+ const int32_t usPerSec = 1000000;
+
+ if (mRecord->getTimestamp(&ts) == OK &&
+ ts.getBestTimestamp(&position, &timeNs, ExtendedTimestamp::TIMEBASE_MONOTONIC, &location) ==
+ OK) {
+ // Use audio timestamp.
+ timeUs = timeNs / 1000 -
+ (position - mNumFramesReceived + mNumFramesLost) * usPerSec / mSampleRate;
+ } else {
+ // This should not happen in normal case.
+ ALOGW("Failed to get audio timestamp, fallback to use systemclock");
+ timeUs = systemTime() / 1000LL;
+ // Estimate the real sampling time of the 1st sample in this buffer
+ // from AudioRecord's latency. (Apply this adjustment first so that
+ // the start time logic is not affected.)
+ timeUs -= mRecord->latency() * 1000LL;
+ }
+
+ ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
+
+ const size_t frameSize = mRecord->frameSize();
+ uint64_t numLostBytes = (uint64_t)mRecord->getInputFramesLost() * frameSize;
+ if (numLostBytes > 0) {
+ ALOGW("Lost audio record data: %" PRIu64 " bytes", numLostBytes);
+ }
+ std::deque<RawBuffer> tmpQueue;
+ while (numLostBytes > 0) {
+ uint64_t bufferSize = numLostBytes;
+ if (numLostBytes > mMaxBytesPerCallback) {
+ numLostBytes -= mMaxBytesPerCallback;
+ bufferSize = mMaxBytesPerCallback;
+ } else {
+ numLostBytes = 0;
+ }
+ const int64_t timestampUs =
+ ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+ mRecord->getSampleRate();
+ RawBuffer emptyBuffer{timeUs, timestampUs, static_cast<int32_t>(bufferSize)};
+ memset(emptyBuffer.mData.get(), 0, bufferSize);
+ mNumFramesLost += bufferSize / frameSize;
+ mNumFramesReceived += bufferSize / frameSize;
+ tmpQueue.push_back(std::move(emptyBuffer));
+ }
+
+ if (buffer.size() == 0) {
+ ALOGW("Nothing is available from AudioRecord callback buffer");
+ } else {
+ const size_t bufferSize = buffer.size();
+ const int64_t timestampUs =
+ ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+ mRecord->getSampleRate();
+ RawBuffer audioBuffer{timeUs, timestampUs, static_cast<int32_t>(bufferSize)};
+ memcpy(audioBuffer.mData.get(), buffer.data(), bufferSize);
+ mNumFramesReceived += bufferSize / frameSize;
+ tmpQueue.push_back(std::move(audioBuffer));
+ }
+
+ if (tmpQueue.size() > 0) {
+ std::unique_lock<std::mutex> lock{mMutex};
+ for (auto it = tmpQueue.begin(); it != tmpQueue.end(); it++)
+ mBuffersReceived.push_back(std::move(*it));
+ mCondition.notify_all();
+ }
+ return buffer.size();
+}
+
+void AudioCapture::onOverrun() {
+ ALOGV("received event overrun");
+ mBufferOverrun = true;
+}
+
+void AudioCapture::onMarker(uint32_t markerPosition) {
+ ALOGV("received Callback at position %d", markerPosition);
+ mReceivedCbMarkerAtPosition = markerPosition;
+}
+
+void AudioCapture::onNewPos(uint32_t markerPosition) {
+ ALOGV("received Callback at position %d", markerPosition);
+ mReceivedCbMarkerCount++;
+}
+
+void AudioCapture::onNewIAudioRecord() {
+ ALOGV("IAudioRecord is re-created");
+}
+
+AudioCapture::AudioCapture(audio_source_t inputSource, uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask, audio_input_flags_t flags,
+ audio_session_t sessionId, AudioRecord::transfer_type transferType)
+ : mInputSource(inputSource),
+ mSampleRate(sampleRate),
+ mFormat(format),
+ mChannelMask(channelMask),
+ mFlags(flags),
+ mSessionId(sessionId),
+ mTransferType(transferType) {
+ mFrameCount = 0;
+ mNotificationFrames = 0;
+ mNumFramesToRecord = 0;
+ mNumFramesReceived = 0;
+ mNumFramesLost = 0;
+ mBufferOverrun = false;
+ mMarkerPosition = 0;
+ mMarkerPeriod = 0;
+ mReceivedCbMarkerAtPosition = -1;
+ mReceivedCbMarkerCount = 0;
+ mState = REC_NO_INIT;
+ mStopRecording = false;
+#if RECORD_TO_FILE
+ CreateRandomFile(mOutFileFd);
+#endif
+}
+
+AudioCapture::~AudioCapture() {
+ if (mOutFileFd > 0) close(mOutFileFd);
+ stop();
+}
+
+status_t AudioCapture::create() {
+ if (mState != REC_NO_INIT) return INVALID_OPERATION;
+ // get Min Frame Count
+ size_t minFrameCount;
+ status_t status =
+ AudioRecord::getMinFrameCount(&minFrameCount, mSampleRate, mFormat, mChannelMask);
+ if (NO_ERROR != status) return status;
+ // Limit notificationFrames based on the client buffer size
+ const int samplesPerFrame = audio_channel_count_from_in_mask(mChannelMask);
+ const int bytesPerSample = audio_bytes_per_sample(mFormat);
+ mNotificationFrames = mMaxBytesPerCallback / (samplesPerFrame * bytesPerSample);
+ // select frameCount to be at least minFrameCount
+ mFrameCount = 2 * mNotificationFrames;
+ while (mFrameCount < minFrameCount) {
+ mFrameCount += mNotificationFrames;
+ }
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ ALOGW("Overriding all previous computations");
+ mFrameCount = 0;
+ mNotificationFrames = 0;
+ }
+ mNumFramesToRecord = (mSampleRate * 0.25); // record .25 sec
+ std::string packageName{"AudioCapture"};
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageName;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
+ if (mTransferType == AudioRecord::TRANSFER_OBTAIN) {
+ if (mSampleRate == 48000) { // test all available constructors
+ mRecord = new AudioRecord(mInputSource, mSampleRate, mFormat, mChannelMask,
+ attributionSource, mFrameCount, nullptr /* callback */,
+ mNotificationFrames, mSessionId, mTransferType, mFlags);
+ } else {
+ mRecord = new AudioRecord(attributionSource);
+ status = mRecord->set(mInputSource, mSampleRate, mFormat, mChannelMask, mFrameCount,
+ nullptr /* callback */, 0 /* notificationFrames */,
+ false /* canCallJava */, mSessionId, mTransferType, mFlags,
+ attributionSource.uid, attributionSource.pid);
+ }
+ if (NO_ERROR != status) return status;
+ } else if (mTransferType == AudioRecord::TRANSFER_CALLBACK) {
+ mRecord = new AudioRecord(mInputSource, mSampleRate, mFormat, mChannelMask,
+ attributionSource, mFrameCount, this, mNotificationFrames,
+ mSessionId, mTransferType, mFlags);
+ } else {
+ ALOGE("Test application is not handling transfer type %s",
+ AudioRecord::convertTransferToText(mTransferType));
+ return NO_INIT;
+ }
+ mRecord->setCallerName(packageName);
+ status = mRecord->initCheck();
+ if (NO_ERROR == status) mState = REC_READY;
+ if (mFlags & AUDIO_INPUT_FLAG_FAST) {
+ mFrameCount = mRecord->frameCount();
+ mNotificationFrames = mRecord->getNotificationPeriodInFrames();
+ mMaxBytesPerCallback = mNotificationFrames * samplesPerFrame * bytesPerSample;
+ }
+ return status;
+}
+
+sp<AudioRecord> AudioCapture::getAudioRecordHandle() {
+ return (REC_NO_INIT == mState) ? nullptr : mRecord;
+}
+
+status_t AudioCapture::start(AudioSystem::sync_event_t event, audio_session_t triggerSession) {
+ status_t status;
+ if (REC_READY != mState) {
+ return INVALID_OPERATION;
+ } else {
+ status = mRecord->start(event, triggerSession);
+ if (OK == status) {
+ mState = REC_STARTED;
+ LOG_FATAL_IF(false != mRecord->stopped());
+ }
+ }
+ return status;
+}
+
+status_t AudioCapture::stop() {
+ status_t status = OK;
+ mStopRecording = true;
+ if (mState != REC_STOPPED) {
+ mRecord->stopAndJoinCallbacks();
+ mState = REC_STOPPED;
+ LOG_FATAL_IF(true != mRecord->stopped());
+ }
+ return status;
+}
+
+status_t AudioCapture::obtainBuffer(RawBuffer& buffer) {
+ if (REC_STARTED != mState && REC_STOPPED != mState) return INVALID_OPERATION;
+ int retry = 25;
+ AudioRecord::Buffer recordBuffer;
+ recordBuffer.frameCount = mNotificationFrames;
+ size_t nonContig = 0;
+ status_t status = mRecord->obtainBuffer(&recordBuffer, retry, &nonContig);
+ if (OK == status) {
+ const int64_t timestampUs =
+ ((1000000LL * mNumFramesReceived) + (mRecord->getSampleRate() >> 1)) /
+ mRecord->getSampleRate();
+ RawBuffer buff{-1, timestampUs, static_cast<int32_t>(recordBuffer.size())};
+ memcpy(buff.mData.get(), recordBuffer.data(), recordBuffer.size());
+ buffer = std::move(buff);
+ mNumFramesReceived += recordBuffer.size() / mRecord->frameSize();
+ mRecord->releaseBuffer(&recordBuffer);
+ if (mNumFramesReceived > mNumFramesToRecord) {
+ stop();
+ }
+ } else if (status == WOULD_BLOCK) {
+ if (mStopRecording)
+ return WOULD_BLOCK;
+ else
+ return TIMED_OUT;
+ }
+ return OK;
+}
+
+status_t AudioCapture::obtainBufferCb(RawBuffer& buffer) {
+ if (REC_STARTED != mState) return INVALID_OPERATION;
+ int retry = 10;
+ std::unique_lock<std::mutex> lock{mMutex};
+ while (mBuffersReceived.empty() && !mStopRecording && retry > 0) {
+ mCondition.wait_for(lock, std::chrono::milliseconds(100));
+ retry--;
+ }
+ if (!mBuffersReceived.empty()) {
+ auto it = mBuffersReceived.begin();
+ buffer = std::move(*it);
+ mBuffersReceived.erase(it);
+ } else {
+ if (retry == 0) return TIMED_OUT;
+ if (mStopRecording)
+ return WOULD_BLOCK;
+ else
+ return UNKNOWN_ERROR;
+ }
+ return OK;
+}
+
+status_t AudioCapture::audioProcess() {
+ RawBuffer buffer;
+ while (true) {
+ status_t status;
+ if (mTransferType == AudioRecord::TRANSFER_CALLBACK)
+ status = obtainBufferCb(buffer);
+ else
+ status = obtainBuffer(buffer);
+ switch (status) {
+ case OK:
+ if (mOutFileFd > 0) {
+ const char* ptr =
+ static_cast<const char*>(static_cast<void*>(buffer.mData.get()));
+ write(mOutFileFd, ptr, buffer.mCapacity);
+ }
+ break;
+ case WOULD_BLOCK:
+ return OK;
+ case TIMED_OUT: // "recorder application timed out from receiving buffers"
+ case NO_INIT: // "recorder not initialized"
+ case INVALID_OPERATION: // "recorder not started"
+ case UNKNOWN_ERROR: // "Unknown error"
+ default:
+ return status;
+ }
+ }
+}
+
+status_t listAudioPorts(std::vector<audio_port_v7>& portsVec) {
+ int attempts = 5;
+ status_t status;
+ unsigned int generation1, generation;
+ unsigned int numPorts = 0;
+ do {
+ if (attempts-- < 0) {
+ status = TIMED_OUT;
+ break;
+ }
+ status = AudioSystem::listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE, &numPorts,
+ nullptr, &generation1);
+ if (status != NO_ERROR) {
+ ALOGE("AudioSystem::listAudioPorts returned error %d", status);
+ break;
+ }
+ portsVec.resize(numPorts);
+ status = AudioSystem::listAudioPorts(AUDIO_PORT_ROLE_NONE, AUDIO_PORT_TYPE_NONE, &numPorts,
+ portsVec.data(), &generation);
+ } while (generation1 != generation && status == NO_ERROR);
+ if (status != NO_ERROR) {
+ numPorts = 0;
+ portsVec.clear();
+ }
+ return status;
+}
+
+status_t getPortById(const audio_port_handle_t portId, audio_port_v7& port) {
+ std::vector<struct audio_port_v7> ports;
+ status_t status = listAudioPorts(ports);
+ if (status != OK) return status;
+ for (auto i = 0; i < ports.size(); i++) {
+ if (ports[i].id == portId) {
+ port = ports[i];
+ return OK;
+ }
+ }
+ return BAD_VALUE;
+}
+
+status_t getPortByAttributes(audio_port_role_t role, audio_port_type_t type,
+ audio_devices_t deviceType, audio_port_v7& port) {
+ std::vector<struct audio_port_v7> ports;
+ status_t status = listAudioPorts(ports);
+ if (status != OK) return status;
+ for (auto i = 0; i < ports.size(); i++) {
+ if (ports[i].role == role && ports[i].type == type &&
+ ports[i].ext.device.type == deviceType) {
+ port = ports[i];
+ return OK;
+ }
+ }
+ return BAD_VALUE;
+}
+
+status_t listAudioPatches(std::vector<struct audio_patch>& patchesVec) {
+ int attempts = 5;
+ status_t status;
+ unsigned int generation1, generation;
+ unsigned int numPatches = 0;
+ do {
+ if (attempts-- < 0) {
+ status = TIMED_OUT;
+ break;
+ }
+ status = AudioSystem::listAudioPatches(&numPatches, nullptr, &generation1);
+ if (status != NO_ERROR) {
+ ALOGE("AudioSystem::listAudioPatches returned error %d", status);
+ break;
+ }
+ patchesVec.resize(numPatches);
+ status = AudioSystem::listAudioPatches(&numPatches, patchesVec.data(), &generation);
+ } while (generation1 != generation && status == NO_ERROR);
+ if (status != NO_ERROR) {
+ numPatches = 0;
+ patchesVec.clear();
+ }
+ return status;
+}
+
+status_t getPatchForOutputMix(audio_io_handle_t audioIo, audio_patch& patch) {
+ std::vector<struct audio_patch> patches;
+ status_t status = listAudioPatches(patches);
+ if (status != OK) return status;
+
+ for (auto i = 0; i < patches.size(); i++) {
+ for (auto j = 0; j < patches[i].num_sources; j++) {
+ if (patches[i].sources[j].type == AUDIO_PORT_TYPE_MIX &&
+ patches[i].sources[j].ext.mix.handle == audioIo) {
+ patch = patches[i];
+ return OK;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
+status_t getPatchForInputMix(audio_io_handle_t audioIo, audio_patch& patch) {
+ std::vector<struct audio_patch> patches;
+ status_t status = listAudioPatches(patches);
+ if (status != OK) return status;
+
+ for (auto i = 0; i < patches.size(); i++) {
+ for (auto j = 0; j < patches[i].num_sinks; j++) {
+ if (patches[i].sinks[j].type == AUDIO_PORT_TYPE_MIX &&
+ patches[i].sinks[j].ext.mix.handle == audioIo) {
+ patch = patches[i];
+ return OK;
+ }
+ }
+ }
+ return BAD_VALUE;
+}
+
+bool patchContainsOutputDevice(audio_port_handle_t deviceId, audio_patch patch) {
+ for (auto j = 0; j < patch.num_sinks; j++) {
+ if (patch.sinks[j].type == AUDIO_PORT_TYPE_DEVICE && patch.sinks[j].id == deviceId) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool patchContainsInputDevice(audio_port_handle_t deviceId, audio_patch patch) {
+ for (auto j = 0; j < patch.num_sources; j++) {
+ if (patch.sources[j].type == AUDIO_PORT_TYPE_DEVICE && patch.sources[j].id == deviceId) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool checkPatchPlayback(audio_io_handle_t audioIo, audio_port_handle_t deviceId) {
+ struct audio_patch patch;
+ if (getPatchForOutputMix(audioIo, patch) == OK) {
+ return patchContainsOutputDevice(deviceId, patch);
+ }
+ return false;
+}
+
+bool checkPatchCapture(audio_io_handle_t audioIo, audio_port_handle_t deviceId) {
+ struct audio_patch patch;
+ if (getPatchForInputMix(audioIo, patch) == OK) {
+ return patchContainsInputDevice(deviceId, patch);
+ }
+ return false;
+}
+
+std::string dumpPortConfig(const audio_port_config& port) {
+ std::ostringstream result;
+ std::string deviceInfo;
+ if (port.type == AUDIO_PORT_TYPE_DEVICE) {
+ if (port.ext.device.type & AUDIO_DEVICE_BIT_IN) {
+ InputDeviceConverter::maskToString(port.ext.device.type, deviceInfo);
+ } else {
+ OutputDeviceConverter::maskToString(port.ext.device.type, deviceInfo);
+ }
+ deviceInfo += std::string(", address = ") + port.ext.device.address;
+ }
+ result << "audio_port_handle_t = " << port.id << ", "
+ << "Role = " << (port.role == AUDIO_PORT_ROLE_SOURCE ? "source" : "sink") << ", "
+ << "Type = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? "device" : "mix") << ", "
+ << "deviceInfo = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? deviceInfo : "") << ", "
+ << "config_mask = 0x" << std::hex << port.config_mask << std::dec << ", ";
+ if (port.config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
+ result << "sample rate = " << port.sample_rate << ", ";
+ }
+ if (port.config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
+ result << "channel mask = " << port.channel_mask << ", ";
+ }
+ if (port.config_mask & AUDIO_PORT_CONFIG_FORMAT) {
+ result << "format = " << port.format << ", ";
+ }
+ result << "input flags = " << port.flags.input << ", ";
+ result << "output flags = " << port.flags.output << ", ";
+ result << "mix io handle = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? 0 : port.ext.mix.handle)
+ << "\n";
+ return result.str();
+}
+
+std::string dumpPatch(const audio_patch& patch) {
+ std::ostringstream result;
+ result << "----------------- Dumping Patch ------------ \n";
+ result << "Patch Handle: " << patch.id << ", sources: " << patch.num_sources
+ << ", sink: " << patch.num_sinks << "\n";
+ audio_port_v7 port;
+ for (uint32_t i = 0; i < patch.num_sources; i++) {
+ result << "----------------- Dumping Source Port Config @ index " << i
+ << " ------------ \n";
+ result << dumpPortConfig(patch.sources[i]);
+ result << "----------------- Dumping Source Port for id " << patch.sources[i].id
+ << " ------------ \n";
+ getPortById(patch.sources[i].id, port);
+ result << dumpPort(port);
+ }
+ for (uint32_t i = 0; i < patch.num_sinks; i++) {
+ result << "----------------- Dumping Sink Port Config @ index " << i << " ------------ \n";
+ result << dumpPortConfig(patch.sinks[i]);
+ result << "----------------- Dumping Sink Port for id " << patch.sinks[i].id
+ << " ------------ \n";
+ getPortById(patch.sinks[i].id, port);
+ result << dumpPort(port);
+ }
+ return result.str();
+}
+
+std::string dumpPort(const audio_port_v7& port) {
+ std::ostringstream result;
+ std::string deviceInfo;
+ if (port.type == AUDIO_PORT_TYPE_DEVICE) {
+ if (port.ext.device.type & AUDIO_DEVICE_BIT_IN) {
+ InputDeviceConverter::maskToString(port.ext.device.type, deviceInfo);
+ } else {
+ OutputDeviceConverter::maskToString(port.ext.device.type, deviceInfo);
+ }
+ deviceInfo += std::string(", address = ") + port.ext.device.address;
+ }
+ result << "audio_port_handle_t = " << port.id << ", "
+ << "Role = " << (port.role == AUDIO_PORT_ROLE_SOURCE ? "source" : "sink") << ", "
+ << "Type = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? "device" : "mix") << ", "
+ << "deviceInfo = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? deviceInfo : "") << ", "
+ << "Name = " << port.name << ", "
+ << "num profiles = " << port.num_audio_profiles << ", "
+ << "mix io handle = " << (port.type == AUDIO_PORT_TYPE_DEVICE ? 0 : port.ext.mix.handle)
+ << ", ";
+ for (int i = 0; i < port.num_audio_profiles; i++) {
+ result << "AudioProfile = " << i << " {";
+ result << "format = " << port.audio_profiles[i].format << ", ";
+ result << "samplerates = ";
+ for (int j = 0; j < port.audio_profiles[i].num_sample_rates; j++) {
+ result << port.audio_profiles[i].sample_rates[j] << ", ";
+ }
+ result << "channelmasks = ";
+ for (int j = 0; j < port.audio_profiles[i].num_channel_masks; j++) {
+ result << "0x" << std::hex << port.audio_profiles[i].channel_masks[j] << std::dec
+ << ", ";
+ }
+ result << "} ";
+ }
+ result << dumpPortConfig(port.active_config);
+ return result.str();
+}
diff --git a/media/libaudioclient/tests/audio_test_utils.h b/media/libaudioclient/tests/audio_test_utils.h
new file mode 100644
index 0000000..fc269ed
--- /dev/null
+++ b/media/libaudioclient/tests/audio_test_utils.h
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef AUDIO_TEST_UTILS_H_
+#define AUDIO_TEST_UTILS_H_
+
+#include <sys/stat.h>
+#include <unistd.h>
+#include <atomic>
+#include <chrono>
+#include <cinttypes>
+#include <deque>
+#include <memory>
+#include <mutex>
+#include <thread>
+
+#include <binder/MemoryDealer.h>
+#include <media/AidlConversion.h>
+#include <media/AudioRecord.h>
+#include <media/AudioTrack.h>
+
+#define RECORD_TO_FILE 0
+
+using namespace android;
+
+void CreateRandomFile(int& fd);
+status_t listAudioPorts(std::vector<audio_port_v7>& portsVec);
+status_t listAudioPatches(std::vector<struct audio_patch>& patchesVec);
+status_t getPortByAttributes(audio_port_role_t role, audio_port_type_t type,
+ audio_devices_t deviceType, audio_port_v7& port);
+status_t getPatchForOutputMix(audio_io_handle_t audioIo, audio_patch& patch);
+status_t getPatchForInputMix(audio_io_handle_t audioIo, audio_patch& patch);
+status_t getPortById(const audio_port_handle_t portId, audio_port_v7& port);
+bool patchContainsOutputDevice(audio_port_handle_t deviceId, audio_patch patch);
+bool patchContainsInputDevice(audio_port_handle_t deviceId, audio_patch patch);
+bool checkPatchPlayback(audio_io_handle_t audioIo, audio_port_handle_t deviceId);
+bool checkPatchCapture(audio_io_handle_t audioIo, audio_port_handle_t deviceId);
+std::string dumpPort(const audio_port_v7& port);
+std::string dumpPortConfig(const audio_port_config& port);
+std::string dumpPatch(const audio_patch& patch);
+
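+// Device-routing callback that records the last reported io handle / device id and lets tests
+// block until an update arrives.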
+class OnAudioDeviceUpdateNotifier : public AudioSystem::AudioDeviceCallback {
+ public:
+ audio_io_handle_t mAudioIo = AUDIO_IO_HANDLE_NONE;
+ audio_port_handle_t mDeviceId = AUDIO_PORT_HANDLE_NONE;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+
+ void onAudioDeviceUpdate(audio_io_handle_t audioIo, audio_port_handle_t deviceId);
+ status_t waitForAudioDeviceCb();
+};
+
+// Simple AudioPlayback class.
+class AudioPlayback : public AudioTrack::IAudioTrackCallback {
+ friend sp<AudioPlayback>;
+ AudioPlayback(uint32_t sampleRate, audio_format_t format, audio_channel_mask_t channelMask,
+ audio_output_flags_t flags = AUDIO_OUTPUT_FLAG_NONE,
+ audio_session_t sessionId = AUDIO_SESSION_NONE,
+ AudioTrack::transfer_type transferType = AudioTrack::TRANSFER_SHARED,
+ audio_attributes_t* attributes = nullptr);
+ public:
+ status_t loadResource(const char* name);
+ status_t create();
+ sp<AudioTrack> getAudioTrackHandle();
+ status_t start();
+ status_t waitForConsumption(bool testSeek = false);
+ status_t fillBuffer();
+ status_t onProcess(bool testSeek = false);
+ virtual void onBufferEnd() override;
+ void stop();
+
+ bool mStopPlaying;
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+
+ enum State {
+ PLAY_NO_INIT,
+ PLAY_READY,
+ PLAY_STARTED,
+ PLAY_STOPPED,
+ };
+
+ private:
+ ~AudioPlayback();
+ const uint32_t mSampleRate;
+ const audio_format_t mFormat;
+ const audio_channel_mask_t mChannelMask;
+ const audio_output_flags_t mFlags;
+ const audio_session_t mSessionId;
+ const AudioTrack::transfer_type mTransferType;
+ const audio_attributes_t* mAttributes;
+
+ size_t mBytesUsedSoFar;
+ State mState;
+ size_t mMemCapacity;
+ sp<MemoryDealer> mMemoryDealer;
+ sp<IMemory> mMemory;
+
+ sp<AudioTrack> mTrack;
+};
+
+// Holds PCM data received from AudioRecord.
+class RawBuffer {
+ public:
+ RawBuffer(int64_t ptsPipeline = -1, int64_t ptsManual = -1, int32_t capacity = 0);
+
+ std::unique_ptr<uint8_t[]> mData;
+ int64_t mPtsPipeline;
+ int64_t mPtsManual;
+ int32_t mCapacity;
+};
+
+// Simple AudioCapture class.
+class AudioCapture : public AudioRecord::IAudioRecordCallback {
+ public:
+ AudioCapture(audio_source_t inputSource, uint32_t sampleRate, audio_format_t format,
+ audio_channel_mask_t channelMask,
+ audio_input_flags_t flags = AUDIO_INPUT_FLAG_NONE,
+ audio_session_t sessionId = AUDIO_SESSION_ALLOCATE,
+ AudioRecord::transfer_type transferType = AudioRecord::TRANSFER_CALLBACK);
+ ~AudioCapture();
+ size_t onMoreData(const AudioRecord::Buffer& buffer) override;
+ void onOverrun() override;
+ void onMarker(uint32_t markerPosition) override;
+ void onNewPos(uint32_t newPos) override;
+ void onNewIAudioRecord() override;
+ status_t create();
+ sp<AudioRecord> getAudioRecordHandle();
+ status_t start(AudioSystem::sync_event_t event = AudioSystem::SYNC_EVENT_NONE,
+ audio_session_t triggerSession = AUDIO_SESSION_NONE);
+ status_t obtainBufferCb(RawBuffer& buffer);
+ status_t obtainBuffer(RawBuffer& buffer);
+ status_t audioProcess();
+ status_t stop();
+
+ uint32_t mFrameCount;
+ uint32_t mNotificationFrames;
+ int64_t mNumFramesToRecord;
+ int64_t mNumFramesReceived;
+ int64_t mNumFramesLost;
+ uint32_t mMarkerPosition;
+ uint32_t mMarkerPeriod;
+ uint32_t mReceivedCbMarkerAtPosition;
+ uint32_t mReceivedCbMarkerCount;
+ bool mBufferOverrun;
+
+ enum State {
+ REC_NO_INIT,
+ REC_READY,
+ REC_STARTED,
+ REC_STOPPED,
+ };
+
+ private:
+ const audio_source_t mInputSource;
+ const uint32_t mSampleRate;
+ const audio_format_t mFormat;
+ const audio_channel_mask_t mChannelMask;
+ const audio_input_flags_t mFlags;
+ const audio_session_t mSessionId;
+ const AudioRecord::transfer_type mTransferType;
+
+ size_t mMaxBytesPerCallback = 2048;
+ sp<AudioRecord> mRecord;
+ State mState;
+ bool mStopRecording;
+ int mOutFileFd = -1;
+
+ std::mutex mMutex;
+ std::condition_variable mCondition;
+ std::deque<RawBuffer> mBuffersReceived;
+};
+
+#endif // AUDIO_TEST_UTILS_H_
diff --git a/media/libaudioclient/tests/audioclient_serialization_tests.cpp b/media/libaudioclient/tests/audioclient_serialization_tests.cpp
new file mode 100644
index 0000000..93baefd6
--- /dev/null
+++ b/media/libaudioclient/tests/audioclient_serialization_tests.cpp
@@ -0,0 +1,310 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AudioClientSerializationUnitTests"
+
+#include <cstdint>
+#include <cstdlib>
+#include <ctime>
+
+#include <gtest/gtest.h>
+
+#include <android_audio_policy_configuration_V7_0-enums.h>
+#include <xsdc/XsdcSupport.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+namespace xsd {
+using namespace ::android::audio::policy::configuration::V7_0;
+}
+
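+// Collects the framework enum values whose XSD names (optionally filtered by findString) can be
+// parsed by the given conversion function.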
+template <typename T, typename X, typename FUNC>
+std::vector<T> getFlags(const xsdc_enum_range<X>& range, const FUNC& func,
+ const std::string& findString = {}) {
+ std::vector<T> vec;
+ for (const auto& xsdEnumVal : range) {
+ T enumVal;
+ std::string enumString = toString(xsdEnumVal);
+ if (enumString.find(findString) != std::string::npos &&
+ func(enumString.c_str(), &enumVal)) {
+ vec.push_back(enumVal);
+ }
+ }
+ return vec;
+}
+
+static const std::vector<audio_usage_t> kUsages =
+ getFlags<audio_usage_t, xsd::AudioUsage, decltype(audio_usage_from_string)>(
+ xsdc_enum_range<xsd::AudioUsage>{}, audio_usage_from_string);
+
+static const std::vector<audio_content_type_t> kContentType =
+ getFlags<audio_content_type_t, xsd::AudioContentType,
+ decltype(audio_content_type_from_string)>(xsdc_enum_range<xsd::AudioContentType>{},
+ audio_content_type_from_string);
+
+static const std::vector<audio_source_t> kInputSources =
+ getFlags<audio_source_t, xsd::AudioSource, decltype(audio_source_from_string)>(
+ xsdc_enum_range<xsd::AudioSource>{}, audio_source_from_string);
+
+static const std::vector<audio_stream_type_t> kStreamtypes =
+ getFlags<audio_stream_type_t, xsd::AudioStreamType,
+ decltype(audio_stream_type_from_string)>(xsdc_enum_range<xsd::AudioStreamType>{},
+ audio_stream_type_from_string);
+
+static const std::vector<uint32_t> kMixMatchRules = {
+ RULE_MATCH_ATTRIBUTE_USAGE,
+ RULE_EXCLUDE_ATTRIBUTE_USAGE,
+ RULE_MATCH_ATTRIBUTE_CAPTURE_PRESET,
+ RULE_EXCLUDE_ATTRIBUTE_CAPTURE_PRESET,
+ RULE_MATCH_UID,
+ RULE_EXCLUDE_UID,
+ RULE_MATCH_USERID,
+ RULE_EXCLUDE_USERID,
+};
+
+// Generates a random string.
+std::string CreateRandomString(size_t n) {
+ std::string data =
+ "abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789";
+ // Rely on the RNG seeded by the test fixture (srand(mSeed)) so runs stay reproducible.
+ std::string s(n, ' ');
+ for (size_t i = 0; i < n; ++i) {
+ s[i] = data[rand() % data.size()];
+ }
+ return s;
+}
+
+class FillAudioAttributes {
+ public:
+ void fillAudioAttributes(audio_attributes_t& attr);
+
+ unsigned int mSeed;
+};
+
+void FillAudioAttributes::fillAudioAttributes(audio_attributes_t& attr) {
+ attr.content_type = kContentType[rand() % kContentType.size()];
+ attr.usage = kUsages[rand() % kUsages.size()];
+ attr.source = kInputSources[rand() % kInputSources.size()];
+ // attr.flags -> [0, (1 << (CAPTURE_PRIVATE + 1)) - 1]
+ attr.flags = static_cast<audio_flags_mask_t>(rand() & 0x3fff);
+ sprintf(attr.tags, "%s",
+ CreateRandomString((int)rand() % (AUDIO_ATTRIBUTES_TAGS_MAX_SIZE - 1)).c_str());
+}
+
+class SerializationTest : public FillAudioAttributes, public ::testing::Test {
+ void SetUp() override {
+ mSeed = static_cast<unsigned int>(time(0));
+ srand(mSeed);
+ }
+};
+
+// UNIT TESTS
+TEST_F(SerializationTest, AudioProductStrategyBinderization) {
+ for (int j = 0; j < 512; j++) {
+ const std::string name{"Test APSBinderization for seed::" + std::to_string(mSeed)};
+ std::vector<AudioAttributes> audioattributesvector;
+ for (auto i = 0; i < 16; i++) {
+ audio_attributes_t attributes;
+ fillAudioAttributes(attributes);
+ AudioAttributes audioattributes{static_cast<volume_group_t>(rand()),
+ kStreamtypes[rand() % kStreamtypes.size()], attributes};
+ audioattributesvector.push_back(audioattributes);
+ }
+ product_strategy_t psId = static_cast<product_strategy_t>(rand());
+ AudioProductStrategy aps{name, audioattributesvector, psId};
+
+ Parcel p;
+ EXPECT_EQ(NO_ERROR, aps.writeToParcel(&p)) << name;
+
+ AudioProductStrategy apsCopy;
+ p.setDataPosition(0);
+ EXPECT_EQ(NO_ERROR, apsCopy.readFromParcel(&p)) << name;
+ EXPECT_EQ(apsCopy.getName(), name) << name;
+ EXPECT_EQ(apsCopy.getId(), psId) << name;
+ auto avec = apsCopy.getAudioAttributes();
+ EXPECT_EQ(avec.size(), audioattributesvector.size()) << name;
+ for (int i = 0; i < audioattributesvector.size(); i++) {
+ EXPECT_EQ(avec[i].getGroupId(), audioattributesvector[i].getGroupId()) << name;
+ EXPECT_EQ(avec[i].getStreamType(), audioattributesvector[i].getStreamType()) << name;
+ EXPECT_TRUE(avec[i].getAttributes() == audioattributesvector[i].getAttributes())
+ << name;
+ }
+ }
+}
+
+TEST_F(SerializationTest, AudioVolumeGroupBinderization) {
+ for (int j = 0; j < 512; j++) {
+ const std::string name{"Test AVGBinderization for seed::" + std::to_string(mSeed)};
+ volume_group_t groupId = static_cast<volume_group_t>(rand());
+ std::vector<audio_attributes_t> attributesvector;
+ for (auto i = 0; i < 16; i++) {
+ audio_attributes_t attributes;
+ fillAudioAttributes(attributes);
+ attributesvector.push_back(attributes);
+ }
+ std::vector<audio_stream_type_t> streamsvector;
+ for (auto i = 0; i < 8; i++) {
+ streamsvector.push_back(kStreamtypes[rand() % kStreamtypes.size()]);
+ }
+ AudioVolumeGroup avg{name, groupId, attributesvector, streamsvector};
+
+ Parcel p;
+ EXPECT_EQ(NO_ERROR, avg.writeToParcel(&p));
+
+ AudioVolumeGroup avgCopy;
+ p.setDataPosition(0);
+ EXPECT_EQ(NO_ERROR, avgCopy.readFromParcel(&p)) << name;
+ EXPECT_EQ(avgCopy.getName(), name) << name;
+ EXPECT_EQ(avgCopy.getId(), groupId) << name;
+ auto avec = avgCopy.getAudioAttributes();
+ EXPECT_EQ(avec.size(), attributesvector.size()) << name;
+ for (int i = 0; i < avec.size(); i++) {
+ EXPECT_TRUE(avec[i] == attributesvector[i]) << name;
+ }
+ StreamTypeVector svec = avgCopy.getStreamTypes();
+ EXPECT_EQ(svec.size(), streamsvector.size()) << name;
+ for (int i = 0; i < svec.size(); i++) {
+ EXPECT_EQ(svec[i], streamsvector[i]) << name;
+ }
+ }
+}
+
+TEST_F(SerializationTest, AudioMixBinderization) {
+ for (int j = 0; j < 512; j++) {
+ const std::string msg{"Test AMBinderization for seed::" + std::to_string(mSeed)};
+ Vector<AudioMixMatchCriterion> criteria;
+ for (int i = 0; i < 16; i++) {
+ AudioMixMatchCriterion ammc{kUsages[rand() % kUsages.size()],
+ kInputSources[rand() % kInputSources.size()],
+ kMixMatchRules[rand() % kMixMatchRules.size()]};
+ criteria.add(ammc);
+ }
+ audio_config_t config{};
+ config.sample_rate = 48000;
+ config.channel_mask = AUDIO_CHANNEL_IN_MONO;
+ config.format = AUDIO_FORMAT_PCM_16_BIT;
+ config.offload_info = AUDIO_INFO_INITIALIZER;
+ config.frame_count = 4800;
+ AudioMix am{criteria,
+ static_cast<uint32_t>(rand()),
+ config,
+ static_cast<uint32_t>(rand()),
+ String8(msg.c_str()),
+ static_cast<uint32_t>(rand())};
+
+ Parcel p;
+ EXPECT_EQ(NO_ERROR, am.writeToParcel(&p)) << msg;
+
+ AudioMix amCopy;
+ p.setDataPosition(0);
+ EXPECT_EQ(NO_ERROR, amCopy.readFromParcel(&p)) << msg;
+ EXPECT_EQ(amCopy.mMixType, am.mMixType) << msg;
+ EXPECT_EQ(amCopy.mFormat.sample_rate, am.mFormat.sample_rate) << msg;
+ EXPECT_EQ(amCopy.mFormat.channel_mask, am.mFormat.channel_mask) << msg;
+ EXPECT_EQ(amCopy.mFormat.format, am.mFormat.format) << msg;
+ EXPECT_EQ(amCopy.mRouteFlags, am.mRouteFlags) << msg;
+ EXPECT_EQ(amCopy.mDeviceAddress, am.mDeviceAddress) << msg;
+ EXPECT_EQ(amCopy.mCbFlags, am.mCbFlags) << msg;
+ EXPECT_EQ(amCopy.mCriteria.size(), am.mCriteria.size()) << msg;
+ for (auto i = 0; i < amCopy.mCriteria.size(); i++) {
+ EXPECT_EQ(amCopy.mCriteria[i].mRule, am.mCriteria[i].mRule) << msg;
+ EXPECT_EQ(amCopy.mCriteria[i].mValue.mUserId, am.mCriteria[i].mValue.mUserId) << msg;
+ }
+ }
+}
+
+using MMCTestParams = std::tuple<audio_usage_t, audio_source_t, uint32_t>;
+
+class MMCParameterizedTest : public FillAudioAttributes,
+ public ::testing::TestWithParam<MMCTestParams> {
+ public:
+ MMCParameterizedTest()
+ : mAudioUsage(std::get<0>(GetParam())),
+ mAudioSource(std::get<1>(GetParam())),
+ mAudioMixMatchRules(std::get<2>(GetParam())){};
+
+ const audio_usage_t mAudioUsage;
+ const audio_source_t mAudioSource;
+ const uint32_t mAudioMixMatchRules;
+
+ void SetUp() override {
+ mSeed = static_cast<unsigned int>(time(0));
+ srand(mSeed);
+ }
+};
+
+TEST_P(MMCParameterizedTest, AudioMixMatchCriterionBinderization) {
+ const std::string msg{"Test AMMCBinderization for seed::" + std::to_string(mSeed)};
+ AudioMixMatchCriterion ammc{mAudioUsage, mAudioSource, mAudioMixMatchRules};
+
+ Parcel p;
+ EXPECT_EQ(NO_ERROR, ammc.writeToParcel(&p)) << msg;
+
+ AudioMixMatchCriterion ammcCopy;
+ p.setDataPosition(0);
+ EXPECT_EQ(NO_ERROR, ammcCopy.readFromParcel(&p)) << msg;
+ EXPECT_EQ(ammcCopy.mRule, ammc.mRule) << msg;
+ EXPECT_EQ(ammcCopy.mValue.mUserId, ammc.mValue.mUserId) << msg;
+}
+
+// audioUsage, audioSource, audioMixMatchRules
+INSTANTIATE_TEST_SUITE_P(SerializationParameterizedTests, MMCParameterizedTest,
+ ::testing::Combine(testing::ValuesIn(kUsages),
+ testing::ValuesIn(kInputSources),
+ testing::ValuesIn(kMixMatchRules)));
+
+using AudioAttributesTestParams = std::tuple<audio_stream_type_t>;
+
+class AudioAttributesParameterizedTest
+ : public FillAudioAttributes,
+ public ::testing::TestWithParam<AudioAttributesTestParams> {
+ public:
+ AudioAttributesParameterizedTest() : mAudioStream(std::get<0>(GetParam())){};
+
+ const audio_stream_type_t mAudioStream;
+
+ void SetUp() override {
+ mSeed = static_cast<unsigned int>(time(0));
+ srand(mSeed);
+ }
+};
+
+TEST_P(AudioAttributesParameterizedTest, AudioAttributesBinderization) {
+ const std::string msg{"Test AABinderization for seed::" + std::to_string(mSeed)};
+ volume_group_t groupId = static_cast<volume_group_t>(rand());
+ audio_stream_type_t stream = mAudioStream;
+ audio_attributes_t attributes;
+ fillAudioAttributes(attributes);
+ AudioAttributes audioattributes{groupId, stream, attributes};
+
+ Parcel p;
+ EXPECT_EQ(NO_ERROR, audioattributes.writeToParcel(&p)) << msg;
+
+ AudioAttributes audioattributesCopy;
+ p.setDataPosition(0);
+ EXPECT_EQ(NO_ERROR, audioattributesCopy.readFromParcel(&p)) << msg;
+ EXPECT_EQ(audioattributesCopy.getGroupId(), audioattributes.getGroupId()) << msg;
+ EXPECT_EQ(audioattributesCopy.getStreamType(), audioattributes.getStreamType()) << msg;
+ EXPECT_TRUE(audioattributesCopy.getAttributes() == attributes) << msg;
+}
+
+// audioStream
+INSTANTIATE_TEST_SUITE_P(SerializationParameterizedTests, AudioAttributesParameterizedTest,
+ ::testing::Combine(testing::ValuesIn(kStreamtypes)));
diff --git a/media/libaudioclient/tests/audioeffect_tests.cpp b/media/libaudioclient/tests/audioeffect_tests.cpp
new file mode 100644
index 0000000..93fe306
--- /dev/null
+++ b/media/libaudioclient/tests/audioeffect_tests.cpp
@@ -0,0 +1,335 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AudioEffectUnitTests"
+
+#include <gtest/gtest.h>
+#include <media/AudioEffect.h>
+#include <system/audio_effects/effect_visualizer.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+
+static constexpr int kDefaultInputEffectPriority = -1;
+static constexpr int kDefaultOutputEffectPriority = 0;
+
+static const char* gPackageName = "AudioEffectTest";
+
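+// Detects whether an effect of the given type is already attached to the session by attempting to
+// create another instance and checking for ALREADY_EXISTS.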
+bool isEffectExistsOnAudioSession(const effect_uuid_t* type, int priority,
+ audio_session_t sessionId) {
+ std::string packageName{gPackageName};
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageName;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> effect = new AudioEffect(attributionSource);
+ effect->set(type, nullptr /* uuid */, priority, nullptr /* callback */, sessionId);
+ return effect->initCheck() == ALREADY_EXISTS;
+}
+
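+// Returns true if the given effect type appears in the default pre-processing chain of the
+// record's session.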
+bool isEffectDefaultOnRecord(const effect_uuid_t* type, const sp<AudioRecord>& audioRecord) {
+ effect_descriptor_t descriptors[AudioEffect::kMaxPreProcessing];
+ uint32_t numEffects = AudioEffect::kMaxPreProcessing;
+ status_t ret = AudioEffect::queryDefaultPreProcessing(audioRecord->getSessionId(), descriptors,
+ &numEffects);
+ if (ret != OK) {
+ return false;
+ }
+ for (int i = 0; i < numEffects; i++) {
+ if (memcmp(&descriptors[i].type, type, sizeof(effect_uuid_t)) == 0) {
+ return true;
+ }
+ }
+ return false;
+}
+
+void listEffectsAvailable(std::vector<effect_descriptor_t>& descriptors) {
+ uint32_t numEffects = 0;
+ if (NO_ERROR == AudioEffect::queryNumberEffects(&numEffects)) {
+ for (auto i = 0; i < numEffects; i++) {
+ effect_descriptor_t des;
+ if (NO_ERROR == AudioEffect::queryEffect(i, &des)) descriptors.push_back(des);
+ }
+ }
+}
+
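+// Helpers to classify effects by the type bits in their descriptor flags.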
+bool isPreprocessing(effect_descriptor_t& descriptor) {
+ return ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_PRE_PROC);
+}
+
+bool isInsert(effect_descriptor_t& descriptor) {
+ return ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_INSERT);
+}
+
+bool isAux(effect_descriptor_t& descriptor) {
+ return ((descriptor.flags & EFFECT_FLAG_TYPE_MASK) == EFFECT_FLAG_TYPE_AUXILIARY);
+}
+
+bool isFastCompatible(effect_descriptor_t& descriptor) {
+ return !(((descriptor.flags & EFFECT_FLAG_HW_ACC_MASK) == 0) &&
+ ((descriptor.flags & EFFECT_FLAG_NO_PROCESS) == 0));
+}
+
+// UNIT TESTS
+TEST(AudioEffectTest, getEffectDescriptor) {
+ effect_uuid_t randomType = {
+ 0x81781c08, 0x93dd, 0x11ec, 0xb909, {0x02, 0x42, 0xac, 0x12, 0x00, 0x02}};
+ effect_uuid_t randomUuid = {
+ 0x653730e1, 0x1be1, 0x438e, 0xa35a, {0xfc, 0x9b, 0xa1, 0x2a, 0x5e, 0xc9}};
+ effect_uuid_t empty = EFFECT_UUID_INITIALIZER;
+
+ effect_descriptor_t descriptor;
+ EXPECT_EQ(NAME_NOT_FOUND, AudioEffect::getEffectDescriptor(&randomUuid, &randomType,
+ EFFECT_FLAG_TYPE_MASK, &descriptor));
+
+ std::vector<effect_descriptor_t> descriptors;
+ listEffectsAvailable(descriptors);
+
+ for (auto i = 0; i < descriptors.size(); i++) {
+ EXPECT_EQ(NO_ERROR,
+ AudioEffect::getEffectDescriptor(&descriptors[i].uuid, &descriptors[i].type,
+ EFFECT_FLAG_TYPE_MASK, &descriptor));
+ EXPECT_EQ(0, memcmp(&descriptor, &descriptors[i], sizeof(effect_uuid_t)));
+ }
+ // negative tests
+ if (descriptors.size() > 0) {
+ EXPECT_EQ(BAD_VALUE,
+ AudioEffect::getEffectDescriptor(&descriptors[0].uuid, &descriptors[0].type,
+ EFFECT_FLAG_TYPE_MASK, nullptr));
+ }
+ EXPECT_EQ(BAD_VALUE, AudioEffect::getEffectDescriptor(nullptr, nullptr,
+ EFFECT_FLAG_TYPE_PRE_PROC, &descriptor));
+ EXPECT_EQ(BAD_VALUE, AudioEffect::getEffectDescriptor(&empty, &randomType,
+ EFFECT_FLAG_TYPE_MASK, nullptr));
+ EXPECT_EQ(BAD_VALUE, AudioEffect::getEffectDescriptor(nullptr, &randomType,
+ EFFECT_FLAG_TYPE_POST_PROC, &descriptor));
+ EXPECT_EQ(BAD_VALUE, AudioEffect::getEffectDescriptor(&randomUuid, nullptr,
+ EFFECT_FLAG_TYPE_INSERT, &descriptor));
+}
+
+TEST(AudioEffectTest, DISABLED_GetSetParameterForEffect) {
+ std::string packageName{gPackageName};
+ AttributionSourceState attributionSource;
+ attributionSource.packageName = packageName;
+ attributionSource.uid = VALUE_OR_FATAL(legacy2aidl_uid_t_int32_t(getuid()));
+ attributionSource.pid = VALUE_OR_FATAL(legacy2aidl_pid_t_int32_t(getpid()));
+ attributionSource.token = sp<BBinder>::make();
+ sp<AudioEffect> visualizer = new AudioEffect(attributionSource);
+ ASSERT_NE(visualizer, nullptr) << "effect not created";
+ visualizer->set(SL_IID_VISUALIZATION);
+ status_t status = visualizer->initCheck();
+ ASSERT_TRUE(status == NO_ERROR || status == ALREADY_EXISTS) << "Init check error";
+ ASSERT_EQ(NO_ERROR, visualizer->setEnabled(true)) << "visualizer not enabled";
+
+ uint32_t buf32[3][sizeof(effect_param_t) / sizeof(uint32_t) + 2];
+ effect_param_t* vis_none = (effect_param_t*)(buf32[0]);
+ effect_param_t* vis_rms = (effect_param_t*)(buf32[1]);
+ effect_param_t* vis_tmp = (effect_param_t*)(buf32[2]);
+
+ // Visualizer::setMeasurementMode()
+ vis_none->psize = sizeof(uint32_t);
+ vis_none->vsize = sizeof(uint32_t);
+ *(int32_t*)vis_none->data = VISUALIZER_PARAM_MEASUREMENT_MODE;
+ *((int32_t*)vis_none->data + 1) = MEASUREMENT_MODE_NONE;
+ EXPECT_EQ(NO_ERROR, visualizer->setParameter(vis_none))
+ << "setMeasurementMode doesn't report success";
+
+ // Visualizer::getMeasurementMode()
+ vis_tmp->psize = sizeof(uint32_t);
+ vis_tmp->vsize = sizeof(uint32_t);
+ *(int32_t*)vis_tmp->data = VISUALIZER_PARAM_MEASUREMENT_MODE;
+ *((int32_t*)vis_tmp->data + 1) = 23;
+ EXPECT_EQ(NO_ERROR, visualizer->getParameter(vis_tmp))
+ << "getMeasurementMode doesn't report success";
+ EXPECT_EQ(*((int32_t*)vis_tmp->data + 1), *((int32_t*)vis_none->data + 1))
+ << "target mode does not match set mode";
+
+ // Visualizer::setMeasurementModeDeferred()
+ vis_rms->psize = sizeof(uint32_t);
+ vis_rms->vsize = sizeof(uint32_t);
+ *(int32_t*)vis_rms->data = VISUALIZER_PARAM_MEASUREMENT_MODE;
+ *((int32_t*)vis_rms->data + 1) = MEASUREMENT_MODE_PEAK_RMS;
+ EXPECT_EQ(NO_ERROR, visualizer->setParameterDeferred(vis_rms))
+ << "setMeasurementModeDeferred doesn't report success";
+
+ *((int32_t*)vis_tmp->data + 1) = 23;
+ EXPECT_EQ(NO_ERROR, visualizer->getParameter(vis_tmp))
+ << "getMeasurementMode doesn't report success";
+ EXPECT_EQ(*((int32_t*)vis_tmp->data + 1), *((int32_t*)vis_none->data + 1))
+ << "target mode does not match set mode";
+
+ // setParameterCommit
+ EXPECT_EQ(NO_ERROR, visualizer->setParameterCommit())
+ << "setMeasurementModeCommit does not report success";
+
+ // validate Params
+ *((int32_t*)vis_tmp->data + 1) = 23;
+ EXPECT_EQ(NO_ERROR, visualizer->getParameter(vis_tmp))
+ << "getMeasurementMode doesn't report success";
+ EXPECT_EQ(*((int32_t*)vis_tmp->data + 1), *((int32_t*)vis_rms->data + 1))
+ << "target mode does not match set mode";
+}
+
+TEST(AudioEffectTest, ManageSourceDefaultEffects) {
+ int32_t selectedEffect = -1;
+
+ const uint32_t sampleRate = 44100;
+ const audio_format_t format = AUDIO_FORMAT_PCM_16_BIT;
+ const audio_channel_mask_t channelMask = AUDIO_CHANNEL_IN_STEREO;
+ sp<AudioCapture> capture = nullptr;
+
+ std::vector<effect_descriptor_t> descriptors;
+ listEffectsAvailable(descriptors);
+ for (auto i = 0; i < descriptors.size(); i++) {
+ if (isPreprocessing(descriptors[i])) {
+ capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
+ ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
+ EXPECT_EQ(NO_ERROR, capture->create());
+ EXPECT_EQ(NO_ERROR, capture->start());
+ if (!isEffectDefaultOnRecord(&descriptors[i].type, capture->getAudioRecordHandle())) {
+ selectedEffect = i;
+ break;
+ }
+ }
+ }
+ if (selectedEffect == -1) GTEST_SKIP() << " expected at least one preprocessing effect";
+ effect_uuid_t selectedEffectType = descriptors[selectedEffect].type;
+
+ char type[512];
+ AudioEffect::guidToString(&selectedEffectType, type, sizeof(type));
+
+ capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
+ ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
+ EXPECT_EQ(NO_ERROR, capture->create());
+ EXPECT_EQ(NO_ERROR, capture->start());
+ EXPECT_FALSE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+ << "Effect should not have been default on record. " << type;
+ EXPECT_FALSE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
+ capture->getAudioRecordHandle()->getSessionId()))
+ << "Effect should not have been added. " << type;
+ EXPECT_EQ(OK, capture->audioProcess());
+ EXPECT_EQ(OK, capture->stop());
+
+ String16 name{gPackageName};
+ audio_unique_id_t effectId;
+ status_t status = AudioEffect::addSourceDefaultEffect(
+ type, name, nullptr, kDefaultInputEffectPriority, AUDIO_SOURCE_MIC, &effectId);
+ EXPECT_EQ(NO_ERROR, status) << "Adding default effect failed: " << type;
+
+ capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
+ ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
+ EXPECT_EQ(NO_ERROR, capture->create());
+ EXPECT_EQ(NO_ERROR, capture->start());
+ EXPECT_TRUE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+ << "Effect should have been default on record. " << type;
+ EXPECT_TRUE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
+ capture->getAudioRecordHandle()->getSessionId()))
+ << "Effect should have been added. " << type;
+ EXPECT_EQ(OK, capture->audioProcess());
+ EXPECT_EQ(OK, capture->stop());
+
+ status = AudioEffect::removeSourceDefaultEffect(effectId);
+ EXPECT_EQ(NO_ERROR, status);
+ capture = new AudioCapture(AUDIO_SOURCE_MIC, sampleRate, format, channelMask);
+ ASSERT_NE(capture, nullptr) << "Unable to create Record Application";
+ EXPECT_EQ(NO_ERROR, capture->create());
+ EXPECT_EQ(NO_ERROR, capture->start());
+ EXPECT_FALSE(isEffectDefaultOnRecord(&selectedEffectType, capture->getAudioRecordHandle()))
+ << "Effect should not have been default on record. " << type;
+ EXPECT_FALSE(isEffectExistsOnAudioSession(&selectedEffectType, kDefaultInputEffectPriority - 1,
+ capture->getAudioRecordHandle()->getSessionId()))
+ << "Effect should not have been added. " << type;
+ EXPECT_EQ(OK, capture->audioProcess());
+ EXPECT_EQ(OK, capture->stop());
+}
+
+TEST(AudioEffectTest, ManageStreamDefaultEffects) {
+ int32_t selectedEffect = -1;
+
+ std::vector<effect_descriptor_t> descriptors;
+ listEffectsAvailable(descriptors);
+ for (auto i = 0; i < descriptors.size(); i++) {
+ if (isAux(descriptors[i])) {
+ selectedEffect = i;
+ break;
+ }
+ }
+ if (selectedEffect == -1) GTEST_SKIP() << " expected at least one Aux effect";
+ effect_uuid_t* selectedEffectType = &descriptors[selectedEffect].type;
+
+ char type[512];
+ AudioEffect::guidToString(selectedEffectType, type, sizeof(type));
+ // create track
+ audio_attributes_t attributes;
+ attributes.usage = AUDIO_USAGE_MEDIA;
+ attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ auto playback = sp<AudioPlayback>::make(
+ 44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+ ASSERT_NE(nullptr, playback);
+ ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
+ EXPECT_EQ(NO_ERROR, playback->create());
+ EXPECT_EQ(NO_ERROR, playback->start());
+ EXPECT_FALSE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
+ playback->getAudioTrackHandle()->getSessionId()))
+ << "Effect should not have been added. " << type;
+ EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
+ playback->stop();
+ playback.clear();
+
+ String16 name{gPackageName};
+ audio_unique_id_t id;
+ status_t status = AudioEffect::addStreamDefaultEffect(
+ type, name, nullptr, kDefaultOutputEffectPriority, AUDIO_USAGE_MEDIA, &id);
+ EXPECT_EQ(NO_ERROR, status) << "Adding default effect failed: " << type;
+
+ playback = sp<AudioPlayback>::make(
+ 44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+ ASSERT_NE(nullptr, playback);
+ ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
+ EXPECT_EQ(NO_ERROR, playback->create());
+ float level = 0.2f, levelGot;
+ playback->getAudioTrackHandle()->setAuxEffectSendLevel(level);
+ EXPECT_EQ(NO_ERROR, playback->start());
+ EXPECT_TRUE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
+ playback->getAudioTrackHandle()->getSessionId()))
+ << "Effect should have been added. " << type;
+ EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
+ playback->getAudioTrackHandle()->getAuxEffectSendLevel(&levelGot);
+ EXPECT_EQ(level, levelGot);
+ playback->stop();
+ playback.clear();
+
+ status = AudioEffect::removeStreamDefaultEffect(id);
+ EXPECT_EQ(NO_ERROR, status);
+ playback = sp<AudioPlayback>::make(
+ 44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED, &attributes);
+ ASSERT_NE(nullptr, playback);
+ ASSERT_EQ(NO_ERROR, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"));
+ EXPECT_EQ(NO_ERROR, playback->create());
+ EXPECT_EQ(NO_ERROR, playback->start());
+ EXPECT_FALSE(isEffectExistsOnAudioSession(selectedEffectType, kDefaultOutputEffectPriority - 1,
+ playback->getAudioTrackHandle()->getSessionId()))
+ << "Effect should not have been added. " << type;
+ EXPECT_EQ(NO_ERROR, playback->waitForConsumption());
+ playback->stop();
+ playback.clear();
+}
diff --git a/media/libaudioclient/tests/audiorecord_tests.cpp b/media/libaudioclient/tests/audiorecord_tests.cpp
new file mode 100644
index 0000000..754e6cc
--- /dev/null
+++ b/media/libaudioclient/tests/audiorecord_tests.cpp
@@ -0,0 +1,235 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+#define LOG_TAG "AudioRecordTest"
+
+#include <gtest/gtest.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+
+class AudioRecordTest : public ::testing::Test {
+ public:
+ virtual void SetUp() override {
+ mAC = new AudioCapture(AUDIO_SOURCE_DEFAULT, 44100, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_IN_FRONT);
+ ASSERT_NE(nullptr, mAC);
+ ASSERT_EQ(OK, mAC->create()) << "record creation failed";
+ }
+
+ virtual void TearDown() override {
+ if (mAC) ASSERT_EQ(OK, mAC->stop());
+ }
+
+ sp<AudioCapture> mAC;
+};
+
+class AudioRecordCreateTest
+ : public ::testing::TestWithParam<
+ std::tuple<uint32_t, audio_format_t, audio_channel_mask_t, audio_input_flags_t,
+ audio_session_t, audio_source_t>> {
+ public:
+ AudioRecordCreateTest()
+ : mSampleRate(std::get<0>(GetParam())),
+ mFormat(std::get<1>(GetParam())),
+ mChannelMask(std::get<2>(GetParam())),
+ mFlags(std::get<3>(GetParam())),
+ mSessionId(std::get<4>(GetParam())),
+ mInputSource(std::get<5>(GetParam())){};
+
+ const uint32_t mSampleRate;
+ const audio_format_t mFormat;
+ const audio_channel_mask_t mChannelMask;
+ const audio_input_flags_t mFlags;
+ const audio_session_t mSessionId;
+ const audio_source_t mInputSource;
+ const AudioRecord::transfer_type mTransferType = AudioRecord::TRANSFER_OBTAIN;
+
+ sp<AudioCapture> mAC;
+
+ virtual void SetUp() override {
+ mAC = new AudioCapture(mInputSource, mSampleRate, mFormat, mChannelMask, mFlags, mSessionId,
+ mTransferType);
+ ASSERT_NE(nullptr, mAC);
+ ASSERT_EQ(OK, mAC->create()) << "record creation failed";
+ }
+
+ virtual void TearDown() override {
+ if (mAC) ASSERT_EQ(OK, mAC->stop());
+ }
+};
+
+TEST_F(AudioRecordTest, TestSimpleRecord) {
+ EXPECT_EQ(OK, mAC->start()) << "start recording failed";
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+}
+
+TEST_F(AudioRecordTest, TestAudioCbNotifier) {
+ EXPECT_EQ(BAD_VALUE, mAC->getAudioRecordHandle()->addAudioDeviceCallback(nullptr));
+ sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
+ sp<OnAudioDeviceUpdateNotifier> cbOld = new OnAudioDeviceUpdateNotifier();
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cbOld));
+ EXPECT_EQ(INVALID_OPERATION, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cbOld));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->addAudioDeviceCallback(cb));
+ EXPECT_EQ(OK, mAC->start()) << "record creation failed";
+ EXPECT_EQ(OK, cb->waitForAudioDeviceCb());
+ EXPECT_EQ(AUDIO_IO_HANDLE_NONE, cbOld->mAudioIo);
+ EXPECT_EQ(AUDIO_PORT_HANDLE_NONE, cbOld->mDeviceId);
+ EXPECT_NE(AUDIO_IO_HANDLE_NONE, cb->mAudioIo);
+ EXPECT_NE(AUDIO_PORT_HANDLE_NONE, cb->mDeviceId);
+ EXPECT_EQ(BAD_VALUE, mAC->getAudioRecordHandle()->removeAudioDeviceCallback(nullptr));
+ EXPECT_EQ(INVALID_OPERATION, mAC->getAudioRecordHandle()->removeAudioDeviceCallback(cbOld));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->removeAudioDeviceCallback(cb));
+ mAC->stop();
+}
+
+TEST_F(AudioRecordTest, TestEventRecordTrackPause) {
+ const auto playback = sp<AudioPlayback>::make(
+ 8000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO);
+ ASSERT_EQ(OK, playback->loadResource("/data/local/tmp/bbb_1ch_8kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, playback->create()) << "AudioTrack Creation failed";
+ audio_session_t audioTrackSession = playback->getAudioTrackHandle()->getSessionId();
+ EXPECT_EQ(OK, mAC->start(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE, audioTrackSession))
+ << "record creation failed";
+ EXPECT_EQ(OK, playback->start());
+ RawBuffer buffer;
+ status_t status = mAC->obtainBufferCb(buffer);
+ EXPECT_EQ(status, TIMED_OUT) << "Not expecting any callbacks until track sends Sync event";
+ playback->getAudioTrackHandle()->pause();
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+ playback->stop();
+}
+
+TEST_F(AudioRecordTest, TestEventRecordTrackStop) {
+ const auto playback = sp<AudioPlayback>::make(
+ 8000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO);
+ ASSERT_EQ(OK, playback->loadResource("/data/local/tmp/bbb_1ch_8kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, playback->create()) << "AudioTrack Creation failed";
+ audio_session_t audioTrackSession = playback->getAudioTrackHandle()->getSessionId();
+ EXPECT_EQ(OK, mAC->start(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE, audioTrackSession))
+ << "record creation failed";
+ EXPECT_EQ(OK, playback->start());
+ RawBuffer buffer;
+ status_t status = mAC->obtainBufferCb(buffer);
+ EXPECT_EQ(status, TIMED_OUT) << "Not expecting any callbacks until track sends Sync event";
+ playback->stop();
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+}
+
+TEST_F(AudioRecordTest, TestGetSetMarker) {
+ mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition))
+ << "setMarkerPosition() failed";
+ uint32_t marker;
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker))
+ << "getMarkerPosition() failed";
+ EXPECT_EQ(OK, mAC->start()) << "start recording failed";
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+ EXPECT_EQ(marker, mAC->mMarkerPosition)
+ << "configured marker and received marker are different";
+ EXPECT_EQ(mAC->mReceivedCbMarkerAtPosition, mAC->mMarkerPosition)
+ << "configured marker and received cb marker are different";
+}
+
+TEST_F(AudioRecordTest, TestGetSetMarkerPeriodical) {
+ mAC->mMarkerPeriod = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPeriod))
+ << "setPositionUpdatePeriod() failed";
+ uint32_t marker;
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker))
+ << "getPositionUpdatePeriod() failed";
+ EXPECT_EQ(OK, mAC->start()) << "start recording failed";
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+ EXPECT_EQ(marker, mAC->mMarkerPeriod) << "configured marker and received marker are different";
+ EXPECT_EQ(mAC->mReceivedCbMarkerCount, mAC->mNumFramesToRecord / mAC->mMarkerPeriod)
+ << "configured marker and received cb marker are different";
+}
+
+TEST_F(AudioRecordTest, TestGetPosition) {
+ uint32_t position;
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPosition(&position)) << "getPosition() failed";
+ EXPECT_EQ(0, position);
+ EXPECT_EQ(OK, mAC->start()) << "start recording failed";
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+ EXPECT_EQ(OK, mAC->stop());
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPosition(&position)) << "getPosition() failed";
+}
+
+// TODO: Add checkPatchCapture() and verify the patch information via dumpPort() and dumpPatch()
+TEST_P(AudioRecordCreateTest, TestCreateRecord) {
+ EXPECT_EQ(mFormat, mAC->getAudioRecordHandle()->format());
+ EXPECT_EQ(audio_channel_count_from_in_mask(mChannelMask),
+ mAC->getAudioRecordHandle()->channelCount());
+ if (mAC->mFrameCount != 0)
+ EXPECT_LE(mAC->mFrameCount, mAC->getAudioRecordHandle()->frameCount());
+ EXPECT_EQ(mInputSource, mAC->getAudioRecordHandle()->inputSource());
+ if (mSampleRate != 0) EXPECT_EQ(mSampleRate, mAC->getAudioRecordHandle()->getSampleRate());
+ if (mSessionId != AUDIO_SESSION_NONE)
+ EXPECT_EQ(mSessionId, mAC->getAudioRecordHandle()->getSessionId());
+ if (mTransferType != AudioRecord::TRANSFER_CALLBACK) {
+ uint32_t marker;
+ mAC->mMarkerPosition = (mAC->mNotificationFrames << 3) + (mAC->mNotificationFrames >> 1);
+ EXPECT_EQ(INVALID_OPERATION,
+ mAC->getAudioRecordHandle()->setMarkerPosition(mAC->mMarkerPosition));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getMarkerPosition(&marker));
+ EXPECT_EQ(INVALID_OPERATION,
+ mAC->getAudioRecordHandle()->setPositionUpdatePeriod(mAC->mMarkerPosition));
+ EXPECT_EQ(OK, mAC->getAudioRecordHandle()->getPositionUpdatePeriod(&marker));
+ }
+ EXPECT_EQ(OK, mAC->start()) << "start recording failed";
+ EXPECT_EQ(OK, mAC->audioProcess()) << "audioProcess failed";
+}
+
+// For the primary input port
+INSTANTIATE_TEST_SUITE_P(AudioRecordPrimaryInput, AudioRecordCreateTest,
+ ::testing::Combine(::testing::Values(8000, 11025, 12000, 16000, 22050,
+ 24000, 32000, 44100, 48000),
+ ::testing::Values(AUDIO_FORMAT_PCM_8_24_BIT),
+ ::testing::Values(AUDIO_CHANNEL_IN_MONO,
+ AUDIO_CHANNEL_IN_STEREO,
+ AUDIO_CHANNEL_IN_FRONT_BACK),
+ ::testing::Values(AUDIO_INPUT_FLAG_NONE),
+ ::testing::Values(AUDIO_SESSION_NONE),
+ ::testing::Values(AUDIO_SOURCE_DEFAULT)));
+
+// For the fast input port
+INSTANTIATE_TEST_SUITE_P(AudioRecordFastInput, AudioRecordCreateTest,
+ ::testing::Combine(::testing::Values(8000, 11025, 12000, 16000, 22050,
+ 24000, 32000, 44100, 48000),
+ ::testing::Values(AUDIO_FORMAT_PCM_8_24_BIT),
+ ::testing::Values(AUDIO_CHANNEL_IN_MONO,
+ AUDIO_CHANNEL_IN_STEREO,
+ AUDIO_CHANNEL_IN_FRONT_BACK),
+ ::testing::Values(AUDIO_INPUT_FLAG_FAST),
+ ::testing::Values(AUDIO_SESSION_NONE),
+ ::testing::Values(AUDIO_SOURCE_DEFAULT)));
+
+// Miscellaneous input sources
+INSTANTIATE_TEST_SUITE_P(AudioRecordMiscInput, AudioRecordCreateTest,
+ ::testing::Combine(::testing::Values(48000),
+ ::testing::Values(AUDIO_FORMAT_PCM_16_BIT),
+ ::testing::Values(AUDIO_CHANNEL_IN_MONO),
+ ::testing::Values(AUDIO_INPUT_FLAG_NONE),
+ ::testing::Values(AUDIO_SESSION_NONE),
+ ::testing::Values(AUDIO_SOURCE_MIC,
+ AUDIO_SOURCE_CAMCORDER,
+ AUDIO_SOURCE_VOICE_RECOGNITION,
+ AUDIO_SOURCE_VOICE_COMMUNICATION,
+ AUDIO_SOURCE_UNPROCESSED)));
diff --git a/media/libaudioclient/tests/audiorouting_tests.cpp b/media/libaudioclient/tests/audiorouting_tests.cpp
new file mode 100644
index 0000000..32ba597
--- /dev/null
+++ b/media/libaudioclient/tests/audiorouting_tests.cpp
@@ -0,0 +1,253 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+
+#include <cutils/properties.h>
+#include <gtest/gtest.h>
+#include <libxml/parser.h>
+#include <libxml/xinclude.h>
+#include <string.h>
+#include <system/audio_config.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+
+template <class T>
+constexpr void (*xmlDeleter)(T* t);
+template <>
+constexpr auto xmlDeleter<xmlDoc> = xmlFreeDoc;
+template <>
+constexpr auto xmlDeleter<xmlChar> = [](xmlChar* s) { xmlFree(s); };
+
+/** @return a unique_ptr with the correct deleter for the libxml2 object. */
+template <class T>
+constexpr auto make_xmlUnique(T* t) {
+ // Wrap deleter in lambda to enable empty base optimization
+ auto deleter = [](T* t) { xmlDeleter<T>(t); };
+ return std::unique_ptr<T, decltype(deleter)>{t, deleter};
+}
+
+std::string getXmlAttribute(const xmlNode* cur, const char* attribute) {
+ auto charPtr = make_xmlUnique(xmlGetProp(cur, reinterpret_cast<const xmlChar*>(attribute)));
+ if (charPtr == NULL) {
+ return "";
+ }
+ std::string value(reinterpret_cast<const char*>(charPtr.get()));
+ return value;
+}
+
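+// Minimal views of the audio_policy_configuration.xml elements used by the routing tests.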
+struct MixPort {
+ std::string name;
+ std::string role;
+ std::string flags;
+};
+
+struct Route {
+ std::string name;
+ std::string sources;
+ std::string sink;
+};
+
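+// Parses audio_policy_configuration.xml (after resolving XIncludes) and collects the attached
+// devices, the source mix ports and the routes declared by each module.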
+status_t parse_audio_policy_configuration_xml(std::vector<std::string>& attachedDevices,
+ std::vector<MixPort>& mixPorts,
+ std::vector<Route>& routes) {
+ std::string path = audio_find_readable_configuration_file("audio_policy_configuration.xml");
+ if (path.length() == 0) return UNKNOWN_ERROR;
+ auto doc = make_xmlUnique(xmlParseFile(path.c_str()));
+ if (doc == nullptr) return UNKNOWN_ERROR;
+ xmlNode* root = xmlDocGetRootElement(doc.get());
+ if (root == nullptr) return UNKNOWN_ERROR;
+ if (xmlXIncludeProcess(doc.get()) < 0) return UNKNOWN_ERROR;
+ mixPorts.clear();
+ if (!xmlStrcmp(root->name, reinterpret_cast<const xmlChar*>("audioPolicyConfiguration"))) {
+ std::string raw{getXmlAttribute(root, "version")};
+ for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
+ if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("modules"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr; child = child->next) {
+ if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>("module"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("mixPorts"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("mixPort"))) {
+ MixPort mixPort;
+ xmlNode* root = child;
+ mixPort.name = getXmlAttribute(root, "name");
+ mixPort.role = getXmlAttribute(root, "role");
+ mixPort.flags = getXmlAttribute(root, "flags");
+ if (mixPort.role == "source") mixPorts.push_back(mixPort);
+ }
+ }
+ } else if (!xmlStrcmp(child->name, reinterpret_cast<const xmlChar*>(
+ "attachedDevices"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("item"))) {
+ auto xmlValue = make_xmlUnique(xmlNodeListGetString(
+ child->doc, child->xmlChildrenNode, 1));
+ if (xmlValue == nullptr) {
+ raw = "";
+ } else {
+ raw = reinterpret_cast<const char*>(xmlValue.get());
+ }
+ std::string& value = raw;
+ attachedDevices.push_back(std::move(value));
+ }
+ }
+ } else if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("routes"))) {
+ xmlNode* root = child;
+ for (auto* child = root->xmlChildrenNode; child != nullptr;
+ child = child->next) {
+ if (!xmlStrcmp(child->name,
+ reinterpret_cast<const xmlChar*>("route"))) {
+ Route route;
+ xmlNode* root = child;
+ route.name = getXmlAttribute(root, "name");
+ route.sources = getXmlAttribute(root, "sources");
+ route.sink = getXmlAttribute(root, "sink");
+ routes.push_back(route);
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ return OK;
+}
+
+// UNIT TEST
+TEST(AudioTrackTest, TestPerformanceMode) {
+ std::vector<std::string> attachedDevices;
+ std::vector<MixPort> mixPorts;
+ std::vector<Route> routes;
+ EXPECT_EQ(OK, parse_audio_policy_configuration_xml(attachedDevices, mixPorts, routes));
+ std::string output_flags_string[] = {"AUDIO_OUTPUT_FLAG_FAST", "AUDIO_OUTPUT_FLAG_DEEP_BUFFER"};
+ audio_output_flags_t output_flags[] = {AUDIO_OUTPUT_FLAG_FAST, AUDIO_OUTPUT_FLAG_DEEP_BUFFER};
+ audio_flags_mask_t flags[] = {AUDIO_FLAG_LOW_LATENCY, AUDIO_FLAG_DEEP_BUFFER};
+ bool hasFlag = false;
+ for (int i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
+ hasFlag = false;
+ for (int j = 0; j < mixPorts.size() && !hasFlag; j++) {
+ MixPort port = mixPorts[j];
+ if (port.role == "source" && port.flags.find(output_flags_string[i]) != -1) {
+ for (int k = 0; k < routes.size() && !hasFlag; k++) {
+ if (routes[k].sources.find(port.name) != std::string::npos &&
+ std::find(attachedDevices.begin(), attachedDevices.end(), routes[k].sink) !=
+ attachedDevices.end()) {
+ hasFlag = true;
+ std::cerr << "found port with flag " << output_flags_string[i] << "@ "
+ << " port :: name : " << port.name << " role : " << port.role
+ << " port :: flags : " << port.flags
+ << " connected via route name : " << routes[k].name
+ << " route sources : " << routes[k].sources
+ << " route sink : " << routes[k].sink << std::endl;
+ }
+ }
+ }
+ }
+ if (!hasFlag) continue;
+ audio_attributes_t attributes = AUDIO_ATTRIBUTES_INITIALIZER;
+ attributes.usage = AUDIO_USAGE_MEDIA;
+ attributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
+ attributes.flags = flags[i];
+ sp<AudioPlayback> ap = sp<AudioPlayback>::make(
+ 0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE, AudioTrack::TRANSFER_OBTAIN,
+ &attributes);
+ ASSERT_NE(nullptr, ap);
+ ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, ap->create()) << "track creation failed";
+ sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
+ EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cb));
+ EXPECT_EQ(OK, ap->start()) << "audio track start failed";
+ EXPECT_EQ(OK, ap->onProcess());
+ EXPECT_EQ(OK, cb->waitForAudioDeviceCb());
+ EXPECT_TRUE(checkPatchPlayback(cb->mAudioIo, cb->mDeviceId));
+ EXPECT_NE(0, ap->getAudioTrackHandle()->getFlags() & output_flags[i]);
+ audio_patch patch;
+ EXPECT_EQ(OK, getPatchForOutputMix(cb->mAudioIo, patch));
+ for (auto j = 0; j < patch.num_sources; j++) {
+ if (patch.sources[j].type == AUDIO_PORT_TYPE_MIX &&
+ patch.sources[j].ext.mix.handle == cb->mAudioIo) {
+ if ((patch.sources[j].flags.output & output_flags[i]) == 0) {
+ ADD_FAILURE() << "expected output flag " << output_flags[i] << " is absent";
+ std::cerr << dumpPortConfig(patch.sources[j]);
+ }
+ }
+ }
+ ap->stop();
+ ap->getAudioTrackHandle()->removeAudioDeviceCallback(cb);
+ }
+}
+
+TEST(AudioTrackTest, TestRemoteSubmix) {
+ std::vector<std::string> attachedDevices;
+ std::vector<MixPort> mixPorts;
+ std::vector<Route> routes;
+ EXPECT_EQ(OK, parse_audio_policy_configuration_xml(attachedDevices, mixPorts, routes));
+ bool hasFlag = false;
+ for (int j = 0; j < attachedDevices.size() && !hasFlag; j++) {
+ if (attachedDevices[j].find("Remote Submix") != -1) hasFlag = true;
+ }
+ if (!hasFlag) GTEST_SKIP() << " Device does not have Remote Submix port.";
+ sp<AudioCapture> capture = new AudioCapture(AUDIO_SOURCE_REMOTE_SUBMIX, 48000,
+ AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_IN_STEREO);
+ ASSERT_NE(nullptr, capture);
+ ASSERT_EQ(OK, capture->create()) << "record creation failed";
+
+ sp<AudioPlayback> playback = sp<AudioPlayback>::make(
+ 48000 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
+ AUDIO_OUTPUT_FLAG_NONE, AUDIO_SESSION_NONE);
+ ASSERT_NE(nullptr, playback);
+ ASSERT_EQ(OK, playback->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ ASSERT_EQ(OK, playback->create()) << "track creation failed";
+
+ audio_port_v7 port;
+ status_t status = getPortByAttributes(AUDIO_PORT_ROLE_SOURCE, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_IN_REMOTE_SUBMIX, port);
+ EXPECT_EQ(OK, status) << "Could not find port";
+
+ EXPECT_EQ(OK, capture->start()) << "start recording failed";
+ EXPECT_EQ(port.id, capture->getAudioRecordHandle()->getRoutedDeviceId())
+ << "Capture NOT routed on expected port";
+
+ status = getPortByAttributes(AUDIO_PORT_ROLE_SINK, AUDIO_PORT_TYPE_DEVICE,
+ AUDIO_DEVICE_OUT_REMOTE_SUBMIX, port);
+ EXPECT_EQ(OK, status) << "Could not find port";
+
+ EXPECT_EQ(OK, playback->start()) << "audio track start failed";
+ EXPECT_EQ(OK, playback->onProcess());
+ ASSERT_EQ(port.id, playback->getAudioTrackHandle()->getRoutedDeviceId())
+ << "Playback NOT routed on expected port";
+ capture->stop();
+ playback->stop();
+}
diff --git a/media/libaudioclient/tests/audiotrack_tests.cpp b/media/libaudioclient/tests/audiotrack_tests.cpp
new file mode 100644
index 0000000..a49af96
--- /dev/null
+++ b/media/libaudioclient/tests/audiotrack_tests.cpp
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+//#define LOG_NDEBUG 0
+
+#include <gtest/gtest.h>
+
+#include "audio_test_utils.h"
+
+using namespace android;
+
+TEST(AudioTrackTest, TestPlayTrack) {
+ const auto ap = sp<AudioPlayback>::make(44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_STEREO, AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_SESSION_NONE, AudioTrack::TRANSFER_OBTAIN);
+ ASSERT_NE(nullptr, ap);
+ ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, ap->create()) << "track creation failed";
+ EXPECT_EQ(OK, ap->start()) << "audio track start failed";
+ EXPECT_EQ(OK, ap->onProcess());
+ ap->stop();
+}
+
+TEST(AudioTrackTest, TestSeek) {
+ const auto ap = sp<AudioPlayback>::make(
+ 44100 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO);
+ ASSERT_NE(nullptr, ap);
+ ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, ap->create()) << "track creation failed";
+ EXPECT_EQ(OK, ap->start()) << "audio track start failed";
+ EXPECT_EQ(OK, ap->onProcess(true));
+ ap->stop();
+}
+
+TEST(AudioTrackTest, TestAudioCbNotifier) {
+ const auto ap = sp<AudioPlayback>::make(0 /* sampleRate */, AUDIO_FORMAT_PCM_16_BIT,
+ AUDIO_CHANNEL_OUT_STEREO, AUDIO_OUTPUT_FLAG_FAST,
+ AUDIO_SESSION_NONE, AudioTrack::TRANSFER_SHARED);
+ ASSERT_NE(nullptr, ap);
+ ASSERT_EQ(OK, ap->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ EXPECT_EQ(OK, ap->create()) << "track creation failed";
+ EXPECT_EQ(BAD_VALUE, ap->getAudioTrackHandle()->addAudioDeviceCallback(nullptr));
+ sp<OnAudioDeviceUpdateNotifier> cb = new OnAudioDeviceUpdateNotifier();
+ sp<OnAudioDeviceUpdateNotifier> cbOld = new OnAudioDeviceUpdateNotifier();
+ EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cbOld));
+ EXPECT_EQ(INVALID_OPERATION, ap->getAudioTrackHandle()->addAudioDeviceCallback(cbOld));
+ EXPECT_EQ(OK, ap->getAudioTrackHandle()->addAudioDeviceCallback(cb));
+ EXPECT_EQ(OK, ap->start()) << "audio track start failed";
+ EXPECT_EQ(OK, ap->onProcess());
+ EXPECT_EQ(OK, cb->waitForAudioDeviceCb());
+ EXPECT_EQ(AUDIO_IO_HANDLE_NONE, cbOld->mAudioIo);
+ EXPECT_EQ(AUDIO_PORT_HANDLE_NONE, cbOld->mDeviceId);
+ EXPECT_NE(AUDIO_IO_HANDLE_NONE, cb->mAudioIo);
+ EXPECT_NE(AUDIO_PORT_HANDLE_NONE, cb->mDeviceId);
+ EXPECT_TRUE(checkPatchPlayback(cb->mAudioIo, cb->mDeviceId));
+ EXPECT_EQ(BAD_VALUE, ap->getAudioTrackHandle()->removeAudioDeviceCallback(nullptr));
+ EXPECT_EQ(INVALID_OPERATION, ap->getAudioTrackHandle()->removeAudioDeviceCallback(cbOld));
+ EXPECT_EQ(OK, ap->getAudioTrackHandle()->removeAudioDeviceCallback(cb));
+ ap->stop();
+}
+
+class AudioTrackCreateTest
+ : public ::testing::TestWithParam<std::tuple<uint32_t, audio_format_t, audio_channel_mask_t,
+ audio_output_flags_t, audio_session_t>> {
+ public:
+ AudioTrackCreateTest()
+ : mSampleRate(std::get<0>(GetParam())),
+ mFormat(std::get<1>(GetParam())),
+ mChannelMask(std::get<2>(GetParam())),
+ mFlags(std::get<3>(GetParam())),
+ mSessionId(std::get<4>(GetParam())){};
+
+ const uint32_t mSampleRate;
+ const audio_format_t mFormat;
+ const audio_channel_mask_t mChannelMask;
+ const audio_output_flags_t mFlags;
+ const audio_session_t mSessionId;
+
+ sp<AudioPlayback> mAP;
+
+ virtual void SetUp() override {
+ mAP = sp<AudioPlayback>::make(mSampleRate, mFormat, mChannelMask, mFlags,
+ mSessionId);
+ ASSERT_NE(nullptr, mAP);
+ ASSERT_EQ(OK, mAP->loadResource("/data/local/tmp/bbb_2ch_24kHz_s16le.raw"))
+ << "Unable to open Resource";
+ ASSERT_EQ(OK, mAP->create()) << "track creation failed";
+ }
+
+ virtual void TearDown() override {
+ if (mAP) mAP->stop();
+ }
+};
+
+TEST_P(AudioTrackCreateTest, TestCreateTrack) {
+ EXPECT_EQ(mFormat, mAP->getAudioTrackHandle()->format());
+ EXPECT_EQ(audio_channel_count_from_out_mask(mChannelMask),
+ mAP->getAudioTrackHandle()->channelCount());
+ if (mSampleRate != 0) EXPECT_EQ(mSampleRate, mAP->getAudioTrackHandle()->getSampleRate());
+ if (mSessionId != AUDIO_SESSION_NONE)
+ EXPECT_EQ(mSessionId, mAP->getAudioTrackHandle()->getSessionId());
+ EXPECT_EQ(mSampleRate, mAP->getAudioTrackHandle()->getOriginalSampleRate());
+ EXPECT_EQ(OK, mAP->start()) << "audio track start failed";
+ EXPECT_EQ(OK, mAP->onProcess());
+}
+
+// sampleRate, format, channelMask, flags, sessionId
+INSTANTIATE_TEST_SUITE_P(
+ AudioTrackParameterizedTest, AudioTrackCreateTest,
+ ::testing::Combine(::testing::Values(48000), ::testing::Values(AUDIO_FORMAT_PCM_16_BIT),
+ ::testing::Values(AUDIO_CHANNEL_OUT_STEREO),
+ ::testing::Values(AUDIO_OUTPUT_FLAG_NONE,
+ AUDIO_OUTPUT_FLAG_PRIMARY | AUDIO_OUTPUT_FLAG_FAST,
+ AUDIO_OUTPUT_FLAG_RAW | AUDIO_OUTPUT_FLAG_FAST,
+ AUDIO_OUTPUT_FLAG_DEEP_BUFFER),
+ ::testing::Values(AUDIO_SESSION_NONE)));
diff --git a/media/libaudioclient/tests/bbb_1ch_8kHz_s16le.raw b/media/libaudioclient/tests/bbb_1ch_8kHz_s16le.raw
new file mode 100644
index 0000000..2d1e4bf
--- /dev/null
+++ b/media/libaudioclient/tests/bbb_1ch_8kHz_s16le.raw
Binary files differ
diff --git a/media/libaudioclient/tests/bbb_2ch_24kHz_s16le.raw b/media/libaudioclient/tests/bbb_2ch_24kHz_s16le.raw
new file mode 100644
index 0000000..c8ac5f7
--- /dev/null
+++ b/media/libaudioclient/tests/bbb_2ch_24kHz_s16le.raw
Binary files differ
diff --git a/media/libaudioclient/tests/test_create_audiorecord.cpp b/media/libaudioclient/tests/test_create_audiorecord.cpp
index 2e0883b..277110b 100644
--- a/media/libaudioclient/tests/test_create_audiorecord.cpp
+++ b/media/libaudioclient/tests/test_create_audiorecord.cpp
@@ -29,14 +29,13 @@
#define NUM_ARGUMENTS 8
#define VERSION_VALUE "1.0"
-#define PACKAGE_NAME "AudioRecord test"
+#define PACKAGE_NAME "AudioRecord test"
namespace android {
using android::content::AttributionSourceState;
-int testRecord(FILE *inputFile, int outputFileFd)
-{
+int testRecord(FILE* inputFile, int outputFileFd) {
char line[MAX_INPUT_FILE_LINE_LENGTH];
uint32_t testCount = 0;
Vector<String16> args;
@@ -47,11 +46,9 @@
attributionSource.token = sp<BBinder>::make();
if (inputFile == nullptr) {
- sp<AudioRecord> record = new AudioRecord(AUDIO_SOURCE_DEFAULT,
- 0 /* sampleRate */,
- AUDIO_FORMAT_DEFAULT,
- AUDIO_CHANNEL_IN_MONO,
- attributionSource);
+ sp<AudioRecord> record =
+ new AudioRecord(AUDIO_SOURCE_DEFAULT, 0 /* sampleRate */, AUDIO_FORMAT_DEFAULT,
+ AUDIO_CHANNEL_IN_MONO, attributionSource);
if (record == 0 || record->initCheck() != NO_ERROR) {
write(outputFileFd, "Error creating AudioRecord\n",
sizeof("Error creating AudioRecord\n"));
@@ -80,11 +77,10 @@
char statusStr[MAX_OUTPUT_FILE_LINE_LENGTH];
bool fast = false;
- if (sscanf(line, " %u %x %x %zu %d %x %u %u",
- &sampleRate, &format, &channelMask,
- &frameCount, ¬ificationFrames,
- &flags, &sessionId, &inputSource) != NUM_ARGUMENTS) {
- fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ if (sscanf(line, " %u %x %x %zu %d %x %u %u", &sampleRate, &format, &channelMask,
+ &frameCount, ¬ificationFrames, &flags, &sessionId,
+ &inputSource) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount + 1);
ret = 1;
continue;
}
@@ -100,21 +96,10 @@
sp<AudioRecord> record = new AudioRecord(attributionSource);
const auto emptyCallback = sp<AudioRecord::IAudioRecordCallback>::make();
- record->set(AUDIO_SOURCE_DEFAULT,
- sampleRate,
- format,
- channelMask,
- frameCount,
- fast ? emptyCallback : nullptr,
- notificationFrames,
- false,
- sessionId,
- fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT,
- flags,
- getuid(),
- getpid(),
- &attributes,
- AUDIO_PORT_HANDLE_NONE);
+ record->set(AUDIO_SOURCE_DEFAULT, sampleRate, format, channelMask, frameCount,
+ fast ? emptyCallback : nullptr, notificationFrames, false, sessionId,
+ fast ? AudioRecord::TRANSFER_CALLBACK : AudioRecord::TRANSFER_DEFAULT, flags,
+ getuid(), getpid(), &attributes, AUDIO_PORT_HANDLE_NONE);
status = record->initCheck();
sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
write(outputFileFd, statusStr, strlen(statusStr));
@@ -126,11 +111,8 @@
return ret;
}
-}; // namespace android
+}; // namespace android
-
-int main(int argc, char **argv)
-{
+int main(int argc, char** argv) {
return android::main(argc, argv, android::testRecord);
}
-
diff --git a/media/libaudioclient/tests/test_create_audiotrack.cpp b/media/libaudioclient/tests/test_create_audiotrack.cpp
index e7231d3..4e09e21 100644
--- a/media/libaudioclient/tests/test_create_audiotrack.cpp
+++ b/media/libaudioclient/tests/test_create_audiotrack.cpp
@@ -32,18 +32,15 @@
namespace android {
-int testTrack(FILE *inputFile, int outputFileFd)
-{
+int testTrack(FILE* inputFile, int outputFileFd) {
char line[MAX_INPUT_FILE_LINE_LENGTH];
uint32_t testCount = 0;
Vector<String16> args;
int ret = 0;
if (inputFile == nullptr) {
- sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_DEFAULT,
- 0 /* sampleRate */,
- AUDIO_FORMAT_DEFAULT,
- AUDIO_CHANNEL_OUT_STEREO);
+ sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_DEFAULT, 0 /* sampleRate */,
+ AUDIO_FORMAT_DEFAULT, AUDIO_CHANNEL_OUT_STEREO);
if (track == 0 || track->initCheck() != NO_ERROR) {
write(outputFileFd, "Error creating AudioTrack\n",
sizeof("Error creating AudioTrack\n"));
@@ -78,11 +75,10 @@
bool offload = false;
bool fast = false;
- if (sscanf(line, " %u %x %x %zu %d %u %x %u %u %u",
- &sampleRate, &format, &channelMask,
- &frameCount, ¬ificationFrames, &useSharedBuffer,
- &flags, &sessionId, &usage, &contentType) != NUM_ARGUMENTS) {
- fprintf(stderr, "Malformed line for test #%u in input file\n", testCount+1);
+ if (sscanf(line, " %u %x %x %zu %d %u %x %u %u %u", &sampleRate, &format, &channelMask,
+ &frameCount, ¬ificationFrames, &useSharedBuffer, &flags, &sessionId, &usage,
+ &contentType) != NUM_ARGUMENTS) {
+ fprintf(stderr, "Malformed line for test #%u in input file\n", testCount + 1);
ret = 1;
continue;
}
@@ -90,7 +86,7 @@
if (useSharedBuffer != 0) {
size_t heapSize = audio_channel_count_from_out_mask(channelMask) *
- audio_bytes_per_sample(format) * frameCount;
+ audio_bytes_per_sample(format) * frameCount;
heap = new MemoryDealer(heapSize, "AudioTrack Heap Base");
sharedBuffer = heap->allocate(heapSize);
frameCount = 0;
@@ -111,25 +107,13 @@
attributes.usage = usage;
sp<AudioTrack> track = new AudioTrack();
const auto emptyCallback = sp<AudioTrack::IAudioTrackCallback>::make();
- track->set(AUDIO_STREAM_DEFAULT,
- sampleRate,
- format,
- channelMask,
- frameCount,
- flags,
- (fast || offload) ? emptyCallback : nullptr,
- notificationFrames,
- sharedBuffer,
- false,
- sessionId,
- ((fast && sharedBuffer == 0) || offload) ?
- AudioTrack::TRANSFER_CALLBACK : AudioTrack::TRANSFER_DEFAULT,
- offload ? &offloadInfo : nullptr,
- AttributionSourceState(),
- &attributes,
- false,
- 1.0f,
- AUDIO_PORT_HANDLE_NONE);
+ track->set(AUDIO_STREAM_DEFAULT, sampleRate, format, channelMask, frameCount, flags,
+ (fast || offload) ? emptyCallback : nullptr, notificationFrames, sharedBuffer,
+ false, sessionId,
+ ((fast && sharedBuffer == 0) || offload) ? AudioTrack::TRANSFER_CALLBACK
+ : AudioTrack::TRANSFER_DEFAULT,
+ offload ? &offloadInfo : nullptr, AttributionSourceState(), &attributes, false,
+ 1.0f, AUDIO_PORT_HANDLE_NONE);
status = track->initCheck();
sprintf(statusStr, "\n#### Test %u status %d\n", testCount, status);
write(outputFileFd, statusStr, strlen(statusStr));
@@ -141,11 +125,8 @@
return ret;
}
-}; // namespace android
+}; // namespace android
-
-int main(int argc, char **argv)
-{
+int main(int argc, char** argv) {
return android::main(argc, argv, android::testTrack);
}
-
diff --git a/media/libaudioclient/tests/test_create_utils.cpp b/media/libaudioclient/tests/test_create_utils.cpp
index caf5227..c2c2e8b 100644
--- a/media/libaudioclient/tests/test_create_utils.cpp
+++ b/media/libaudioclient/tests/test_create_utils.cpp
@@ -23,10 +23,10 @@
namespace android {
-int readLine(FILE *inputFile, char *line, int size) {
+int readLine(FILE* inputFile, char* line, int size) {
int ret = 0;
while (true) {
- char *str = fgets(line, size, inputFile);
+ char* str = fgets(line, size, inputFile);
if (str == nullptr) {
ret = -1;
break;
@@ -42,8 +42,7 @@
return ret;
}
-bool checkVersion(FILE *inputFile, const char *version)
-{
+bool checkVersion(FILE* inputFile, const char* version) {
char line[MAX_INPUT_FILE_LINE_LENGTH];
char versionKey[MAX_INPUT_FILE_LINE_LENGTH];
char versionValue[MAX_INPUT_FILE_LINE_LENGTH];
@@ -68,9 +67,8 @@
return true;
}
-int main(int argc, char **argv, test_func_t testFunc)
-{
- FILE *inputFile = nullptr;
+int main(int argc, char** argv, test_func_t testFunc) {
+ FILE* inputFile = nullptr;
int outputFileFd = STDOUT_FILENO;
mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH;
int ret = 0;
@@ -96,7 +94,7 @@
if (strcmp(*argv, "-o") == 0) {
argv++;
if (*argv) {
- outputFileFd = open(*argv, O_WRONLY|O_CREAT, mode);
+ outputFileFd = open(*argv, O_WRONLY | O_CREAT, mode);
if (outputFileFd < 0) {
ret = 1;
}
@@ -126,5 +124,4 @@
return ret;
}
-}; // namespace android
-
+}; // namespace android
diff --git a/media/libaudioclient/tests/test_create_utils.h b/media/libaudioclient/tests/test_create_utils.h
index 9a6f9fa..110baf7 100644
--- a/media/libaudioclient/tests/test_create_utils.h
+++ b/media/libaudioclient/tests/test_create_utils.h
@@ -27,13 +27,12 @@
namespace android {
-int readLine(FILE *inputFile, char *line, int size);
+int readLine(FILE* inputFile, char* line, int size);
-bool checkVersion(FILE *inputFile, const char *version);
+bool checkVersion(FILE* inputFile, const char* version);
+typedef int (*test_func_t)(FILE* inputFile, int outputFileFd);
-typedef int (*test_func_t)(FILE *inputFile, int outputFileFd);
+int main(int argc, char** argv, test_func_t testFunc);
-int main(int argc, char **argv, test_func_t testFunc);
-
-}; // namespace android
+}; // namespace android
diff --git a/media/libaudioclient/tests/trackplayerbase_tests.cpp b/media/libaudioclient/tests/trackplayerbase_tests.cpp
new file mode 100644
index 0000000..c9b704d
--- /dev/null
+++ b/media/libaudioclient/tests/trackplayerbase_tests.cpp
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2021 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "TrackPlayerBaseTest"
+
+#include <gtest/gtest.h>
+
+#include <media/TrackPlayerBase.h>
+
+using namespace android;
+using namespace android::media;
+
+class TrackPlayer : public TrackPlayerBase, public AudioTrack::IAudioTrackCallback {
+ public:
+ // methods protected in base class
+ using TrackPlayerBase::playerPause;
+ using TrackPlayerBase::playerSetVolume;
+ using TrackPlayerBase::playerStart;
+ using TrackPlayerBase::playerStop;
+};
+
+class TrackPlayerBaseTest
+ : public ::testing::TestWithParam<std::tuple<double, double, uint32_t, uint32_t>> {
+ public:
+ TrackPlayerBaseTest()
+ : mDuration(std::get<0>(GetParam())),
+ mPulseFreq(std::get<1>(GetParam())),
+ mChannelCount(std::get<2>(GetParam())),
+ mSampleRate(std::get<3>(GetParam())){};
+
+ virtual void SetUp() override {
+ mFrameCount = mDuration * mSampleRate;
+ audio_channel_mask_t channelMask = audio_channel_out_mask_from_count(mChannelCount);
+ sp<AudioTrack> track = new AudioTrack(mStreamType, mSampleRate, mFormat, channelMask,
+ mFrameCount, mFlags, nullptr /* callback */,
+ 0 /* notificationFrames */, AUDIO_SESSION_NONE);
+ ASSERT_EQ(track->initCheck(), NO_ERROR);
+
+ mPlayer = new TrackPlayer();
+ mPlayer->init(track.get(), mPlayer, PLAYER_TYPE_AAUDIO, AUDIO_USAGE_MEDIA,
+ AUDIO_SESSION_NONE);
+ sp<AudioTrack> playerTrack = mPlayer->mAudioTrack;
+ ASSERT_EQ(playerTrack->initCheck(), NO_ERROR);
+
+ mBufferSize = mFrameCount * playerTrack->frameSize();
+ mBuffer.resize(mBufferSize, 0);
+
+ // populate buffer
+ ASSERT_NE(mPulseFreq, 0);
+ int32_t nPulseSamples = mSampleRate / mPulseFreq;
+ int32_t pulseSize = nPulseSamples * playerTrack->frameSize();
+
+ int32_t marker = 0;
+ while (marker + pulseSize <= mBufferSize) {
+ memset(mBuffer.data() + marker, 127, pulseSize / 2);
+ marker += pulseSize;
+ }
+ }
+
+ void playBuffer() {
+ bool blocking = true;
+ ssize_t nbytes = mPlayer->mAudioTrack->write(mBuffer.data(), mBufferSize, blocking);
+ EXPECT_EQ(nbytes, mBufferSize) << "Did not write all data in blocking mode";
+ }
+
+ const double mDuration; // seconds
+ sp<TrackPlayer> mPlayer;
+
+ private:
+ const double mPulseFreq;
+ const uint32_t mChannelCount;
+ const uint32_t mSampleRate;
+
+ const audio_format_t mFormat = AUDIO_FORMAT_PCM_16_BIT;
+ const audio_output_flags_t mFlags = AUDIO_OUTPUT_FLAG_NONE;
+ const audio_stream_type_t mStreamType = AUDIO_STREAM_MUSIC;
+
+ int32_t mBufferSize;
+ int32_t mFrameCount;
+ std::vector<uint8_t> mBuffer;
+};
+
+class PlaybackTestParam : public TrackPlayerBaseTest {};
+
+TEST_P(PlaybackTestParam, PlaybackTest) {
+ // no-op implementation
+ EXPECT_TRUE(mPlayer->setStartDelayMs(0).isOk());
+
+ ASSERT_EQ(mPlayer->playerStart(), NO_ERROR);
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+ EXPECT_EQ(mPlayer->playerStop(), NO_ERROR);
+}
+
+INSTANTIATE_TEST_SUITE_P(TrackPlayerTest, PlaybackTestParam,
+ ::testing::Values(std::make_tuple(2.5, 25.0, 2, 48000)));
+
+class ChangeVolumeTestParam : public TrackPlayerBaseTest {};
+
+TEST_P(ChangeVolumeTestParam, ChangeVolumeTest) {
+ float volume = 1.0f;
+ (void)mPlayer->setPlayerVolume(volume / 2, volume);
+
+ ASSERT_TRUE(mPlayer->start().isOk());
+ ASSERT_EQ(mPlayer->playerSetVolume(), NO_ERROR);
+
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+
+ EXPECT_TRUE(mPlayer->stop().isOk());
+
+ std::vector<float> setVol = {0.95f, 0.05f, 0.5f, 0.25f, -1.0f, 1.0f, 1.0f};
+ std::vector<float> setPan = {0.0f, 0.0f, 1.0f, -1.0f, -1.0f, 0.5f, -0.5f};
+
+ ASSERT_TRUE(mPlayer->start().isOk());
+
+ for (int32_t i = 0; i < setVol.size(); i++) {
+ EXPECT_TRUE(mPlayer->setVolume(setVol[i]).isOk());
+ EXPECT_TRUE(mPlayer->setPan(setPan[i]).isOk());
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+ }
+ EXPECT_TRUE(mPlayer->stop().isOk());
+}
+
+INSTANTIATE_TEST_SUITE_P(TrackPlayerTest, ChangeVolumeTestParam,
+ ::testing::Values(std::make_tuple(1.0, 100.0, 1, 24000)));
+
+class PauseTestParam : public TrackPlayerBaseTest {};
+
+TEST_P(PauseTestParam, PauseTest) {
+ ASSERT_EQ(mPlayer->playerStart(), NO_ERROR);
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+
+ ASSERT_EQ(mPlayer->playerPause(), NO_ERROR);
+ ASSERT_EQ(mPlayer->playerStart(), NO_ERROR);
+
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+
+ EXPECT_EQ(mPlayer->playerStop(), NO_ERROR);
+
+ for (int32_t i = 0; i < 5; i++) {
+ ASSERT_TRUE(mPlayer->start().isOk());
+ ASSERT_NO_FATAL_FAILURE(playBuffer());
+ ASSERT_TRUE(mPlayer->pause().isOk());
+ }
+ EXPECT_TRUE(mPlayer->stop().isOk());
+}
+
+INSTANTIATE_TEST_SUITE_P(TrackPlayerTest, PauseTestParam,
+ ::testing::Values(std::make_tuple(1.0, 75.0, 2, 24000)));
diff --git a/media/libaudiofoundation/DeviceDescriptorBase.cpp b/media/libaudiofoundation/DeviceDescriptorBase.cpp
index 5ffbffc..91efb96 100644
--- a/media/libaudiofoundation/DeviceDescriptorBase.cpp
+++ b/media/libaudiofoundation/DeviceDescriptorBase.cpp
@@ -126,7 +126,14 @@
"%*sEncapsulation modes: %u, metadata types: %u\n", spaces, "",
mEncapsulationModes, mEncapsulationMetadataTypes));
- AudioPort::dump(dst, spaces, nullptr, verbose);
+ std::string portStr;
+ AudioPort::dump(&portStr, spaces, nullptr, verbose);
+ if (!portStr.empty()) {
+ if (!mName.empty()) {
+ dst->append(base::StringPrintf("%*s", spaces, ""));
+ }
+ dst->append(portStr);
+ }
}
std::string DeviceDescriptorBase::toString(bool includeSensitiveInfo) const
diff --git a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
index 3137e13..d8cf20e 100644
--- a/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
+++ b/media/libeffects/hapticgenerator/EffectHapticGenerator.cpp
@@ -22,6 +22,7 @@
#include <algorithm>
#include <memory>
+#include <sstream>
#include <string>
#include <utility>
@@ -94,6 +95,33 @@
return defaultValue;
}
+std::string hapticParamToString(const struct HapticGeneratorParam& param) {
+ std::stringstream ss;
+ ss << "\t\tHapticGenerator Parameters:\n";
+ ss << "\t\t- resonant frequency: " << param.resonantFrequency << '\n';
+ ss << "\t\t- bpf Q: " << param.bpfQ << '\n';
+ ss << "\t\t- slow env normalization power: " << param.slowEnvNormalizationPower << '\n';
+ ss << "\t\t- bsf zero Q: " << param.bsfZeroQ << '\n';
+ ss << "\t\t- bsf pole Q: " << param.bsfPoleQ << '\n';
+ ss << "\t\t- distortion corner frequency: " << param.distortionCornerFrequency << '\n';
+ ss << "\t\t- distortion input gain: " << param.distortionInputGain << '\n';
+ ss << "\t\t- distortion cube threshold: " << param.distortionCubeThreshold << '\n';
+ ss << "\t\t- distortion output gain: " << param.distortionOutputGain << '\n';
+ return ss.str();
+}
+
+std::string hapticSettingToString(const struct HapticGeneratorParam& param) {
+ std::stringstream ss;
+ ss << "\t\tHaptic setting:\n";
+ ss << "\t\t- tracks intensity map:\n";
+ for (const auto&[id, intensity] : param.id2Intensity) {
+ ss << "\t\t\t- id=" << id << ", intensity=" << (int) intensity;
+ }
+ ss << "\t\t- max intensity: " << (int) param.maxHapticIntensity << '\n';
+ ss << "\t\t- max haptic amplitude: " << param.maxHapticAmplitude << '\n';
+ return ss.str();
+}
+
int HapticGenerator_Init(struct HapticGeneratorContext *context) {
context->itfe = &gHapticGeneratorInterface;
@@ -129,7 +157,7 @@
context->param.distortionCubeThreshold = 0.1f;
context->param.distortionOutputGain = getFloatProperty(
"vendor.audio.hapticgenerator.distortion.output.gain", DEFAULT_DISTORTION_OUTPUT_GAIN);
- ALOGD("Using distortion output gain as %f", context->param.distortionOutputGain);
+ ALOGD("%s\n%s", __func__, hapticParamToString(context->param).c_str());
context->state = HAPTICGENERATOR_STATE_INITIALIZED;
return 0;
@@ -289,6 +317,7 @@
}
int id = *(int *) value;
os::HapticScale hapticIntensity = static_cast<os::HapticScale>(*((int *) value + 1));
+ ALOGD("Setting haptic intensity as %d", hapticIntensity);
if (hapticIntensity == os::HapticScale::MUTE) {
context->param.id2Intensity.erase(id);
} else {
@@ -313,6 +342,10 @@
context->param.bsfZeroQ = isnan(qFactor) ? DEFAULT_BSF_POLE_Q : qFactor;
context->param.bsfPoleQ = context->param.bsfZeroQ / 2.0f;
context->param.maxHapticAmplitude = maxAmplitude;
+ ALOGD("Updating vibrator info, resonantFrequency=%f, bsfZeroQ=%f, bsfPoleQ=%f, "
+ "maxHapticAmplitude=%f",
+ context->param.resonantFrequency, context->param.bsfZeroQ, context->param.bsfPoleQ,
+ context->param.maxHapticAmplitude);
if (context->processorsRecord.bpf != nullptr) {
context->processorsRecord.bpf->setCoefficients(
@@ -358,6 +391,11 @@
return in;
}
+void HapticGenerator_Dump(int32_t fd, const struct HapticGeneratorParam& param) {
+ dprintf(fd, "%s", hapticParamToString(param).c_str());
+ dprintf(fd, "%s", hapticSettingToString(param).c_str());
+}
+
} // namespace (anonymous)
//-----------------------------------------------------------------------------
@@ -562,6 +600,10 @@
case EFFECT_CMD_SET_AUDIO_MODE:
break;
+ case EFFECT_CMD_DUMP:
+ HapticGenerator_Dump(*(reinterpret_cast<int32_t*>(cmdData)), context->param);
+ break;
+
default:
ALOGW("HapticGenerator_Command invalid command %u", cmdCode);
return -EINVAL;
diff --git a/media/libheif/HeifDecoderImpl.cpp b/media/libheif/HeifDecoderImpl.cpp
index 1b8656d..923f5c1 100644
--- a/media/libheif/HeifDecoderImpl.cpp
+++ b/media/libheif/HeifDecoderImpl.cpp
@@ -735,9 +735,9 @@
HeifFrameInfo* info = &mImageInfo;
if (info != nullptr) {
return mImageInfo.mBitDepth;
+ } else {
+ return 0;
}
-
- return 0;
}
} // namespace android
diff --git a/media/libheif/include/HeifDecoderAPI.h b/media/libheif/include/HeifDecoderAPI.h
index dc12486..56f4765 100644
--- a/media/libheif/include/HeifDecoderAPI.h
+++ b/media/libheif/include/HeifDecoderAPI.h
@@ -47,7 +47,7 @@
int32_t mRotationAngle; // Rotation angle, clockwise, should be multiple of 90
uint32_t mBytesPerPixel; // Number of bytes for one pixel
int64_t mDurationUs; // Duration of the frame in us
- uint32_t mBitDepth; // Number of bits for each of the R/G/B channels
+ uint32_t mBitDepth; // Number of bits of R/G/B channel
std::vector<uint8_t> mIccData; // ICC data array
};
@@ -164,7 +164,7 @@
virtual size_t skipScanlines(size_t count) = 0;
/*
- * Returns color depth in bits for each of the R/G/B channels.
+ * Returns color depth of R/G/B channel.
*/
virtual uint32_t getColorDepth() = 0;
diff --git a/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
index 7799f44..a189d04 100644
--- a/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
+++ b/media/libmediaplayerservice/fuzzer/mediaplayer_fuzzer.cpp
@@ -26,6 +26,8 @@
#include <media/IMediaRecorder.h>
#include <media/IRemoteDisplay.h>
#include <media/IRemoteDisplayClient.h>
+#include <media/MediaHTTPConnection.h>
+#include <media/MediaHTTPService.h>
#include <media/stagefright/RemoteDataSource.h>
#include <media/stagefright/foundation/base64.h>
#include <thread>
@@ -102,6 +104,42 @@
IBinder *onAsBinder() { return nullptr; };
};
+struct TestMediaHTTPConnection : public MediaHTTPConnection {
+ public:
+ TestMediaHTTPConnection() {}
+ virtual ~TestMediaHTTPConnection() {}
+
+ virtual bool connect(const char* /*uri*/, const KeyedVector<String8, String8>* /*headers*/) {
+ return true;
+ }
+
+ virtual void disconnect() { return; }
+
+ virtual ssize_t readAt(off64_t /*offset*/, void* /*data*/, size_t size) { return size; }
+
+ virtual off64_t getSize() { return 0; }
+ virtual status_t getMIMEType(String8* /*mimeType*/) { return NO_ERROR; }
+ virtual status_t getUri(String8* /*uri*/) { return NO_ERROR; }
+
+ private:
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPConnection);
+};
+
+struct TestMediaHTTPService : public BnInterface<IMediaHTTPService> {
+ public:
+ TestMediaHTTPService() {}
+ ~TestMediaHTTPService(){};
+
+ virtual sp<MediaHTTPConnection> makeHTTPConnection() {
+ mMediaHTTPConnection = sp<TestMediaHTTPConnection>::make();
+ return mMediaHTTPConnection;
+ }
+
+ private:
+ sp<TestMediaHTTPConnection> mMediaHTTPConnection = nullptr;
+ DISALLOW_EVIL_CONSTRUCTORS(TestMediaHTTPService);
+};
+
class BinderDeathNotifier : public IBinder::DeathRecipient {
public:
void binderDied(const wp<IBinder> &) { abort(); }
@@ -140,7 +178,9 @@
AString out;
encodeBase64(uriSuffix.data(), uriSuffix.size(), &out);
uri += out.c_str();
- status = mMediaPlayer->setDataSource(nullptr /*httpService*/, uri.c_str(), &headers);
+ sp<TestMediaHTTPService> testService = sp<TestMediaHTTPService>::make();
+ status =
+ mMediaPlayer->setDataSource(testService /*httpService*/, uri.c_str(), &headers);
break;
}
case fd: {
diff --git a/media/libnblog/Reader.cpp b/media/libnblog/Reader.cpp
index 67d028d..d6232d4 100644
--- a/media/libnblog/Reader.cpp
+++ b/media/libnblog/Reader.cpp
@@ -208,11 +208,14 @@
}
while (back + Entry::kPreviousLengthOffset >= front) {
const uint8_t *prev = back - back[Entry::kPreviousLengthOffset] - Entry::kOverhead;
- const Event type = (const Event)prev[offsetof(entry, type)];
if (prev < front
- || prev + prev[offsetof(entry, length)] + Entry::kOverhead != back
- || type <= EVENT_RESERVED || type >= EVENT_UPPER_BOUND) {
- // prev points to an out of limits or inconsistent entry
+ || prev + prev[offsetof(entry, length)] + Entry::kOverhead != back) {
+ // prev points to an out of limits entry
+ return nullptr;
+ }
+ const Event type = (const Event)prev[offsetof(entry, type)];
+ if (type <= EVENT_RESERVED || type >= EVENT_UPPER_BOUND) {
+ // prev points to an inconsistent entry
return nullptr;
}
// if invalidTypes does not contain the type, then the type is valid.
diff --git a/media/libstagefright/FrameDecoder.cpp b/media/libstagefright/FrameDecoder.cpp
index 3df8766..6de112a 100644
--- a/media/libstagefright/FrameDecoder.cpp
+++ b/media/libstagefright/FrameDecoder.cpp
@@ -349,6 +349,10 @@
status_t err = OK;
bool done = false;
size_t retriesLeft = kRetryCount;
+ if (!mDecoder) {
+ ALOGE("decoder is not initialized");
+ return NO_INIT;
+ }
do {
size_t index;
int64_t ptsUs = 0LL;
@@ -913,7 +917,7 @@
return ERROR_MALFORMED;
}
- int32_t width, height, stride, srcFormat;
+ int32_t width, height, stride;
if (outputFormat->findInt32("width", &width) == false) {
ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
return ERROR_MALFORMED;
@@ -926,10 +930,9 @@
ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
return ERROR_MALFORMED;
}
- if (outputFormat->findInt32("color-format", &srcFormat) == false) {
- ALOGE("MediaImageDecoder::onOutputReceived: color format is missing in outputFormat");
- return ERROR_MALFORMED;
- }
+
+ int32_t srcFormat;
+ CHECK(outputFormat->findInt32("color-format", &srcFormat));
uint32_t bitDepth = 8;
if (COLOR_FormatYUVP010 == srcFormat) {
diff --git a/media/libstagefright/HevcUtils.cpp b/media/libstagefright/HevcUtils.cpp
index 5f9c20e..60df162 100644
--- a/media/libstagefright/HevcUtils.cpp
+++ b/media/libstagefright/HevcUtils.cpp
@@ -102,10 +102,11 @@
static bool findParam(uint32_t key, T *param,
KeyedVector<uint32_t, uint64_t> ¶ms) {
CHECK(param);
- if (params.indexOfKey(key) < 0) {
+ ssize_t index = params.indexOfKey(key);
+ if (index < 0) {
return false;
}
- *param = (T) params[key];
+ *param = (T) params[index];
return true;
}
diff --git a/media/libstagefright/MediaClock.cpp b/media/libstagefright/MediaClock.cpp
index 24608a7..ed0819d 100644
--- a/media/libstagefright/MediaClock.cpp
+++ b/media/libstagefright/MediaClock.cpp
@@ -110,8 +110,12 @@
if (mAnchorTimeRealUs != -1) {
int64_t oldNowMediaUs =
mAnchorTimeMediaUs + (nowUs - mAnchorTimeRealUs) * (double)mPlaybackRate;
- if (nowMediaUs < oldNowMediaUs + kAnchorFluctuationAllowedUs
- && nowMediaUs > oldNowMediaUs - kAnchorFluctuationAllowedUs) {
+ // earlier, we ensured that the anchor times are non-negative and the
+ // math to calculate the now/oldNow times stays non-negative.
+ // by casting into uint64_t, we gain headroom to avoid any overflows at the upper end
+ // when adding the fluctuation allowance.
+ if ((uint64_t)nowMediaUs < (uint64_t)oldNowMediaUs + kAnchorFluctuationAllowedUs
+ && (uint64_t)nowMediaUs + kAnchorFluctuationAllowedUs > (uint64_t)oldNowMediaUs) {
return;
}
}
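
A worked bound to make the unsigned-headroom comment above concrete (a sketch; it only assumes the invariants stated in that comment, namely that both media times are non-negative int64_t values and that kAnchorFluctuationAllowedUs is a small positive constant):

    // Worst case for the right-hand side: oldNowMediaUs == INT64_MAX.
    //   signed:   oldNowMediaUs + kAnchorFluctuationAllowedUs           -> overflows int64_t (UB)
    //   unsigned: (uint64_t)oldNowMediaUs + kAnchorFluctuationAllowedUs
    //             <= (2^63 - 1) + kAnchorFluctuationAllowedUs < 2^64 - 1 -> well-defined
    // The symmetric argument covers nowMediaUs + kAnchorFluctuationAllowedUs on the other side.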
diff --git a/media/libstagefright/Utils.cpp b/media/libstagefright/Utils.cpp
index 4b6470a..a443ed9 100644
--- a/media/libstagefright/Utils.cpp
+++ b/media/libstagefright/Utils.cpp
@@ -126,14 +126,10 @@
}
}
-static bool isHdr(const sp<AMessage> &format) {
- // if CSD specifies HDR transfer(s), we assume HDR. Otherwise, if it specifies non-HDR
- // transfers, we must assume non-HDR. This is because CSD trumps any color-transfer key
- // in the format.
- int32_t isHdr;
- if (format->findInt32("android._is-hdr", &isHdr)) {
- return isHdr;
- }
+/**
+ * Returns true if, and only if, the given format corresponds to HDR10 or HDR10+.
+ */
+static bool isHdr10or10Plus(const sp<AMessage> &format) {
// if user/container supplied HDR static info without transfer set, assume true
if ((format->contains("hdr-static-info") || format->contains("hdr10-plus-info"))
@@ -143,8 +139,7 @@
// otherwise, verify that an HDR transfer function is set
int32_t transfer;
if (format->findInt32("color-transfer", &transfer)) {
- return transfer == ColorUtils::kColorTransferST2084
- || transfer == ColorUtils::kColorTransferHLG;
+ return transfer == ColorUtils::kColorTransferST2084;
}
return false;
}
@@ -419,8 +414,12 @@
}
// bump to HDR profile
- if (isHdr(format) && codecProfile == HEVCProfileMain10) {
- codecProfile = HEVCProfileMain10HDR10;
+ if (isHdr10or10Plus(format) && codecProfile == HEVCProfileMain10) {
+ if (format->contains("hdr10-plus-info")) {
+ codecProfile = HEVCProfileMain10HDR10Plus;
+ } else {
+ codecProfile = HEVCProfileMain10HDR10;
+ }
}
format->setInt32("profile", codecProfile);
@@ -615,16 +614,25 @@
{ 3, VP9Profile3 },
};
- const static ALookup<int32_t, int32_t> toHdr {
+ const static ALookup<int32_t, int32_t> toHdr10 {
{ VP9Profile2, VP9Profile2HDR },
{ VP9Profile3, VP9Profile3HDR },
};
+ const static ALookup<int32_t, int32_t> toHdr10Plus {
+ { VP9Profile2, VP9Profile2HDR10Plus },
+ { VP9Profile3, VP9Profile3HDR10Plus },
+ };
+
int32_t profile;
if (profiles.map(data[0], &profile)) {
// convert to HDR profile
- if (isHdr(format)) {
- toHdr.lookup(profile, &profile);
+ if (isHdr10or10Plus(format)) {
+ if (format->contains("hdr10-plus-info")) {
+ toHdr10Plus.lookup(profile, &profile);
+ } else {
+ toHdr10.lookup(profile, &profile);
+ }
}
format->setInt32("profile", profile);
@@ -684,7 +692,7 @@
int32_t profile;
if (profiles.map(std::make_pair(highBitDepth, profileData), &profile)) {
// bump to HDR profile
- if (isHdr(format) && profile == AV1ProfileMain10) {
+ if (isHdr10or10Plus(format) && profile == AV1ProfileMain10) {
if (format->contains("hdr10-plus-info")) {
profile = AV1ProfileMain10HDR10Plus;
} else {
diff --git a/media/libstagefright/bqhelper/Android.bp b/media/libstagefright/bqhelper/Android.bp
index 0e2b472..a4f0fb3 100644
--- a/media/libstagefright/bqhelper/Android.bp
+++ b/media/libstagefright/bqhelper/Android.bp
@@ -12,6 +12,7 @@
double_loadable: true,
srcs: [
+ ":libgui_frame_event_aidl",
"FrameDropper.cpp",
"GraphicBufferSource.cpp",
],
diff --git a/media/libstagefright/data/media_codecs_google_c2_video.xml b/media/libstagefright/data/media_codecs_google_c2_video.xml
index 3509ef8..03d8b78 100644
--- a/media/libstagefright/data/media_codecs_google_c2_video.xml
+++ b/media/libstagefright/data/media_codecs_google_c2_video.xml
@@ -60,7 +60,7 @@
<MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8">
<Alias name="OMX.google.vp8.decoder" />
<Limit name="size" min="2x2" max="2048x2048" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-16384" />
<Limit name="blocks-per-second" range="1-1000000" />
@@ -70,7 +70,7 @@
<MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9">
<Alias name="OMX.google.vp9.decoder" />
<Limit name="size" min="2x2" max="2048x2048" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Limit name="block-count" range="1-16384" />
<Limit name="blocks-per-second" range="1-500000" />
diff --git a/media/libstagefright/data/media_codecs_sw.xml b/media/libstagefright/data/media_codecs_sw.xml
index d7e2d18..fb4f9a0 100644
--- a/media/libstagefright/data/media_codecs_sw.xml
+++ b/media/libstagefright/data/media_codecs_sw.xml
@@ -150,7 +150,7 @@
<MediaCodec name="c2.android.vp8.decoder" type="video/x-vnd.on2.vp8" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.vp8.decoder" />
<Limit name="size" min="2x2" max="2048x2048" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Variant name="!slow-cpu">
<Limit name="block-count" range="1-16384" />
@@ -166,7 +166,7 @@
</MediaCodec>
<MediaCodec name="c2.android.vp9.decoder" type="video/x-vnd.on2.vp9" variant="slow-cpu,!slow-cpu">
<Alias name="OMX.google.vp9.decoder" />
- <Limit name="alignment" value="2x2" />
+ <Limit name="alignment" value="1x1" />
<Limit name="block-size" value="16x16" />
<Variant name="!slow-cpu">
<Limit name="size" min="2x2" max="2048x2048" />
diff --git a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
index d94c8ff..9f46a74 100644
--- a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
+++ b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.cpp
@@ -66,8 +66,8 @@
for (size_t i = 0; i < extractor->countTracks(); ++i) {
sp<MetaData> meta = extractor->getTrackMetaData(i);
- const char *trackMime;
- if (!strcasecmp(mime.c_str(), trackMime)) {
+ std::string trackMime = dataProvider->PickValueInArray(kTestedMimeTypes);
+ if (!strcasecmp(mime.c_str(), trackMime.c_str())) {
sp<IMediaSource> track = extractor->getTrack(i);
if (track == NULL) {
return NULL;
diff --git a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.h b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.h
index 98bfb94..6856ac0 100644
--- a/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.h
+++ b/media/libstagefright/tests/fuzzers/FuzzerMediaUtility.h
@@ -42,6 +42,51 @@
kMaxValue = MPEG2TS,
};
+static std::string kTestedMimeTypes[] = {"audio/3gpp",
+ "audio/amr-wb",
+ "audio/vorbis",
+ "audio/opus",
+ "audio/mp4a-latm",
+ "audio/mpeg",
+ "audio/mpeg-L1",
+ "audio/mpeg-L2",
+ "audio/midi",
+ "audio/qcelp",
+ "audio/g711-alaw",
+ "audio/g711-mlaw",
+ "audio/flac",
+ "audio/aac-adts",
+ "audio/gsm",
+ "audio/ac3",
+ "audio/eac3",
+ "audio/eac3-joc",
+ "audio/ac4",
+ "audio/scrambled",
+ "audio/alac",
+ "audio/x-ms-wma",
+ "audio/x-adpcm-ms",
+ "audio/x-adpcm-dvi-ima",
+ "video/avc",
+ "video/hevc",
+ "video/mp4v-es",
+ "video/3gpp",
+ "video/x-vnd.on2.vp8",
+ "video/x-vnd.on2.vp9",
+ "video/av01",
+ "video/mpeg2",
+ "video/dolby-vision",
+ "video/scrambled",
+ "video/divx",
+ "video/divx3",
+ "video/xvid",
+ "video/x-motion-jpeg",
+ "text/3gpp-tt",
+ "application/x-subrip",
+ "text/vtt",
+ "text/cea-608",
+ "text/cea-708",
+ "application/x-id3v4"};
+
std::string genMimeType(FuzzedDataProvider *dataProvider);
sp<IMediaExtractor> genMediaExtractor(FuzzedDataProvider *dataProvider, uint16_t dataAmount);
sp<MediaSource> genMediaSource(FuzzedDataProvider *dataProvider, uint16_t maxMediaBlobSize);
diff --git a/media/ndk/NdkImage.cpp b/media/ndk/NdkImage.cpp
index 12a0d53..0bdb41b 100644
--- a/media/ndk/NdkImage.cpp
+++ b/media/ndk/NdkImage.cpp
@@ -24,6 +24,7 @@
#include <android_media_Utils.h>
#include <private/android/AHardwareBufferHelpers.h>
+#include <ui/PublicFormat.h>
#include <utils/Log.h>
using namespace android;
@@ -34,6 +35,8 @@
int64_t timestamp, int32_t width, int32_t height, int32_t numPlanes) :
mReader(reader), mFormat(format), mUsage(usage), mBuffer(buffer), mLockedBuffer(nullptr),
mTimestamp(timestamp), mWidth(width), mHeight(height), mNumPlanes(numPlanes) {
+ PublicFormat publicFormat = static_cast<PublicFormat>(format);
+ mHalDataSpace = mapPublicFormatToHalDataspace(publicFormat);
LOG_FATAL_IF(reader == nullptr, "AImageReader shouldn't be null while creating AImage");
}
@@ -156,6 +159,20 @@
return AMEDIA_OK;
}
+media_status_t
+AImage::getDataSpace(android_dataspace* dataSpace) const {
+ if (dataSpace == nullptr) {
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ *dataSpace = static_cast<android_dataspace>(-1);
+ if (isClosed()) {
+ ALOGE("%s: image %p has been closed!", __FUNCTION__, this);
+ return AMEDIA_ERROR_INVALID_OBJECT;
+ }
+ *dataSpace = mHalDataSpace;
+ return AMEDIA_OK;
+}
+
media_status_t AImage::lockImage() {
if (mBuffer == nullptr || mBuffer->mGraphicBuffer == nullptr) {
LOG_ALWAYS_FATAL("%s: AImage %p has no buffer.", __FUNCTION__, this);
@@ -762,3 +779,15 @@
}
return image->getHardwareBuffer(buffer);
}
+
+EXPORT
+media_status_t AImage_getDataSpace(
+ const AImage* image, /*out*/int32_t* dataSpace) {
+ ALOGV("%s", __FUNCTION__);
+
+ if (image == nullptr || dataSpace == nullptr) {
+ ALOGE("%s: bad argument. image %p dataSpace %p", __FUNCTION__, image, dataSpace);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ return image->getDataSpace((android_dataspace*)(dataSpace));
+}
\ No newline at end of file
diff --git a/media/ndk/NdkImagePriv.h b/media/ndk/NdkImagePriv.h
index 05115b9..dc10a6a 100644
--- a/media/ndk/NdkImagePriv.h
+++ b/media/ndk/NdkImagePriv.h
@@ -82,6 +82,7 @@
media_status_t getPlaneRowStride(int planeIdx, /*out*/int32_t* rowStride) const;
media_status_t getPlaneData(int planeIdx,/*out*/uint8_t** data, /*out*/int* dataLength) const;
media_status_t getHardwareBuffer(/*out*/AHardwareBuffer** buffer) const;
+ media_status_t getDataSpace(/*out*/android_dataspace* dataSpace) const;
private:
// AImage should be deleted through free() API.
@@ -101,6 +102,7 @@
const int32_t mWidth;
const int32_t mHeight;
const int32_t mNumPlanes;
+ android_dataspace mHalDataSpace = HAL_DATASPACE_UNKNOWN;
bool mIsClosed = false;
mutable Mutex mLock;
};
diff --git a/media/ndk/NdkImageReader.cpp b/media/ndk/NdkImageReader.cpp
index 1067e24..ac5cba8 100644
--- a/media/ndk/NdkImageReader.cpp
+++ b/media/ndk/NdkImageReader.cpp
@@ -46,6 +46,9 @@
static constexpr int kWindowHalTokenSizeMax = 256;
+static media_status_t validateParameters(int32_t width, int32_t height, int32_t format,
+ uint64_t usage, int32_t maxImages,
+ /*out*/ AImageReader**& reader);
static native_handle_t *convertHalTokenToNativeHandle(const HalToken &halToken);
bool
@@ -263,13 +266,17 @@
int32_t height,
int32_t format,
uint64_t usage,
- int32_t maxImages)
+ int32_t maxImages,
+ uint32_t hardwareBufferFormat,
+ android_dataspace dataSpace)
: mWidth(width),
mHeight(height),
mFormat(format),
mUsage(usage),
mMaxImages(maxImages),
mNumPlanes(getNumPlanesForFormat(format)),
+ mHalFormat(hardwareBufferFormat),
+ mHalDataSpace(dataSpace),
mFrameListener(new FrameListener(this)),
mBufferRemovedListener(new BufferRemovedListener(this)) {}
@@ -280,9 +287,6 @@
media_status_t
AImageReader::init() {
- PublicFormat publicFormat = static_cast<PublicFormat>(mFormat);
- mHalFormat = mapPublicFormatToHalFormat(publicFormat);
- mHalDataSpace = mapPublicFormatToHalDataspace(publicFormat);
mHalUsage = AHardwareBuffer_convertToGrallocUsageBits(mUsage);
sp<IGraphicBufferProducer> gbProducer;
@@ -646,6 +650,41 @@
}
}
+static
+media_status_t validateParameters(int32_t width, int32_t height, int32_t format,
+ uint64_t usage, int32_t maxImages,
+ /*out*/ AImageReader**& reader) {
+ if (reader == nullptr) {
+ ALOGE("%s: reader argument is null", __FUNCTION__);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (width < 1 || height < 1) {
+ ALOGE("%s: image dimension must be positive: w:%d h:%d",
+ __FUNCTION__, width, height);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (maxImages < 1) {
+ ALOGE("%s: max outstanding image count must be at least 1 (%d)",
+ __FUNCTION__, maxImages);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (maxImages > BufferQueueDefs::NUM_BUFFER_SLOTS) {
+ ALOGE("%s: max outstanding image count (%d) cannot be larget than %d.",
+ __FUNCTION__, maxImages, BufferQueueDefs::NUM_BUFFER_SLOTS);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+
+ if (!AImageReader::isSupportedFormatAndUsage(format, usage)) {
+ ALOGE("%s: format %d is not supported with usage 0x%" PRIx64 " by AImageReader",
+ __FUNCTION__, format, usage);
+ return AMEDIA_ERROR_INVALID_PARAMETER;
+ }
+ return AMEDIA_OK;
+}
+
static native_handle_t *convertHalTokenToNativeHandle(
const HalToken &halToken) {
// We attempt to store halToken in the ints of the native_handle_t after its
@@ -696,42 +735,32 @@
} //extern "C"
EXPORT
+media_status_t AImageReader_newWithDataSpace(
+ int32_t width, int32_t height, uint64_t usage, int32_t maxImages,
+ uint32_t hardwareBufferFormat, int32_t dataSpace,
+ /*out*/ AImageReader** reader) {
+ ALOGV("%s", __FUNCTION__);
+
+ android_dataspace halDataSpace = static_cast<android_dataspace>(dataSpace);
+ int32_t format = static_cast<int32_t>(
+ mapHalFormatDataspaceToPublicFormat(hardwareBufferFormat, halDataSpace));
+ return AImageReader_newWithUsage(width, height, format, usage, maxImages, reader);
+}
+
+EXPORT
media_status_t AImageReader_newWithUsage(
int32_t width, int32_t height, int32_t format, uint64_t usage,
int32_t maxImages, /*out*/ AImageReader** reader) {
ALOGV("%s", __FUNCTION__);
- if (width < 1 || height < 1) {
- ALOGE("%s: image dimension must be positive: w:%d h:%d",
- __FUNCTION__, width, height);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
+ media_status_t validationStatus =
+         validateParameters(width, height, format, usage, maxImages, reader);
+ if (validationStatus != AMEDIA_OK) {
+     return validationStatus;
+ }
- if (maxImages < 1) {
- ALOGE("%s: max outstanding image count must be at least 1 (%d)",
- __FUNCTION__, maxImages);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
-
- if (maxImages > BufferQueueDefs::NUM_BUFFER_SLOTS) {
- ALOGE("%s: max outstanding image count (%d) cannot be larget than %d.",
- __FUNCTION__, maxImages, BufferQueueDefs::NUM_BUFFER_SLOTS);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
-
- if (!AImageReader::isSupportedFormatAndUsage(format, usage)) {
- ALOGE("%s: format %d is not supported with usage 0x%" PRIx64 " by AImageReader",
- __FUNCTION__, format, usage);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
-
- if (reader == nullptr) {
- ALOGE("%s: reader argument is null", __FUNCTION__);
- return AMEDIA_ERROR_INVALID_PARAMETER;
- }
+ PublicFormat publicFormat = static_cast<PublicFormat>(format);
+ uint32_t halFormat = mapPublicFormatToHalFormat(publicFormat);
+ android_dataspace halDataSpace = mapPublicFormatToHalDataspace(publicFormat);
AImageReader* tmpReader = new AImageReader(
- width, height, format, usage, maxImages);
+ width, height, format, usage, maxImages, halFormat, halDataSpace);
if (tmpReader == nullptr) {
ALOGE("%s: AImageReader allocation failed", __FUNCTION__);
return AMEDIA_ERROR_UNKNOWN;
diff --git a/media/ndk/NdkImageReaderPriv.h b/media/ndk/NdkImageReaderPriv.h
index 37c606e..0199616 100644
--- a/media/ndk/NdkImageReaderPriv.h
+++ b/media/ndk/NdkImageReaderPriv.h
@@ -56,10 +56,12 @@
int32_t height,
int32_t format,
uint64_t usage,
- int32_t maxImages);
+ int32_t maxImages,
+ uint32_t hardwareBufferFormat,
+ android_dataspace dataSpace);
~AImageReader();
- // Inintialize AImageReader, uninitialized or failed to initialize AImageReader
+ // Initialize AImageReader, uninitialized or failed to initialize AImageReader
// should never be passed to application
media_status_t init();
@@ -79,7 +81,6 @@
void close();
private:
-
friend struct AImage; // for grabing reader lock
BufferItem* getBufferItemLocked();
@@ -118,13 +119,16 @@
const int32_t mWidth;
const int32_t mHeight;
- const int32_t mFormat;
+ int32_t mFormat;
const uint64_t mUsage; // AHARDWAREBUFFER_USAGE_* flags.
const int32_t mMaxImages;
// TODO(jwcai) Seems completely unused in AImageReader class.
const int32_t mNumPlanes;
+ uint32_t mHalFormat = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM;
+ android_dataspace mHalDataSpace = HAL_DATASPACE_UNKNOWN;
+
struct FrameListener : public ConsumerBase::FrameAvailableListener {
public:
explicit FrameListener(AImageReader* parent) : mReader(parent) {}
@@ -155,8 +159,6 @@
};
sp<BufferRemovedListener> mBufferRemovedListener;
- int mHalFormat;
- android_dataspace mHalDataSpace;
uint64_t mHalUsage;
sp<IGraphicBufferProducer> mProducer;
diff --git a/media/ndk/include/media/NdkImage.h b/media/ndk/include/media/NdkImage.h
index 71bc6d9..814a327 100644
--- a/media/ndk/include/media/NdkImage.h
+++ b/media/ndk/include/media/NdkImage.h
@@ -583,7 +583,7 @@
* Available since API level 24.
*
* @param image the {@link AImage} of interest.
- * @param width the width of the image will be filled here if the method call succeeeds.
+ * @param width the width of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -599,7 +599,7 @@
* Available since API level 24.
*
* @param image the {@link AImage} of interest.
- * @param height the height of the image will be filled here if the method call succeeeds.
+ * @param height the height of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -617,7 +617,7 @@
* Available since API level 24.
*
* @param image the {@link AImage} of interest.
- * @param format the format of the image will be filled here if the method call succeeeds.
+ * @param format the format of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -636,7 +636,7 @@
* Available since API level 24.
*
* @param image the {@link AImage} of interest.
- * @param rect the cropped rectangle of the image will be filled here if the method call succeeeds.
+ * @param rect the cropped rectangle of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -662,7 +662,7 @@
* Available since API level 24.
*
* @param image the {@link AImage} of interest.
- * @param timestampNs the timestamp of the image will be filled here if the method call succeeeds.
+ * @param timestampNs the timestamp of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -682,7 +682,7 @@
*
* @param image the {@link AImage} of interest.
* @param numPlanes the number of planes of the image will be filled here if the method call
- * succeeeds.
+ * succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -706,7 +706,7 @@
*
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
- * @param pixelStride the pixel stride of the image will be filled here if the method call succeeeds.
+ * @param pixelStride the pixel stride of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -735,7 +735,7 @@
*
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
- * @param rowStride the row stride of the image will be filled here if the method call succeeeds.
+ * @param rowStride the row stride of the image will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -762,8 +762,8 @@
*
* @param image the {@link AImage} of interest.
* @param planeIdx the index of the plane. Must be less than the number of planes of input image.
- * @param data the data pointer of the image will be filled here if the method call succeeeds.
- * @param dataLength the valid length of data will be filled here if the method call succeeeds.
+ * @param data the data pointer of the image will be filled here if the method call succeeds.
+ * @param dataLength the valid length of data will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -826,6 +826,25 @@
*/
media_status_t AImage_getHardwareBuffer(const AImage* image, /*out*/AHardwareBuffer** buffer) __INTRODUCED_IN(26);
+/**
+ * Query the dataspace of the input {@link AImage}.
+ *
+ * Available since API level 33.
+ *
+ * @param image the {@link AImage} of interest.
+ * @param dataSpace the dataspace of the image will be filled here if the method call succeeds.
+ * This must be one of the ADATASPACE_* enum values defined in
+ * {@link ADataSpace}.
+ *
+ * @return <ul>
+ * <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ * <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if image or dataSpace is NULL.</li>
+ * <li>{@link AMEDIA_ERROR_INVALID_OBJECT} if the {@link AImageReader} generated this
+ * image has been deleted.</li></ul>
+ */
+media_status_t AImage_getDataSpace(const AImage* image,
+ /*out*/int32_t* dataSpace) __INTRODUCED_IN(33);
+
__END_DECLS
#endif //_NDK_IMAGE_H
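A minimal usage sketch for the new AImage_getDataSpace entry point; it assumes 'image' is a valid AImage already acquired from an AImageReader (error handling trimmed):

    #include <media/NdkImage.h>

    void logImageDataSpace(const AImage* image) {
        int32_t dataSpace = 0;
        if (AImage_getDataSpace(image, &dataSpace) == AMEDIA_OK) {
            // dataSpace now holds one of the ADATASPACE_* values from ADataSpace.
        }
    }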
diff --git a/media/ndk/include/media/NdkImageReader.h b/media/ndk/include/media/NdkImageReader.h
index 4bd7f2a..992955b 100644
--- a/media/ndk/include/media/NdkImageReader.h
+++ b/media/ndk/include/media/NdkImageReader.h
@@ -79,7 +79,7 @@
* by the user, one of them has to be released before a new {@link AImage} will become
* available for access through {@link AImageReader_acquireLatestImage} or
* {@link AImageReader_acquireNextImage}. Must be greater than 0.
- * @param reader The created image reader will be filled here if the method call succeeeds.
+ * @param reader The created image reader will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -133,7 +133,7 @@
* Available since API level 24.
*
* @param reader The image reader of interest.
- * @param width the default width of the reader will be filled here if the method call succeeeds.
+ * @param width the default width of the reader will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -151,7 +151,7 @@
* Available since API level 24.
*
* @param reader The image reader of interest.
- * @param height the default height of the reader will be filled here if the method call succeeeds.
+ * @param height the default height of the reader will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -165,7 +165,7 @@
* Available since API level 24.
*
* @param reader The image reader of interest.
- * @param format the fromat of the reader will be filled here if the method call succeeeds. The
+ * @param format the format of the reader will be filled here if the method call succeeds. The
* value will be one of the AIMAGE_FORMAT_* enum value defiend in {@link NdkImage.h}.
*
* @return <ul>
@@ -181,7 +181,7 @@
*
* @param reader The image reader of interest.
* @param maxImages the maximum number of concurrently acquired images of the reader will be filled
- * here if the method call succeeeds.
+ * here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -212,7 +212,7 @@
* Available since API level 24.
*
* @param reader The image reader of interest.
- * @param image the acquired {@link AImage} will be filled here if the method call succeeeds.
+ * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -257,7 +257,7 @@
* Available since API level 24.
*
* @param reader The image reader of interest.
- * @param image the acquired {@link AImage} will be filled here if the method call succeeeds.
+ * @param image the acquired {@link AImage} will be filled here if the method call succeeds.
*
* @return <ul>
* <li>{@link AMEDIA_OK} if the method call succeeds.</li>
@@ -387,6 +387,44 @@
/*out*/ AImageReader** reader) __INTRODUCED_IN(26);
/**
+ * AImageReader constructor similar to {@link AImageReader_newWithUsage} that takes
+ * two additional parameters to build the format of the Image. All other parameters
+ * and return values are identical to those of {@link AImageReader_newWithUsage}.
+ *
+ * <p>Instead of passing {@code format} parameter, this constructor accepts
+ * the combination of {@code hardwareBufferFormat} and {@code dataSpace} for the
+ * format of the Image that the reader will produce.</p>
+ *
+ * Available since API level 33.
+ *
+ * @param width The default width in pixels of the Images that this reader will produce.
+ * @param height The default height in pixels of the Images that this reader will produce.
+ * @param usage specifies how the consumer will access the AImage.
+ * See {@link AImageReader_newWithUsage} parameter description for more details.
+ * @param maxImages The maximum number of images the user will want to access simultaneously.
+ * See {@link AImageReader_newWithUsage} parameter description for more details.
+ * @param hardwareBufferFormat The hardware buffer format passed by the producer.
+ * This must be one of the AHARDWAREBUFFER_FORMAT_* enum values defined
+ * in {@link hardware_buffer.h}.
+ * @param dataSpace The dataspace of the Image passed by the producer.
+ * This must be one of the ADATASPACE_* enum values defined in
+ * {@link ADataSpace}.
+ * @param reader The created image reader will be filled here if the method call succeeds.
+ *
+ * @return <ul>
+ * <li>{@link AMEDIA_OK} if the method call succeeds.</li>
+ * <li>{@link AMEDIA_ERROR_INVALID_PARAMETER} if reader is NULL, or one or more of the
+ * width, height, maxImages, hardwareBufferFormat, or dataSpace arguments
+ * is not supported.</li>
+ * <li>{@link AMEDIA_ERROR_UNKNOWN} if the method fails for some other reasons.</li></ul>
+ *
+ * @see AImageReader_newWithUsage
+ */
+media_status_t AImageReader_newWithDataSpace(int32_t width, int32_t height, uint64_t usage,
+ int32_t maxImages, uint32_t hardwareBufferFormat, int32_t dataSpace,
+ /*out*/ AImageReader** reader) __INTRODUCED_IN(33);
+
+/**
* Acquire the next {@link AImage} from the image reader's queue asynchronously.
*
* <p>AImageReader acquire method similar to {@link AImageReader_acquireNextImage} that takes an
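A minimal sketch of creating a reader through the new AImageReader_newWithDataSpace constructor; the RGBA_8888 format, sRGB dataspace, size, and usage below are illustrative values, not requirements:

    #include <android/data_space.h>
    #include <android/hardware_buffer.h>
    #include <media/NdkImageReader.h>

    AImageReader* createRgbaSrgbReader() {
        AImageReader* reader = nullptr;
        media_status_t status = AImageReader_newWithDataSpace(
                640, 480, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN, 4 /* maxImages */,
                AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM, ADATASPACE_SRGB, &reader);
        return status == AMEDIA_OK ? reader : nullptr;
    }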
diff --git a/media/ndk/libmediandk.map.txt b/media/ndk/libmediandk.map.txt
index c8faced..bac4b22 100644
--- a/media/ndk/libmediandk.map.txt
+++ b/media/ndk/libmediandk.map.txt
@@ -13,11 +13,13 @@
AImageReader_getWindow; # introduced=24
AImageReader_new; # introduced=24
AImageReader_newWithUsage; # introduced=26
+ AImageReader_newWithDataSpace; # introduced=Tiramisu
AImageReader_setBufferRemovedListener; # introduced=26
AImageReader_setImageListener; # introduced=24
AImage_delete; # introduced=24
AImage_deleteAsync; # introduced=26
AImage_getCropRect; # introduced=24
+ AImage_getDataSpace; # introduced=Tiramisu
AImage_getFormat; # introduced=24
AImage_getHardwareBuffer; # introduced=26
AImage_getHeight; # introduced=24
diff --git a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
index 130feee..32fc3be 100644
--- a/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
+++ b/media/utils/fuzzers/SchedulingPolicyServiceFuzz.cpp
@@ -34,11 +34,16 @@
const sp<IServiceManager> sm(defaultServiceManager());
if (sm != nullptr) {
const String16 name("batterystats");
- batteryStatService = checked_interface_cast<IBatteryStats>(sm->checkService(name));
- if (batteryStatService == nullptr) {
+ sp<IBinder> obj = sm->checkService(name);
+ if (!obj) {
ALOGW("batterystats service unavailable!");
return nullptr;
}
+ batteryStatService = checked_interface_cast<IBatteryStats>(obj);
+ if (batteryStatService == nullptr) {
+ ALOGW("batterystats service interface is invalid");
+ return nullptr;
+ }
}
return batteryStatService;
}
diff --git a/media/utils/include/mediautils/ExtendedAccumulator.h b/media/utils/include/mediautils/ExtendedAccumulator.h
new file mode 100644
index 0000000..7e3e170
--- /dev/null
+++ b/media/utils/include/mediautils/ExtendedAccumulator.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#pragma once
+
+#include <atomic>
+#include <cstdint>
+#include <tuple>
+#include <type_traits>
+
+#include <log/log.h>
+
+namespace android::mediautils {
+
+// The goal of this class is to detect and accumulate wraparound occurrences on a
+// smaller integer type.
+
+// This class assumes that the underlying unsigned type is either incremented or
+// decremented by at most the underlying signed type between any two subsequent
+// polls (or construction). This is well-defined as the modular nature of
+// unsigned arithmetic ensures that every new value maps 1-1 to an
+// increment/decrement over the same sized signed type. It also ensures that our
+// counter will be equivalent mod the size of the integer even if the underlying
+// type is modified outside of this range.
+//
+// For convenience, this class is thread compatible. Additionally, it is safe
+// as long as there is only one writer.
+template <typename Integral = uint32_t, typename AccumulatingType = uint64_t>
+class ExtendedAccumulator {
+ static_assert(sizeof(Integral) < sizeof(AccumulatingType),
+ "Accumulating type should be larger than underlying type");
+ static_assert(std::is_integral_v<Integral> && std::is_unsigned_v<Integral>,
+ "Wraparound behavior is only well-defiend for unsigned ints");
+ static_assert(std::is_integral_v<AccumulatingType>);
+
+ public:
+ enum class Wrap {
+ NORMAL = 0,
+ UNDERFLOW = 1,
+ OVERFLOW = 2,
+ };
+
+ using UnsignedInt = Integral;
+ using SignedInt = std::make_signed_t<UnsignedInt>;
+
+ explicit ExtendedAccumulator(AccumulatingType initial = 0) : mAccumulated(initial) {}
+
+ // Returns a pair of the calculated change on the accumulating value, and a
+ // Wrap type representing the type of wraparound (if any) which occurred.
+ std::pair<SignedInt, Wrap> poll(UnsignedInt value) {
+ auto acc = mAccumulated.load(std::memory_order_relaxed);
+ const auto bottom_bits = static_cast<UnsignedInt>(acc);
+ std::pair<SignedInt, Wrap> res = {0, Wrap::NORMAL};
+ const bool overflow = __builtin_sub_overflow(value, bottom_bits, &res.first);
+
+ if (overflow) {
+ res.second = (res.first > 0) ? Wrap::OVERFLOW : Wrap::UNDERFLOW;
+ }
+
+ const bool acc_overflow = __builtin_add_overflow(acc, res.first, &acc);
+ // If our *accumulating* type overflows or underflows (depending on its
+ // signedness), we should abort.
+ if (acc_overflow) LOG_ALWAYS_FATAL("Unexpected overflow/underflow in %s", __func__);
+
+ mAccumulated.store(acc, std::memory_order_relaxed);
+ return res;
+ }
+
+ AccumulatingType getValue() const { return mAccumulated.load(std::memory_order_relaxed); }
+
+ private:
+ // Invariant - the bottom underlying bits of accumulated are the same as the
+ // last value provided to poll.
+ std::atomic<AccumulatingType> mAccumulated;
+};
+
+} // namespace android::mediautils
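
For context, a minimal sketch of how the accumulator behaves across a wrap (illustrative only, not part of this change; it assumes the header above is on the include path and the counter values are arbitrary examples):

#include <mediautils/ExtendedAccumulator.h>

#include <cstdint>
#include <cstdio>

int main() {
    // Widen an 8-bit hardware counter into a signed 64-bit total, starting at 250.
    android::mediautils::ExtendedAccumulator<uint8_t, int64_t> counter{250};
    // The 8-bit counter advanced from 250 to 10, i.e. +16 modulo 256.
    const auto [delta, wrap] = counter.poll(10);
    // Prints: delta=16 overflowed=1 total=266
    printf("delta=%d overflowed=%d total=%lld\n", (int)delta,
           (int)(wrap == decltype(counter)::Wrap::OVERFLOW),
           (long long)counter.getValue());
    return 0;
}
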
diff --git a/media/utils/tests/Android.bp b/media/utils/tests/Android.bp
index 1024018..82dd0dc 100644
--- a/media/utils/tests/Android.bp
+++ b/media/utils/tests/Android.bp
@@ -15,11 +15,11 @@
"-Wextra",
],
- sanitize:{
- address: true,
- cfi: true,
- integer_overflow: true,
- memtag_heap: true,
+ sanitize: {
+ address: true,
+ cfi: true,
+ integer_overflow: true,
+ memtag_heap: true,
},
shared_libs: [
@@ -28,7 +28,7 @@
srcs: [
"sharedtest.cpp",
- ]
+ ],
}
cc_test {
@@ -40,11 +40,11 @@
"-Wextra",
],
- sanitize:{
- address: true,
- cfi: true,
- integer_overflow: true,
- memtag_heap: true,
+ sanitize: {
+ address: true,
+ cfi: true,
+ integer_overflow: true,
+ memtag_heap: true,
},
shared_libs: [
@@ -174,11 +174,11 @@
"-Wextra",
],
- sanitize:{
- address: true,
- cfi: true,
- integer_overflow: true,
- memtag_heap: true,
+ sanitize: {
+ address: true,
+ cfi: true,
+ integer_overflow: true,
+ memtag_heap: true,
},
shared_libs: [
@@ -191,3 +191,30 @@
"timecheck_tests.cpp",
],
}
+
+cc_test {
+ name: "extended_accumulator_tests",
+ cflags: [
+ "-Wall",
+ "-Werror",
+ "-Wextra",
+ ],
+
+ sanitize: {
+ address: true,
+ cfi: true,
+ integer_overflow: true,
+ memtag_heap: true,
+ },
+
+ shared_libs: [
+ "libbase",
+ "liblog",
+ "libmediautils",
+ "libutils",
+ ],
+
+ srcs: [
+ "extended_accumulator_tests.cpp",
+ ],
+}
diff --git a/media/utils/tests/extended_accumulator_tests.cpp b/media/utils/tests/extended_accumulator_tests.cpp
new file mode 100644
index 0000000..e243e7e
--- /dev/null
+++ b/media/utils/tests/extended_accumulator_tests.cpp
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2022 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define LOG_TAG "extended_accumulator_tests"
+
+#include <mediautils/ExtendedAccumulator.h>
+
+#include <type_traits>
+#include <cstdint>
+#include <limits>
+
+#include <gtest/gtest.h>
+#include <log/log.h>
+
+using namespace android;
+using namespace android::mediautils;
+
+// Conditionally choose a base accumulating counter value in order to prevent
+// unsigned underflow on the accumulator from aborting the tests.
+template <typename TType, typename CType>
+static constexpr CType getBase() {
+ static_assert(sizeof(TType) < sizeof(CType));
+ if constexpr (std::is_unsigned_v<CType>) {
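+        // e.g. for TType = uint8_t, CType = uint64_t this returns 256, so a poll
+        // that wraps downward can never push the unsigned accumulator below zero.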
+ return std::numeric_limits<TType>::max() + 1;
+ } else {
+ return 0;
+ }
+}
+
+// Since the entire state of this utility is the previous value, and the
+// behavior is isomorphic mod the underlying type on the previous value, we can
+// test combinations of the previous value of the underlying type and a
+// hypothetical signed update to that type and ensure the accumulator moves
+// correctly and reports overflow correctly.
+template <typename TestUInt, typename CType>
+void testPair(TestUInt prevVal, std::make_signed_t<TestUInt> delta) {
+ using TestDetect = ExtendedAccumulator<TestUInt, CType>;
+ using TestInt = typename TestDetect::SignedInt;
+ static_assert(std::is_same_v<typename TestDetect::UnsignedInt, TestUInt>);
+ static_assert(std::is_same_v<TestInt, std::make_signed_t<TestUInt>>);
+ static_assert(sizeof(TestUInt) < sizeof(CType));
+
+ // To safely detect underflow/overflow for testing
+ // Should be 0 mod TestUInt, max + 1 is convenient
+ static constexpr CType base = getBase<TestUInt, CType>();
+ const CType prev = base + prevVal;
+ TestDetect test{prev};
+ EXPECT_EQ(test.getValue(), prev);
+ // Prevent unsigned wraparound abort
+ CType next;
+ const auto err = __builtin_add_overflow(prev, delta, &next);
+ LOG_ALWAYS_FATAL_IF(err, "Unexpected wrap in tests");
+ const auto [result, status] = test.poll(static_cast<TestUInt>(next));
+ EXPECT_EQ(test.getValue(), next);
+ EXPECT_EQ(result, delta);
+
+ // Test overflow/underflow event reporting.
+ if (next < base) EXPECT_EQ(TestDetect::Wrap::UNDERFLOW, status);
+ else if (next > base + std::numeric_limits<TestUInt>::max())
+ EXPECT_EQ(TestDetect::Wrap::OVERFLOW, status);
+ else EXPECT_EQ(TestDetect::Wrap::NORMAL, status);
+}
+
+// Test this utility on every combination of prior and update value for the
+// type uint8_t, with an unsigned containing type.
+TEST(wraparound_tests, cover_u8_u64) {
+ using TType = uint8_t;
+ using CType = uint64_t;
+ static constexpr CType max = std::numeric_limits<TType>::max();
+ for (CType i = 0; i <= max; i++) {
+ for (CType j = 0; j <= max; j++) {
+ testPair<TType, CType>(i, static_cast<int64_t>(j));
+ }
+ }
+}
+
+// Test this utility on every combination of prior and update value for the
+// type uint8_t, with a signed containing type.
+TEST(wraparound_tests, cover_u8_s64) {
+ using TType = uint8_t;
+ using CType = int64_t;
+ static constexpr CType max = std::numeric_limits<TType>::max();
+ for (CType i = 0; i <= max; i++) {
+ for (CType j = 0; j <= max; j++) {
+ testPair<TType, CType>(i, static_cast<int64_t>(j));
+ }
+ }
+}
diff --git a/services/audioflinger/AudioFlinger.cpp b/services/audioflinger/AudioFlinger.cpp
index 894b31c..5661206 100644
--- a/services/audioflinger/AudioFlinger.cpp
+++ b/services/audioflinger/AudioFlinger.cpp
@@ -654,6 +654,7 @@
void AudioFlinger::onExternalVibrationStop(const sp<os::ExternalVibration>& externalVibration) {
sp<os::IExternalVibratorService> evs = getExternalVibratorService();
if (evs != 0) {
+ ALOGD("%s, stopping external vibration", __func__);
evs->onExternalVibrationStop(*externalVibration);
}
}
@@ -765,6 +766,11 @@
(uint32_t)(mStandbyTimeInNsecs / 1000000));
result.append(buffer);
write(fd, result.string(), result.size());
+
+ dprintf(fd, "Vibrator infos(size=%zu):\n", mAudioVibratorInfos.size());
+ for (const auto& vibratorInfo : mAudioVibratorInfos) {
+ dprintf(fd, " - %s\n", vibratorInfo.toString().c_str());
+ }
}
void AudioFlinger::dumpPermissionDenial(int fd, const Vector<String16>& args __unused)
diff --git a/services/audioflinger/Effects.cpp b/services/audioflinger/Effects.cpp
index e6d7cf7..72c378d 100644
--- a/services/audioflinger/Effects.cpp
+++ b/services/audioflinger/Effects.cpp
@@ -569,7 +569,8 @@
mMaxDisableWaitCnt(1), // set by configure(), should be >= 1
mDisableWaitCnt(0), // set by process() and updateState()
mOffloaded(false),
- mAddedToHal(false)
+ mAddedToHal(false),
+ mIsOutput(false)
#ifdef FLOAT_EFFECT_CHAIN
, mSupportsFloat(false)
#endif
@@ -962,6 +963,7 @@
mConfig.outputCfg.mask = EFFECT_CONFIG_ALL;
mConfig.inputCfg.buffer.frameCount = callback->frameCount();
mConfig.outputCfg.buffer.frameCount = mConfig.inputCfg.buffer.frameCount;
+ mIsOutput = callback->isOutput();
ALOGV("configure() %p chain %p buffer %p framecount %zu",
this, callback->chain().promote().get(),
@@ -980,7 +982,7 @@
#ifdef MULTICHANNEL_EFFECT_CHAIN
if (status != NO_ERROR &&
- callback->isOutput() &&
+ mIsOutput &&
(mConfig.inputCfg.channels != AUDIO_CHANNEL_OUT_STEREO
|| mConfig.outputCfg.channels != AUDIO_CHANNEL_OUT_STEREO)) {
// Older effects may require exact STEREO position mask.
@@ -1643,6 +1645,22 @@
return status;
}
+status_t AudioFlinger::EffectModule::getConfigs(
+ audio_config_base_t* inputCfg, audio_config_base_t* outputCfg, bool* isOutput) const {
+ Mutex::Autolock _l(mLock);
+ if (mConfig.inputCfg.mask == 0 || mConfig.outputCfg.mask == 0) {
+ return NO_INIT;
+ }
+ inputCfg->sample_rate = mConfig.inputCfg.samplingRate;
+ inputCfg->channel_mask = static_cast<audio_channel_mask_t>(mConfig.inputCfg.channels);
+ inputCfg->format = static_cast<audio_format_t>(mConfig.inputCfg.format);
+ outputCfg->sample_rate = mConfig.outputCfg.samplingRate;
+ outputCfg->channel_mask = static_cast<audio_channel_mask_t>(mConfig.outputCfg.channels);
+ outputCfg->format = static_cast<audio_format_t>(mConfig.outputCfg.format);
+ *isOutput = mIsOutput;
+ return NO_ERROR;
+}
+
static std::string dumpInOutBuffer(bool isInput, const sp<EffectBufferHalInterface> &buffer) {
std::stringstream ss;
@@ -1760,6 +1778,7 @@
BINDER_METHOD_ENTRY(command) \
BINDER_METHOD_ENTRY(disconnect) \
BINDER_METHOD_ENTRY(getCblk) \
+BINDER_METHOD_ENTRY(getConfig) \
// singleton for Binder Method Statistics for IEffect
mediautils::MethodStatistics<int>& getIEffectStatistics() {
@@ -1803,6 +1822,13 @@
*_aidl_return = (code); \
return Status::ok();
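+// Evaluates an expression yielding a ConversionResult-style value (anything with
+// ok()/error()/value()); on error, returns its status through RETURN(), otherwise
+// the statement expression yields the moved-out value.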
+#define VALUE_OR_RETURN_STATUS_AS_OUT(exp) \
+ ({ \
+ auto _tmp = (exp); \
+ if (!_tmp.ok()) { RETURN(_tmp.error()); } \
+ std::move(_tmp.value()); \
+ })
+
Status AudioFlinger::EffectHandle::enable(int32_t* _aidl_return)
{
AutoMutex _l(mLock);
@@ -1914,6 +1940,32 @@
return Status::ok();
}
+Status AudioFlinger::EffectHandle::getConfig(
+ media::EffectConfig* _config, int32_t* _aidl_return) {
+ AutoMutex _l(mLock);
+ sp<EffectBase> effect = mEffect.promote();
+ if (effect == nullptr || mDisconnected) {
+ RETURN(DEAD_OBJECT);
+ }
+ sp<EffectModule> effectModule = effect->asEffectModule();
+ if (effectModule == nullptr) {
+ RETURN(INVALID_OPERATION);
+ }
+ audio_config_base_t inputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
+ audio_config_base_t outputCfg = AUDIO_CONFIG_BASE_INITIALIZER;
+ bool isOutput;
+ status_t status = effectModule->getConfigs(&inputCfg, &outputCfg, &isOutput);
+ if (status == NO_ERROR) {
+ constexpr bool isInput = false; // effects always use 'OUT' channel masks.
+ _config->inputCfg = VALUE_OR_RETURN_STATUS_AS_OUT(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(inputCfg, isInput));
+ _config->outputCfg = VALUE_OR_RETURN_STATUS_AS_OUT(
+ legacy2aidl_audio_config_base_t_AudioConfigBase(outputCfg, isInput));
+ _config->isOnInputStream = !isOutput;
+ }
+ RETURN(status);
+}
+
Status AudioFlinger::EffectHandle::command(int32_t cmdCode,
const std::vector<uint8_t>& cmdData,
int32_t maxResponseSize,
diff --git a/services/audioflinger/Effects.h b/services/audioflinger/Effects.h
index 42614cc..a89a814 100644
--- a/services/audioflinger/Effects.h
+++ b/services/audioflinger/Effects.h
@@ -283,6 +283,10 @@
status_t setHapticIntensity(int id, int intensity);
status_t setVibratorInfo(const media::AudioVibratorInfo& vibratorInfo);
+ status_t getConfigs(audio_config_base_t* inputCfg,
+ audio_config_base_t* outputCfg,
+ bool* isOutput) const;
+
void dump(int fd, const Vector<String16>& args);
private:
@@ -314,6 +318,7 @@
uint32_t mDisableWaitCnt; // current process() calls count during disable period.
bool mOffloaded; // effect is currently offloaded to the audio DSP
bool mAddedToHal; // effect has been added to the audio HAL
+ bool mIsOutput; // direction of the AF thread
#ifdef FLOAT_EFFECT_CHAIN
bool mSupportsFloat; // effect supports float processing
@@ -370,6 +375,8 @@
int32_t* _aidl_return) override;
android::binder::Status disconnect() override;
android::binder::Status getCblk(media::SharedFileRegion* _aidl_return) override;
+ android::binder::Status getConfig(media::EffectConfig* _config,
+ int32_t* _aidl_return) override;
sp<Client> client() const { return mClient; }
diff --git a/services/audioflinger/Threads.cpp b/services/audioflinger/Threads.cpp
index 56ebb6e..2d00dbe 100644
--- a/services/audioflinger/Threads.cpp
+++ b/services/audioflinger/Threads.cpp
@@ -10248,20 +10248,10 @@
} else {
sp<MmapStreamCallback> callback = mCallback.promote();
if (callback != 0) {
- int channelCount;
- if (isOutput()) {
- channelCount = audio_channel_count_from_out_mask(mChannelMask);
- } else {
- channelCount = audio_channel_count_from_in_mask(mChannelMask);
- }
- Vector<float> values;
- for (int i = 0; i < channelCount; i++) {
- values.add(volume);
- }
mHalVolFloat = volume; // SW volume control worked, so update value.
mNoCallbackWarningCount = 0;
mLock.unlock();
- callback->onVolumeChanged(mChannelMask, values);
+ callback->onVolumeChanged(volume);
mLock.lock();
} else {
if (mNoCallbackWarningCount < kMaxNoCallbackWarnings) {
diff --git a/services/audioflinger/Tracks.cpp b/services/audioflinger/Tracks.cpp
index 6135020..a9720da 100644
--- a/services/audioflinger/Tracks.cpp
+++ b/services/audioflinger/Tracks.cpp
@@ -1908,6 +1908,7 @@
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
&& playbackThread->mHapticChannelCount > 0) {
+ ALOGD("%s, haptic playback was muted for track %d", __func__, mTrack->id());
mTrack->setHapticPlaybackEnabled(false);
*ret = true;
}
@@ -1925,6 +1926,7 @@
PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
if ((mTrack->channelMask() & AUDIO_CHANNEL_HAPTIC_ALL) != AUDIO_CHANNEL_NONE
&& playbackThread->mHapticChannelCount > 0) {
+ ALOGD("%s, haptic playback was unmuted for track %d", __func__, mTrack->id());
mTrack->setHapticPlaybackEnabled(true);
*ret = true;
}
diff --git a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
index 744609f..84a015b 100644
--- a/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
+++ b/services/audiopolicy/managerdefault/AudioPolicyManager.cpp
@@ -2595,31 +2595,19 @@
flags = (audio_input_flags_t)(flags | AUDIO_INPUT_FLAG_ULTRASOUND);
}
- // find a compatible input profile (not necessarily identical in parameters)
- sp<IOProfile> profile;
// sampling rate and flags may be updated by getInputProfile
uint32_t profileSamplingRate = (config->sample_rate == 0) ?
SAMPLE_RATE_HZ_DEFAULT : config->sample_rate;
- audio_format_t profileFormat;
+ audio_format_t profileFormat = config->format;
audio_channel_mask_t profileChannelMask = config->channel_mask;
audio_input_flags_t profileFlags = flags;
- for (;;) {
- profileFormat = config->format; // reset each time through loop, in case it is updated
- profile = getInputProfile(device, profileSamplingRate, profileFormat, profileChannelMask,
- profileFlags);
- if (profile != 0) {
- break; // success
- } else if (profileFlags & AUDIO_INPUT_FLAG_RAW) {
- profileFlags = (audio_input_flags_t) (profileFlags & ~AUDIO_INPUT_FLAG_RAW); // retry
- } else if (profileFlags != AUDIO_INPUT_FLAG_NONE && audio_is_linear_pcm(config->format)) {
- profileFlags = AUDIO_INPUT_FLAG_NONE; // retry
- } else { // fail
- ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, "
- "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(),
- config->sample_rate, config->format, config->channel_mask, flags);
- return input;
- }
+ // find a compatible input profile (not necessarily identical in parameters)
+ sp<IOProfile> profile = getInputProfile(
+ device, profileSamplingRate, profileFormat, profileChannelMask, profileFlags);
+ if (profile == nullptr) {
+ return input;
}
+
// Pick input sampling rate if not specified by client
uint32_t samplingRate = config->sample_rate;
if (samplingRate == 0) {
@@ -7094,51 +7082,68 @@
{
// Choose an input profile based on the requested capture parameters: select the first available
// profile supporting all requested parameters.
+    // The flags can be ignored if they don't contain a must-match flag
+    // (AUDIO_INPUT_FLAG_MMAP_NOIRQ): RAW is dropped first, then all remaining
+    // flags are dropped for linear PCM formats before the search gives up.
//
// TODO: perhaps isCompatibleProfile should return a "matching" score so we can return
// the best matching profile, not the first one.
- sp<IOProfile> firstInexact;
- uint32_t updatedSamplingRate = 0;
- audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
- audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_INVALID;
- for (const auto& hwModule : mHwModules) {
- for (const auto& profile : hwModule->getInputProfiles()) {
- // profile->log();
- //updatedFormat = format;
- if (profile->isCompatibleProfile(DeviceVector(device), samplingRate,
- &samplingRate /*updatedSamplingRate*/,
- format,
- &format, /*updatedFormat*/
- channelMask,
- &channelMask /*updatedChannelMask*/,
- // FIXME ugly cast
- (audio_output_flags_t) flags,
- true /*exactMatchRequiredForInputFlags*/)) {
- return profile;
- }
- if (firstInexact == nullptr && profile->isCompatibleProfile(DeviceVector(device),
- samplingRate,
- &updatedSamplingRate,
- format,
- &updatedFormat,
- channelMask,
- &updatedChannelMask,
- // FIXME ugly cast
- (audio_output_flags_t) flags,
- false /*exactMatchRequiredForInputFlags*/)) {
- firstInexact = profile;
- }
+ const audio_input_flags_t mustMatchFlag = AUDIO_INPUT_FLAG_MMAP_NOIRQ;
+ const audio_input_flags_t oriFlags = flags;
+ for (;;) {
+ sp<IOProfile> firstInexact = nullptr;
+ uint32_t updatedSamplingRate = 0;
+ audio_format_t updatedFormat = AUDIO_FORMAT_INVALID;
+ audio_channel_mask_t updatedChannelMask = AUDIO_CHANNEL_INVALID;
+ for (const auto& hwModule : mHwModules) {
+ for (const auto& profile : hwModule->getInputProfiles()) {
+ // profile->log();
+ //updatedFormat = format;
+ if (profile->isCompatibleProfile(DeviceVector(device), samplingRate,
+ &samplingRate /*updatedSamplingRate*/,
+ format,
+ &format, /*updatedFormat*/
+ channelMask,
+ &channelMask /*updatedChannelMask*/,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ true /*exactMatchRequiredForInputFlags*/)) {
+ return profile;
+ }
+ if (firstInexact == nullptr && profile->isCompatibleProfile(DeviceVector(device),
+ samplingRate,
+ &updatedSamplingRate,
+ format,
+ &updatedFormat,
+ channelMask,
+ &updatedChannelMask,
+ // FIXME ugly cast
+ (audio_output_flags_t) flags,
+ false /*exactMatchRequiredForInputFlags*/)) {
+ firstInexact = profile;
+ }
+ }
+ }
+
+ if (firstInexact != nullptr) {
+ samplingRate = updatedSamplingRate;
+ format = updatedFormat;
+ channelMask = updatedChannelMask;
+ return firstInexact;
+ } else if (flags & AUDIO_INPUT_FLAG_RAW) {
+ flags = (audio_input_flags_t) (flags & ~AUDIO_INPUT_FLAG_RAW); // retry
+ } else if ((flags & mustMatchFlag) == AUDIO_INPUT_FLAG_NONE &&
+ flags != AUDIO_INPUT_FLAG_NONE && audio_is_linear_pcm(format)) {
+ flags = AUDIO_INPUT_FLAG_NONE;
+ } else { // fail
+ ALOGW("%s could not find profile for device %s, sampling rate %u, format %#x, "
+ "channel mask 0x%X, flags %#x", __func__, device->toString().c_str(),
+ samplingRate, format, channelMask, oriFlags);
+ break;
}
}
- if (firstInexact != nullptr) {
- samplingRate = updatedSamplingRate;
- format = updatedFormat;
- channelMask = updatedChannelMask;
- return firstInexact;
- }
- return NULL;
+
+ return nullptr;
}
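
In outline, the flag relaxation performed by the loop above can be factored as a small helper. This is an illustrative sketch only; the relaxInputFlags name is made up here and is not part of the patch:

#include <system/audio.h>

// Returns true after relaxing *flags for another lookup attempt, or false when
// the search should give up (mirrors the retry ladder in getInputProfile above).
static bool relaxInputFlags(audio_input_flags_t* flags, audio_format_t format) {
    if (*flags & AUDIO_INPUT_FLAG_RAW) {
        // First retry: drop RAW only.
        *flags = (audio_input_flags_t)(*flags & ~AUDIO_INPUT_FLAG_RAW);
        return true;
    }
    if ((*flags & AUDIO_INPUT_FLAG_MMAP_NOIRQ) == 0 &&
            *flags != AUDIO_INPUT_FLAG_NONE && audio_is_linear_pcm(format)) {
        // Last retry: drop all flags, unless a must-match flag is present.
        *flags = AUDIO_INPUT_FLAG_NONE;
        return true;
    }
    return false;  // no further relaxation; caller logs a warning and returns nullptr
}
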
float AudioPolicyManager::computeVolume(IVolumeCurves &curves,
diff --git a/services/audiopolicy/service/AudioPolicyEffects.cpp b/services/audiopolicy/service/AudioPolicyEffects.cpp
index 70fdfcb..c7a60c2 100644
--- a/services/audiopolicy/service/AudioPolicyEffects.cpp
+++ b/services/audiopolicy/service/AudioPolicyEffects.cpp
@@ -127,7 +127,8 @@
attributionSource.packageName = "android";
attributionSource.token = sp<BBinder>::make();
sp<AudioEffect> fx = new AudioEffect(attributionSource);
- fx->set(NULL, &effect->mUuid, -1, 0, 0, audioSession, input);
+    fx->set(nullptr /* type */, &effect->mUuid, -1 /* priority */, nullptr /* callback */,
+ audioSession, input);
status_t status = fx->initCheck();
if (status != NO_ERROR && status != ALREADY_EXISTS) {
ALOGW("addInputEffects(): failed to create Fx %s on source %d",
@@ -279,7 +280,8 @@
attributionSource.packageName = "android";
attributionSource.token = sp<BBinder>::make();
sp<AudioEffect> fx = new AudioEffect(attributionSource);
- fx->set(NULL, &effect->mUuid, 0, 0, 0, audioSession, output);
+ fx->set(nullptr /* type */, &effect->mUuid, 0 /* priority */, nullptr /* callback */,
+ audioSession, output);
status_t status = fx->initCheck();
if (status != NO_ERROR && status != ALREADY_EXISTS) {
ALOGE("addOutputSessionEffects(): failed to create Fx %s on session %d",
@@ -984,8 +986,8 @@
attributionSource.packageName = "android";
attributionSource.token = sp<BBinder>::make();
sp<AudioEffect> fx = new AudioEffect(attributionSource);
- fx->set(EFFECT_UUID_NULL, &effectDesc->mUuid, 0, nullptr,
- nullptr, AUDIO_SESSION_DEVICE, AUDIO_IO_HANDLE_NONE,
+ fx->set(EFFECT_UUID_NULL, &effectDesc->mUuid, 0 /* priority */, nullptr /* callback */,
+ AUDIO_SESSION_DEVICE, AUDIO_IO_HANDLE_NONE,
AudioDeviceTypeAddr{deviceEffects->getDeviceType(),
deviceEffects->getDeviceAddress()});
status_t status = fx->initCheck();
diff --git a/services/audiopolicy/service/Spatializer.cpp b/services/audiopolicy/service/Spatializer.cpp
index c199a76..a98d474 100644
--- a/services/audiopolicy/service/Spatializer.cpp
+++ b/services/audiopolicy/service/Spatializer.cpp
@@ -718,9 +718,10 @@
// create FX instance on output
AttributionSourceState attributionSource = AttributionSourceState();
mEngine = new AudioEffect(attributionSource);
- mEngine->set(nullptr, &mEngineDescriptor.uuid, 0, Spatializer::engineCallback /* cbf */,
- this /* user */, AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */,
- false /* probe */, true /* notifyFramesProcessed */);
+ mEngine->set(nullptr /* type */, &mEngineDescriptor.uuid, 0 /* priority */,
+ wp<AudioEffect::IAudioEffectCallback>::fromExisting(this),
+ AUDIO_SESSION_OUTPUT_STAGE, output, {} /* device */, false /* probe */,
+ true /* notifyFramesProcessed */);
status_t status = mEngine->initCheck();
ALOGV("%s mEngine create status %d", __func__, (int)status);
if (status != NO_ERROR) {
@@ -837,27 +838,10 @@
}
}
-void Spatializer::engineCallback(int32_t event, void *user, void *info) {
- if (user == nullptr) {
- return;
- }
- Spatializer* const me = reinterpret_cast<Spatializer *>(user);
- switch (event) {
- case AudioEffect::EVENT_FRAMES_PROCESSED: {
- int frames = info == nullptr ? 0 : *(int*)info;
- ALOGV("%s frames processed %d for me %p", __func__, frames, me);
- me->postFramesProcessedMsg(frames);
- } break;
- default:
- ALOGV("%s event %d", __func__, event);
- break;
- }
-}
-
-void Spatializer::postFramesProcessedMsg(int frames) {
+void Spatializer::onFramesProcessed(int32_t framesProcessed) {
sp<AMessage> msg =
new AMessage(EngineCallbackHandler::kWhatOnFramesProcessed, mHandler);
- msg->setInt32(EngineCallbackHandler::kNumFramesKey, frames);
+ msg->setInt32(EngineCallbackHandler::kNumFramesKey, framesProcessed);
msg->post();
}
diff --git a/services/audiopolicy/service/Spatializer.h b/services/audiopolicy/service/Spatializer.h
index 29f4b08..ad45fa9 100644
--- a/services/audiopolicy/service/Spatializer.h
+++ b/services/audiopolicy/service/Spatializer.h
@@ -84,6 +84,7 @@
* spatializer mixer thread is destroyed.
*/
class Spatializer : public media::BnSpatializer,
+ public AudioEffect::IAudioEffectCallback,
public IBinder::DeathRecipient,
private SpatializerPoseController::Listener {
public:
@@ -274,7 +275,7 @@
return NO_ERROR;
}
- void postFramesProcessedMsg(int frames);
+ virtual void onFramesProcessed(int32_t framesProcessed) override;
/**
* Checks if head and screen sensors must be actively monitored based on
diff --git a/services/camera/libcameraservice/Android.bp b/services/camera/libcameraservice/Android.bp
index e98975e..981c569 100644
--- a/services/camera/libcameraservice/Android.bp
+++ b/services/camera/libcameraservice/Android.bp
@@ -163,7 +163,6 @@
"android.hardware.camera.device@3.5",
"android.hardware.camera.device@3.6",
"android.hardware.camera.device@3.7",
- "android.hardware.camera.device@3.8",
"android.hardware.camera.device-V1-ndk",
"media_permission-aidl-cpp",
],
diff --git a/services/camera/libcameraservice/CameraService.cpp b/services/camera/libcameraservice/CameraService.cpp
index c4b5a6c..fea6c0d 100644
--- a/services/camera/libcameraservice/CameraService.cpp
+++ b/services/camera/libcameraservice/CameraService.cpp
@@ -269,7 +269,10 @@
cameraId.c_str());
continue;
}
- i->getListener()->onTorchStatusChanged(mapToInterface(status), String16{cameraId});
+ auto ret = i->getListener()->onTorchStatusChanged(mapToInterface(status),
+ String16{cameraId});
+ i->handleBinderStatus(ret, "%s: Failed to trigger onTorchStatusChanged for %d:%d: %d",
+ __FUNCTION__, i->getListenerUid(), i->getListenerPid(), ret.exceptionCode());
}
}
@@ -538,8 +541,12 @@
id.c_str());
continue;
}
- listener->getListener()->onPhysicalCameraStatusChanged(mapToInterface(newStatus),
- id16, physicalId16);
+ auto ret = listener->getListener()->onPhysicalCameraStatusChanged(
+ mapToInterface(newStatus), id16, physicalId16);
+ listener->handleBinderStatus(ret,
+ "%s: Failed to trigger onPhysicalCameraStatusChanged for %d:%d: %d",
+ __FUNCTION__, listener->getListenerUid(), listener->getListenerPid(),
+ ret.exceptionCode());
}
}
}
@@ -580,8 +587,11 @@
int32_t newStrengthLevel) {
Mutex::Autolock lock(mStatusListenerLock);
for (auto& i : mListenerList) {
- i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
+ auto ret = i->getListener()->onTorchStrengthLevelChanged(String16{cameraId},
newStrengthLevel);
+ i->handleBinderStatus(ret,
+ "%s: Failed to trigger onTorchStrengthLevelChanged for %d:%d: %d", __FUNCTION__,
+ i->getListenerUid(), i->getListenerPid(), ret.exceptionCode());
}
}
@@ -887,29 +897,37 @@
BasicClient::BasicClient::sCameraService = nullptr;
}
-int CameraService::getDeviceVersion(const String8& cameraId, int* facing, int* orientation) {
+std::pair<int, IPCTransport> CameraService::getDeviceVersion(const String8& cameraId, int* facing,
+ int* orientation) {
ATRACE_CALL();
int deviceVersion = 0;
status_t res;
hardware::hidl_version maxVersion{0,0};
+ IPCTransport transport = IPCTransport::INVALID;
res = mCameraProviderManager->getHighestSupportedVersion(cameraId.string(),
- &maxVersion);
- if (res != OK) return -1;
+ &maxVersion, &transport);
+ if (res != OK || transport == IPCTransport::INVALID) {
+ ALOGE("%s: Unable to get highest supported version for camera id %s", __FUNCTION__,
+ cameraId.string());
+        return std::make_pair(-1, IPCTransport::INVALID);
+ }
deviceVersion = HARDWARE_DEVICE_API_VERSION(maxVersion.get_major(), maxVersion.get_minor());
hardware::CameraInfo info;
if (facing) {
res = mCameraProviderManager->getCameraInfo(cameraId.string(), &info);
- if (res != OK) return -1;
+ if (res != OK) {
+ return std::make_pair(-1, IPCTransport::INVALID);
+ }
*facing = info.facing;
if (orientation) {
*orientation = info.orientation;
}
}
- return deviceVersion;
+ return std::make_pair(deviceVersion, transport);
}
Status CameraService::filterGetInfoErrorCode(status_t err) {
@@ -933,45 +951,47 @@
const sp<IInterface>& cameraCb, const String16& packageName, bool systemNativeClient,
const std::optional<String16>& featureId, const String8& cameraId,
int api1CameraId, int facing, int sensorOrientation, int clientPid, uid_t clientUid,
- int servicePid, int deviceVersion, apiLevel effectiveApiLevel, bool overrideForPerfClass,
- /*out*/sp<BasicClient>* client) {
-
- // Create CameraClient based on device version reported by the HAL.
- switch(deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- ALOGE("Camera using old HAL version: %d", deviceVersion);
- return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
- "Camera device \"%s\" HAL version %d no longer supported",
- cameraId.string(), deviceVersion);
- break;
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- case CAMERA_DEVICE_API_VERSION_3_4:
- case CAMERA_DEVICE_API_VERSION_3_5:
- case CAMERA_DEVICE_API_VERSION_3_6:
- case CAMERA_DEVICE_API_VERSION_3_7:
- case CAMERA_DEVICE_API_VERSION_3_8:
- if (effectiveApiLevel == API_1) { // Camera1 API route
- sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
- *client = new Camera2Client(cameraService, tmp, packageName, featureId,
- cameraId, api1CameraId, facing, sensorOrientation, clientPid, clientUid,
- servicePid, overrideForPerfClass);
- } else { // Camera2 API route
- sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
- static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
- *client = new CameraDeviceClient(cameraService, tmp, packageName,
- systemNativeClient, featureId, cameraId, facing, sensorOrientation,
- clientPid, clientUid, servicePid, overrideForPerfClass);
- }
- break;
- default:
- // Should not be reachable
- ALOGE("Unknown camera device HAL version: %d", deviceVersion);
- return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
- "Camera device \"%s\" has unknown HAL version %d",
- cameraId.string(), deviceVersion);
+ int servicePid, std::pair<int, IPCTransport> deviceVersionAndTransport,
+ apiLevel effectiveApiLevel, bool overrideForPerfClass, /*out*/sp<BasicClient>* client) {
+ // For HIDL devices
+ if (deviceVersionAndTransport.second == IPCTransport::HIDL) {
+ // Create CameraClient based on device version reported by the HAL.
+ int deviceVersion = deviceVersionAndTransport.first;
+ switch(deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ ALOGE("Camera using old HAL version: %d", deviceVersion);
+ return STATUS_ERROR_FMT(ERROR_DEPRECATED_HAL,
+ "Camera device \"%s\" HAL version %d no longer supported",
+ cameraId.string(), deviceVersion);
+ break;
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ case CAMERA_DEVICE_API_VERSION_3_2:
+ case CAMERA_DEVICE_API_VERSION_3_3:
+ case CAMERA_DEVICE_API_VERSION_3_4:
+ case CAMERA_DEVICE_API_VERSION_3_5:
+ case CAMERA_DEVICE_API_VERSION_3_6:
+ case CAMERA_DEVICE_API_VERSION_3_7:
+ break;
+ default:
+ // Should not be reachable
+ ALOGE("Unknown camera device HAL version: %d", deviceVersion);
+ return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
+ "Camera device \"%s\" has unknown HAL version %d",
+ cameraId.string(), deviceVersion);
+ }
+ }
+ if (effectiveApiLevel == API_1) { // Camera1 API route
+ sp<ICameraClient> tmp = static_cast<ICameraClient*>(cameraCb.get());
+ *client = new Camera2Client(cameraService, tmp, packageName, featureId,
+ cameraId, api1CameraId, facing, sensorOrientation, clientPid, clientUid,
+ servicePid, overrideForPerfClass);
+ } else { // Camera2 API route
+ sp<hardware::camera2::ICameraDeviceCallbacks> tmp =
+ static_cast<hardware::camera2::ICameraDeviceCallbacks*>(cameraCb.get());
+ *client = new CameraDeviceClient(cameraService, tmp, packageName,
+ systemNativeClient, featureId, cameraId, facing, sensorOrientation,
+ clientPid, clientUid, servicePid, overrideForPerfClass);
}
return Status::ok();
}
@@ -1822,7 +1842,8 @@
// give flashlight a chance to close devices if necessary.
mFlashlight->prepareDeviceOpen(cameraId);
- int deviceVersion = getDeviceVersion(cameraId, /*out*/&facing, /*out*/&orientation);
+ auto deviceVersionAndTransport =
+ getDeviceVersion(cameraId, /*out*/&facing, /*out*/&orientation);
if (facing == -1) {
ALOGE("%s: Unable to get camera device \"%s\" facing", __FUNCTION__, cameraId.string());
return STATUS_ERROR_FMT(ERROR_INVALID_OPERATION,
@@ -1835,7 +1856,7 @@
if(!(ret = makeClient(this, cameraCb, clientPackageName, systemNativeClient,
clientFeatureId, cameraId, api1CameraId, facing, orientation,
clientPid, clientUid, getpid(),
- deviceVersion, effectiveApiLevel, overrideForPerfClass,
+ deviceVersionAndTransport, effectiveApiLevel, overrideForPerfClass,
/*out*/&tmp)).isOk()) {
return ret;
}
@@ -1890,6 +1911,9 @@
}
}
+ // Enable/disable camera service watchdog
+ client->setCameraServiceWatchdog(mCameraServiceWatchdogEnabled);
+
// Set rotate-and-crop override behavior
if (mOverrideRotateAndCropMode != ANDROID_SCALER_ROTATE_AND_CROP_AUTO) {
client->setRotateAndCropOverride(mOverrideRotateAndCropMode);
@@ -2371,10 +2395,8 @@
for (const auto& it : mListenerList) {
auto ret = it->getListener()->onCameraAccessPrioritiesChanged();
- if (!ret.isOk()) {
- ALOGE("%s: Failed to trigger permission callback: %d", __FUNCTION__,
- ret.exceptionCode());
- }
+ it->handleBinderStatus(ret, "%s: Failed to trigger permission callback for %d:%d: %d",
+ __FUNCTION__, it->getListenerUid(), it->getListenerPid(), ret.exceptionCode());
}
}
@@ -2708,45 +2730,48 @@
return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- int deviceVersion = getDeviceVersion(id);
- switch (deviceVersion) {
- case CAMERA_DEVICE_API_VERSION_1_0:
- case CAMERA_DEVICE_API_VERSION_3_0:
- case CAMERA_DEVICE_API_VERSION_3_1:
- if (apiVersion == API_VERSION_2) {
- ALOGV("%s: Camera id %s uses HAL version %d <3.2, doesn't support api2 without shim",
- __FUNCTION__, id.string(), deviceVersion);
- *isSupported = false;
- } else { // if (apiVersion == API_VERSION_1) {
- ALOGV("%s: Camera id %s uses older HAL before 3.2, but api1 is always supported",
+ auto deviceVersionAndTransport = getDeviceVersion(id);
+ if (deviceVersionAndTransport.first == -1) {
+ String8 msg = String8::format("Unknown camera ID %s", id.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
+ }
+ if (deviceVersionAndTransport.second == IPCTransport::HIDL) {
+ int deviceVersion = deviceVersionAndTransport.first;
+ switch (deviceVersion) {
+ case CAMERA_DEVICE_API_VERSION_1_0:
+ case CAMERA_DEVICE_API_VERSION_3_0:
+ case CAMERA_DEVICE_API_VERSION_3_1:
+ if (apiVersion == API_VERSION_2) {
+ ALOGV("%s: Camera id %s uses HAL version %d <3.2, doesn't support api2 without "
+ "shim", __FUNCTION__, id.string(), deviceVersion);
+ *isSupported = false;
+ } else { // if (apiVersion == API_VERSION_1) {
+ ALOGV("%s: Camera id %s uses older HAL before 3.2, but api1 is always "
+ "supported", __FUNCTION__, id.string());
+ *isSupported = true;
+ }
+ break;
+ case CAMERA_DEVICE_API_VERSION_3_2:
+ case CAMERA_DEVICE_API_VERSION_3_3:
+ case CAMERA_DEVICE_API_VERSION_3_4:
+ case CAMERA_DEVICE_API_VERSION_3_5:
+ case CAMERA_DEVICE_API_VERSION_3_6:
+ case CAMERA_DEVICE_API_VERSION_3_7:
+ ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
__FUNCTION__, id.string());
*isSupported = true;
+ break;
+ default: {
+ String8 msg = String8::format("Unknown device version %x for device %s",
+ deviceVersion, id.string());
+ ALOGE("%s: %s", __FUNCTION__, msg.string());
+ return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
}
- break;
- case CAMERA_DEVICE_API_VERSION_3_2:
- case CAMERA_DEVICE_API_VERSION_3_3:
- case CAMERA_DEVICE_API_VERSION_3_4:
- case CAMERA_DEVICE_API_VERSION_3_5:
- case CAMERA_DEVICE_API_VERSION_3_6:
- case CAMERA_DEVICE_API_VERSION_3_7:
- case CAMERA_DEVICE_API_VERSION_3_8:
- ALOGV("%s: Camera id %s uses HAL3.2 or newer, supports api1/api2 directly",
- __FUNCTION__, id.string());
- *isSupported = true;
- break;
- case -1: {
- String8 msg = String8::format("Unknown camera ID %s", id.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(ERROR_ILLEGAL_ARGUMENT, msg.string());
}
- default: {
- String8 msg = String8::format("Unknown device version %x for device %s",
- deviceVersion, id.string());
- ALOGE("%s: %s", __FUNCTION__, msg.string());
- return STATUS_ERROR(ERROR_INVALID_OPERATION, msg.string());
- }
+ } else {
+ *isSupported = true;
}
-
return Status::ok();
}
@@ -4633,8 +4658,12 @@
cameraId.c_str());
continue;
}
- listener->getListener()->onStatusChanged(mapToInterface(status),
+ auto ret = listener->getListener()->onStatusChanged(mapToInterface(status),
String16(cameraId));
+ listener->handleBinderStatus(ret,
+ "%s: Failed to trigger onStatusChanged callback for %d:%d: %d",
+ __FUNCTION__, listener->getListenerUid(), listener->getListenerPid(),
+ ret.exceptionCode());
}
});
}
@@ -4667,10 +4696,10 @@
} else {
ret = it->getListener()->onCameraClosed(cameraId64);
}
- if (!ret.isOk()) {
- ALOGE("%s: Failed to trigger onCameraOpened/onCameraClosed callback: %d", __FUNCTION__,
- ret.exceptionCode());
- }
+
+ it->handleBinderStatus(ret,
+ "%s: Failed to trigger onCameraOpened/onCameraClosed callback for %d:%d: %d",
+ __FUNCTION__, it->getListenerUid(), it->getListenerPid(), ret.exceptionCode());
}
}
@@ -4771,8 +4800,12 @@
String8(physicalCameraId).c_str());
continue;
}
- listener->getListener()->onPhysicalCameraStatusChanged(status,
+ auto ret = listener->getListener()->onPhysicalCameraStatusChanged(status,
logicalCameraId, physicalCameraId);
+ listener->handleBinderStatus(ret,
+ "%s: Failed to trigger onPhysicalCameraStatusChanged for %d:%d: %d",
+ __FUNCTION__, listener->getListenerUid(), listener->getListenerPid(),
+ ret.exceptionCode());
}
}
}
@@ -4828,6 +4861,8 @@
return handleSetCameraMute(args);
} else if (args.size() >= 2 && args[0] == String16("watch")) {
return handleWatchCommand(args, in, out);
+ } else if (args.size() >= 2 && args[0] == String16("set-watchdog")) {
+ return handleSetCameraServiceWatchdog(args);
} else if (args.size() == 1 && args[0] == String16("help")) {
printHelp(out);
return OK;
@@ -4921,6 +4956,28 @@
return OK;
}
+status_t CameraService::handleSetCameraServiceWatchdog(const Vector<String16>& args) {
+ int enableWatchdog = atoi(String8(args[1]));
+
+ if (enableWatchdog < 0 || enableWatchdog > 1) return BAD_VALUE;
+
+ Mutex::Autolock lock(mServiceLock);
+
+ mCameraServiceWatchdogEnabled = enableWatchdog;
+
+ const auto clients = mActiveClientManager.getAll();
+ for (auto& current : clients) {
+ if (current != nullptr) {
+ const auto basicClient = current->getValue();
+ if (basicClient.get() != nullptr) {
+ basicClient->setCameraServiceWatchdog(enableWatchdog);
+ }
+ }
+ }
+
+ return OK;
+}
+
status_t CameraService::handleGetRotateAndCrop(int out) {
Mutex::Autolock lock(mServiceLock);
diff --git a/services/camera/libcameraservice/CameraService.h b/services/camera/libcameraservice/CameraService.h
index 89a537d..59315d5 100644
--- a/services/camera/libcameraservice/CameraService.h
+++ b/services/camera/libcameraservice/CameraService.h
@@ -47,6 +47,7 @@
#include "media/RingBuffer.h"
#include "utils/AutoConditionLock.h"
#include "utils/ClientManager.h"
+#include "utils/IPCTransport.h"
#include <set>
#include <string>
@@ -242,7 +243,7 @@
/////////////////////////////////////////////////////////////////////
// CameraDeviceFactory functionality
- int getDeviceVersion(const String8& cameraId, int* facing = nullptr,
+ std::pair<int, IPCTransport> getDeviceVersion(const String8& cameraId, int* facing = nullptr,
int* orientation = nullptr);
/////////////////////////////////////////////////////////////////////
@@ -338,6 +339,9 @@
// Set/reset camera mute
virtual status_t setCameraMute(bool enabled) = 0;
+ // Set Camera service watchdog
+ virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+
// The injection camera session to replace the internal camera
// session.
virtual status_t injectCamera(const String8& injectedCamId,
@@ -1054,6 +1058,29 @@
return IInterface::asBinder(mListener)->linkToDeath(this);
}
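+        // Logs a failed listener callback via ALOGE(logOnError, args...), except
+        // that consecutive EX_TRANSACTION_FAILED errors from the same listener are
+        // muted (the listener has likely died) until a transaction succeeds again.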
+ template<typename... args_t>
+ void handleBinderStatus(const binder::Status &ret, const char *logOnError,
+ args_t... args) {
+ if (!ret.isOk() &&
+ (ret.exceptionCode() != binder::Status::Exception::EX_TRANSACTION_FAILED
+ || !mLastTransactFailed)) {
+ ALOGE(logOnError, args...);
+ }
+
+ // If the transaction failed, the process may have died (or other things, see
+ // b/28321379). Mute consecutive errors from this listener to avoid log spam.
+ if (ret.exceptionCode() == binder::Status::Exception::EX_TRANSACTION_FAILED) {
+ if (!mLastTransactFailed) {
+ ALOGE("%s: Muting similar errors from listener %d:%d", __FUNCTION__,
+ mListenerUid, mListenerPid);
+ }
+ mLastTransactFailed = true;
+ } else {
+ // Reset mLastTransactFailed when binder becomes healthy again.
+ mLastTransactFailed = false;
+ }
+ }
+
virtual void binderDied(const wp<IBinder> &/*who*/) {
auto parent = mParent.promote();
if (parent.get() != nullptr) {
@@ -1074,6 +1101,9 @@
int mListenerPid = -1;
bool mIsVendorListener = false;
bool mOpenCloseCallbackAllowed = false;
+
+ // Flag for preventing log spam when binder becomes unhealthy
+ bool mLastTransactFailed = false;
};
// Guarded by mStatusListenerMutex
@@ -1197,6 +1227,9 @@
// Handle 'watch' command as passed through 'cmd'
status_t handleWatchCommand(const Vector<String16> &args, int inFd, int outFd);
+ // Set the camera service watchdog
+ status_t handleSetCameraServiceWatchdog(const Vector<String16>& args);
+
// Enable tag monitoring of the given tags in provided clients
status_t startWatchingTags(const Vector<String16> &args, int outFd);
@@ -1240,8 +1273,9 @@
const sp<IInterface>& cameraCb, const String16& packageName,
bool systemNativeClient, const std::optional<String16>& featureId,
const String8& cameraId, int api1CameraId, int facing, int sensorOrientation,
- int clientPid, uid_t clientUid, int servicePid, int deviceVersion,
- apiLevel effectiveApiLevel, bool overrideForPerfClass, /*out*/sp<BasicClient>* client);
+ int clientPid, uid_t clientUid, int servicePid,
+ std::pair<int, IPCTransport> deviceVersionAndIPCTransport, apiLevel effectiveApiLevel,
+ bool overrideForPerfClass, /*out*/sp<BasicClient>* client);
status_t checkCameraAccess(const String16& opPackageName);
@@ -1282,6 +1316,9 @@
// Current camera mute mode
bool mOverrideCameraMuteMode = false;
+ // Camera Service watchdog flag
+ bool mCameraServiceWatchdogEnabled = true;
+
/**
* A listener class that implements the IBinder::DeathRecipient interface
* for use to call back the error state injected by the external camera, and
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.cpp b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
index fcd6ebe..950da6a 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.cpp
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.cpp
@@ -64,6 +64,17 @@
}
}
+void CameraServiceWatchdog::setEnabled(bool enable)
+{
+ AutoMutex _l(mEnabledLock);
+
+    mEnabled = enable;
+}
+
void CameraServiceWatchdog::stop(uint32_t tid)
{
AutoMutex _l(mWatchdogLock);
diff --git a/services/camera/libcameraservice/CameraServiceWatchdog.h b/services/camera/libcameraservice/CameraServiceWatchdog.h
index f4955e2..f7c90a9 100644
--- a/services/camera/libcameraservice/CameraServiceWatchdog.h
+++ b/services/camera/libcameraservice/CameraServiceWatchdog.h
@@ -21,10 +21,12 @@
* expected duration has exceeded.
* Notes on multi-threaded behaviors:
* - The threadloop is blocked/paused when there are no calls being
- * monitored.
+ * monitored (when the TID to cycle counter map is empty).
* - The start and stop functions handle simultaneous call monitoring
* and single call monitoring differently. See function documentation for
* more details.
+ * To disable/enable:
+ * - adb shell cmd media.camera set-watchdog [0/1]
*/
#include <chrono>
@@ -49,15 +51,19 @@
public:
explicit CameraServiceWatchdog() : mPause(true), mMaxCycles(kMaxCycles),
- mCycleLengthMs(kCycleLengthMs) {};
+ mCycleLengthMs(kCycleLengthMs), mEnabled(true) {};
- explicit CameraServiceWatchdog (size_t maxCycles, uint32_t cycleLengthMs) :
- mPause(true), mMaxCycles(maxCycles), mCycleLengthMs(cycleLengthMs) {};
+ explicit CameraServiceWatchdog (size_t maxCycles, uint32_t cycleLengthMs, bool enabled) :
+ mPause(true), mMaxCycles(maxCycles), mCycleLengthMs(cycleLengthMs), mEnabled(enabled)
+ {};
virtual ~CameraServiceWatchdog() {};
virtual void requestExit();
+ /** Enables/disables the watchdog */
+ void setEnabled(bool enable);
+
/** Used to wrap monitored calls in start and stop functions using custom timer values */
template<typename T>
auto watchThread(T func, uint32_t tid, uint32_t cycles, uint32_t cycleLength) {
@@ -66,8 +72,13 @@
if (cycles != mMaxCycles || cycleLength != mCycleLengthMs) {
// Create another instance of the watchdog to prevent disruption
// of timer for current monitored calls
+
+ // Lock for mEnabled
+ mEnabledLock.lock();
sp<CameraServiceWatchdog> tempWatchdog =
- new CameraServiceWatchdog(cycles, cycleLength);
+ new CameraServiceWatchdog(cycles, cycleLength, mEnabled);
+ mEnabledLock.unlock();
+
tempWatchdog->run("CameraServiceWatchdog");
res = tempWatchdog->watchThread(func, tid);
tempWatchdog->requestExit();
@@ -84,11 +95,17 @@
/** Used to wrap monitored calls in start and stop functions using class timer values */
template<typename T>
auto watchThread(T func, uint32_t tid) {
+ AutoMutex _l(mEnabledLock);
+
auto res = NULL;
- start(tid);
- res = func();
- stop(tid);
+ if (mEnabled) {
+ start(tid);
+ res = func();
+ stop(tid);
+ } else {
+ res = func();
+ }
return res;
}
@@ -109,11 +126,13 @@
virtual bool threadLoop();
- Mutex mWatchdogLock; // Lock for condition variable
- Condition mWatchdogCondition; // Condition variable for stop/start
- bool mPause; // True if thread is currently paused
- uint32_t mMaxCycles; // Max cycles
- uint32_t mCycleLengthMs; // Length of time elapsed per cycle
+ Mutex mWatchdogLock; // Lock for condition variable
+ Mutex mEnabledLock; // Lock for enabled status
+ Condition mWatchdogCondition; // Condition variable for stop/start
+ bool mPause; // True if tid map is empty
+ uint32_t mMaxCycles; // Max cycles
+ uint32_t mCycleLengthMs; // Length of time elapsed per cycle
+ bool mEnabled; // True if watchdog is enabled
std::unordered_map<uint32_t, uint32_t> tidToCycleCounterMap; // Thread Id to cycle counter map
};
diff --git a/services/camera/libcameraservice/api1/Camera2Client.cpp b/services/camera/libcameraservice/api1/Camera2Client.cpp
index c2f8488..20bf73d 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.cpp
+++ b/services/camera/libcameraservice/api1/Camera2Client.cpp
@@ -112,7 +112,7 @@
{
SharedParameters::Lock l(mParameters);
- res = l.mParameters.initialize(mDevice.get(), mDeviceVersion);
+ res = l.mParameters.initialize(mDevice.get());
if (res != OK) {
ALOGE("%s: Camera %d: unable to build defaults: %s (%d)",
__FUNCTION__, mCameraId, strerror(-res), res);
@@ -2316,6 +2316,10 @@
return INVALID_OPERATION;
}
+status_t Camera2Client::setCameraServiceWatchdog(bool enabled) {
+ return mDevice->setCameraServiceWatchdog(enabled);
+}
+
status_t Camera2Client::setRotateAndCropOverride(uint8_t rotateAndCrop) {
if (rotateAndCrop > ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/api1/Camera2Client.h b/services/camera/libcameraservice/api1/Camera2Client.h
index c8dfc46..8081efa 100644
--- a/services/camera/libcameraservice/api1/Camera2Client.h
+++ b/services/camera/libcameraservice/api1/Camera2Client.h
@@ -90,6 +90,8 @@
virtual bool supportsCameraMute();
virtual status_t setCameraMute(bool enabled);
+ virtual status_t setCameraServiceWatchdog(bool enabled);
+
/**
* Interface used by CameraService
*/
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.cpp b/services/camera/libcameraservice/api1/client2/Parameters.cpp
index 9a7ada2..123cd75 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.cpp
+++ b/services/camera/libcameraservice/api1/client2/Parameters.cpp
@@ -50,7 +50,7 @@
Parameters::~Parameters() {
}
-status_t Parameters::initialize(CameraDeviceBase *device, int deviceVersion) {
+status_t Parameters::initialize(CameraDeviceBase *device) {
status_t res;
if (device == nullptr) {
ALOGE("%s: device is null!", __FUNCTION__);
@@ -63,7 +63,6 @@
return BAD_VALUE;
}
Parameters::info = &info;
- mDeviceVersion = deviceVersion;
res = buildFastInfo(device);
if (res != OK) return res;
diff --git a/services/camera/libcameraservice/api1/client2/Parameters.h b/services/camera/libcameraservice/api1/client2/Parameters.h
index 263025e..cbe62a7 100644
--- a/services/camera/libcameraservice/api1/client2/Parameters.h
+++ b/services/camera/libcameraservice/api1/client2/Parameters.h
@@ -269,7 +269,7 @@
~Parameters();
// Sets up default parameters
- status_t initialize(CameraDeviceBase *device, int deviceVersion);
+ status_t initialize(CameraDeviceBase *device);
// Build fast-access device static info from static info
status_t buildFastInfo(CameraDeviceBase *device);
@@ -459,7 +459,6 @@
// Helper function to get the suggested video sizes
Vector<Size> getPreferredVideoSizes() const;
- int mDeviceVersion;
uint8_t mDefaultSceneMode;
};
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
index 5e91501..15df981 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.cpp
@@ -1730,6 +1730,10 @@
return binder::Status::ok();
}
+status_t CameraDeviceClient::setCameraServiceWatchdog(bool enabled) {
+ return mDevice->setCameraServiceWatchdog(enabled);
+}
+
status_t CameraDeviceClient::setRotateAndCropOverride(uint8_t rotateAndCrop) {
if (rotateAndCrop > ANDROID_SCALER_ROTATE_AND_CROP_AUTO) return BAD_VALUE;
diff --git a/services/camera/libcameraservice/api2/CameraDeviceClient.h b/services/camera/libcameraservice/api2/CameraDeviceClient.h
index c5aad6b..45915ba 100644
--- a/services/camera/libcameraservice/api2/CameraDeviceClient.h
+++ b/services/camera/libcameraservice/api2/CameraDeviceClient.h
@@ -206,6 +206,8 @@
virtual status_t stopWatchingTags(int out);
virtual status_t dumpWatchedEventsToVector(std::vector<std::string> &out);
+ virtual status_t setCameraServiceWatchdog(bool enabled);
+
/**
* Device listener interface
*/
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
index 9303fd2..beb655b 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.cpp
@@ -66,6 +66,10 @@
return OK;
}
+status_t CameraOfflineSessionClient::setCameraServiceWatchdog(bool) {
+ return OK;
+}
+
status_t CameraOfflineSessionClient::setRotateAndCropOverride(uint8_t /*rotateAndCrop*/) {
// Since we're not submitting more capture requests, changes to rotateAndCrop override
// make no difference.
diff --git a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
index f2c42d8..9ea1093 100644
--- a/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
+++ b/services/camera/libcameraservice/api2/CameraOfflineSessionClient.h
@@ -84,6 +84,8 @@
bool supportsCameraMute() override;
status_t setCameraMute(bool enabled) override;
+ status_t setCameraServiceWatchdog(bool enabled) override;
+
// permissions management
status_t startCameraOps() override;
status_t finishCameraOps() override;
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.cpp b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
index 49a9760..dc5002b 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.cpp
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.cpp
@@ -64,7 +64,6 @@
clientFeatureId, cameraId, api1CameraId, cameraFacing, sensorOrientation, clientPid,
clientUid, servicePid),
mSharedCameraCallbacks(remoteCallback),
- mDeviceVersion(cameraService->getDeviceVersion(TClientBase::mCameraIdStr)),
mDeviceActive(false), mApi1CameraId(api1CameraId)
{
ALOGI("Camera %s: Opened. Client: %s (PID %d, UID %d)", cameraId.string(),
@@ -423,11 +422,6 @@
}
template <typename TClientBase>
-int Camera2ClientBase<TClientBase>::getCameraDeviceVersion() const {
- return mDeviceVersion;
-}
-
-template <typename TClientBase>
const sp<CameraDeviceBase>& Camera2ClientBase<TClientBase>::getCameraDevice() {
return mDevice;
}
diff --git a/services/camera/libcameraservice/common/Camera2ClientBase.h b/services/camera/libcameraservice/common/Camera2ClientBase.h
index ec33f46..b0d1c3f 100644
--- a/services/camera/libcameraservice/common/Camera2ClientBase.h
+++ b/services/camera/libcameraservice/common/Camera2ClientBase.h
@@ -152,8 +152,6 @@
/** CameraDeviceBase instance wrapping HAL3+ entry */
- const int mDeviceVersion;
-
// Note: This was previously set to const to avoid mDevice being updated -
// b/112639939 (update of sp<> is racy) during dumpDevice (which is important to be lock free
// for debugging purpose). The const has been removed since CameraDeviceBase
diff --git a/services/camera/libcameraservice/common/CameraDeviceBase.h b/services/camera/libcameraservice/common/CameraDeviceBase.h
index 7e2f93c..69514f3 100644
--- a/services/camera/libcameraservice/common/CameraDeviceBase.h
+++ b/services/camera/libcameraservice/common/CameraDeviceBase.h
@@ -449,6 +449,11 @@
virtual status_t setCameraMute(bool enabled) = 0;
/**
+ * Enable/disable camera service watchdog
+ */
+ virtual status_t setCameraServiceWatchdog(bool enabled) = 0;
+
+ /**
* Get the status tracker of the camera device
*/
virtual wp<camera3::StatusTracker> getStatusTracker() = 0;
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.cpp b/services/camera/libcameraservice/common/CameraProviderManager.cpp
index 6edf1ac..6b20a56 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.cpp
+++ b/services/camera/libcameraservice/common/CameraProviderManager.cpp
@@ -21,7 +21,6 @@
#include "CameraProviderManager.h"
#include <aidl/android/hardware/camera/device/ICameraDevice.h>
-#include <android/hardware/camera/device/3.8/ICameraDevice.h>
#include <algorithm>
#include <chrono>
@@ -61,11 +60,6 @@
const float CameraProviderManager::kDepthARTolerance = .1f;
-// AIDL Devices start with major version 1, offset added to bring them up to HIDL.
-const uint16_t kAidlDeviceMajorOffset = 2;
-// AIDL Devices start with minor version 1, offset added to bring them up to HIDL.
-const uint16_t kAidlDeviceMinorOffset = 7;
-
CameraProviderManager::HidlServiceInteractionProxyImpl
CameraProviderManager::sHidlServiceInteractionProxy{};
@@ -280,15 +274,13 @@
return deviceIds;
}
-bool CameraProviderManager::isValidDevice(const std::string &id, uint16_t majorVersion) const {
- std::lock_guard<std::mutex> lock(mInterfaceMutex);
- return isValidDeviceLocked(id, majorVersion);
-}
-
-bool CameraProviderManager::isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const {
+bool CameraProviderManager::isValidDeviceLocked(const std::string &id, uint16_t majorVersion,
+ IPCTransport transport) const {
for (auto& provider : mProviders) {
+ IPCTransport providerTransport = provider->getIPCTransport();
for (auto& deviceInfo : provider->mDevices) {
- if (deviceInfo->mId == id && deviceInfo->mVersion.get_major() == majorVersion) {
+ if (deviceInfo->mId == id && deviceInfo->mVersion.get_major() == majorVersion &&
+ transport == providerTransport) {
return true;
}
}
@@ -369,29 +361,32 @@
return getCameraCharacteristicsLocked(id, overrideForPerfClass, characteristics);
}
-// Till hidl is removed from the android source tree, we use this for aidl as
-// well. We artificially give aidl camera device version 1 a major version 3 and minor
-// version 8.
status_t CameraProviderManager::getHighestSupportedVersion(const std::string &id,
- hardware::hidl_version *v) {
+ hardware::hidl_version *v, IPCTransport *transport) {
+ if (v == nullptr || transport == nullptr) {
+ return BAD_VALUE;
+ }
std::lock_guard<std::mutex> lock(mInterfaceMutex);
hardware::hidl_version maxVersion{0,0};
bool found = false;
+ IPCTransport providerTransport = IPCTransport::INVALID;
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
if (deviceInfo->mId == id) {
if (deviceInfo->mVersion > maxVersion) {
maxVersion = deviceInfo->mVersion;
+ providerTransport = provider->getIPCTransport();
found = true;
}
}
}
}
- if (!found) {
+ if (!found || providerTransport == IPCTransport::INVALID) {
return NAME_NOT_FOUND;
}
*v = maxVersion;
+ *transport = providerTransport;
return OK;
}
@@ -613,8 +608,7 @@
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- auto deviceInfo = findDeviceInfoLocked(id,
- /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+ auto deviceInfo = findDeviceInfoLocked(id);
if (deviceInfo == nullptr) return NAME_NOT_FOUND;
auto *aidlDeviceInfo3 = static_cast<AidlProviderInfo::AidlDeviceInfo3*>(deviceInfo);
@@ -695,8 +689,7 @@
std::lock_guard<std::mutex> lock(mInterfaceMutex);
- auto deviceInfo = findDeviceInfoLocked(id,
- /*minVersion*/ {3,0}, /*maxVersion*/ {4,0});
+ auto deviceInfo = findDeviceInfoLocked(id);
if (deviceInfo == nullptr) return NAME_NOT_FOUND;
auto *hidlDeviceInfo3 = static_cast<HidlProviderInfo::HidlDeviceInfo3*>(deviceInfo);
@@ -897,9 +890,16 @@
}
CameraProviderManager::ProviderInfo::DeviceInfo* CameraProviderManager::findDeviceInfoLocked(
- const std::string& id,
- hardware::hidl_version minVersion, hardware::hidl_version maxVersion) const {
+ const std::string& id) const {
for (auto& provider : mProviders) {
+ using hardware::hidl_version;
+ IPCTransport transport = provider->getIPCTransport();
+ // AIDL min version starts at major: 1 minor: 1
+ hidl_version minVersion =
+ (transport == IPCTransport::HIDL) ? hidl_version{3, 2} : hidl_version{1, 1};
+ hidl_version maxVersion =
+ (transport == IPCTransport::HIDL) ? hidl_version{3, 7} : hidl_version{1000, 0};
+
for (auto& deviceInfo : provider->mDevices) {
if (deviceInfo->mId == id &&
minVersion <= deviceInfo->mVersion && maxVersion >= deviceInfo->mVersion) {
@@ -911,16 +911,13 @@
}
metadata_vendor_id_t CameraProviderManager::getProviderTagIdLocked(
- const std::string& id, hardware::hidl_version minVersion,
- hardware::hidl_version maxVersion) const {
+ const std::string& id) const {
metadata_vendor_id_t ret = CAMERA_METADATA_INVALID_VENDOR_ID;
std::lock_guard<std::mutex> lock(mInterfaceMutex);
for (auto& provider : mProviders) {
for (auto& deviceInfo : provider->mDevices) {
- if (deviceInfo->mId == id &&
- minVersion <= deviceInfo->mVersion &&
- maxVersion >= deviceInfo->mVersion) {
+ if (deviceInfo->mId == id) {
return provider->mProviderTagid;
}
}
@@ -1274,9 +1271,14 @@
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::fixupMonochromeTags() {
status_t res = OK;
auto& c = mCameraCharacteristics;
-
+ sp<ProviderInfo> parentProvider = mParentProvider.promote();
+ if (parentProvider == nullptr) {
+ return DEAD_OBJECT;
+ }
+ IPCTransport ipcTransport = parentProvider->getIPCTransport();
// Override static metadata for MONOCHROME camera with older device version
- if (mVersion.get_major() == 3 && mVersion.get_minor() < 5) {
+ if (ipcTransport == IPCTransport::HIDL &&
+ (mVersion.get_major() == 3 && mVersion.get_minor() < 5)) {
camera_metadata_entry cap = c.find(ANDROID_REQUEST_AVAILABLE_CAPABILITIES);
for (size_t i = 0; i < cap.count; i++) {
if (cap.data.u8[i] == ANDROID_REQUEST_AVAILABLE_CAPABILITIES_MONOCHROME) {
@@ -1405,6 +1407,27 @@
return res;
}
+status_t CameraProviderManager::ProviderInfo::DeviceInfo3::addReadoutTimestampTag(
+ bool readoutTimestampSupported) {
+ status_t res = OK;
+ auto& c = mCameraCharacteristics;
+
+ auto entry = c.find(ANDROID_SENSOR_READOUT_TIMESTAMP);
+ if (entry.count != 0) {
+ ALOGE("%s: CameraCharacteristics must not contain ANDROID_SENSOR_READOUT_TIMESTAMP!",
+ __FUNCTION__);
+ }
+
+ uint8_t readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_NOT_SUPPORTED;
+ if (readoutTimestampSupported) {
+ readoutTimestamp = ANDROID_SENSOR_READOUT_TIMESTAMP_HARDWARE;
+ }
+
+ res = c.update(ANDROID_SENSOR_READOUT_TIMESTAMP, &readoutTimestamp, 1);
+
+ return res;
+}
+
status_t CameraProviderManager::ProviderInfo::DeviceInfo3::removeAvailableKeys(
CameraMetadata& c, const std::vector<uint32_t>& keys, uint32_t keyTag) {
status_t res = OK;
@@ -1636,6 +1659,7 @@
}
for (auto& provider : mProviders) {
+ IPCTransport transport = provider->getIPCTransport();
for (auto& deviceInfo : provider->mDevices) {
std::vector<std::string> physicalIds;
if (deviceInfo->mIsLogicalCamera) {
@@ -1643,7 +1667,8 @@
cameraId) != deviceInfo->mPhysicalIds.end()) {
int deviceVersion = HARDWARE_DEVICE_API_VERSION(
deviceInfo->mVersion.get_major(), deviceInfo->mVersion.get_minor());
- if (deviceVersion < CAMERA_DEVICE_API_VERSION_3_5) {
+ if (transport == IPCTransport::HIDL &&
+ deviceVersion < CAMERA_DEVICE_API_VERSION_3_5) {
ALOGE("%s: Wrong deviceVersion %x for hiddenPhysicalCameraId %s",
__FUNCTION__, deviceVersion, cameraId.c_str());
return falseRet;
@@ -1861,39 +1886,49 @@
uint16_t major, minor;
std::string type, id;
+ IPCTransport transport = getIPCTransport();
status_t res = parseDeviceName(name, &major, &minor, &type, &id);
if (res != OK) {
return res;
}
- if (getIPCTransport() == IPCTransport::AIDL) {
- // Till HIDL support exists, map AIDL versions to HIDL.
- // TODO:b/196432585 Explore if we can get rid of this.
- major += kAidlDeviceMajorOffset;
- minor += kAidlDeviceMinorOffset;
- }
if (type != mType) {
ALOGE("%s: Device type %s does not match provider type %s", __FUNCTION__,
type.c_str(), mType.c_str());
return BAD_VALUE;
}
- if (mManager->isValidDeviceLocked(id, major)) {
+ if (mManager->isValidDeviceLocked(id, major, transport)) {
ALOGE("%s: Device %s: ID %s is already in use for device major version %d", __FUNCTION__,
name.c_str(), id.c_str(), major);
return BAD_VALUE;
}
std::unique_ptr<DeviceInfo> deviceInfo;
- switch (major) {
- case 3:
- deviceInfo = initializeDeviceInfo(name, mProviderTagid, id, minor);
+ switch (transport) {
+ case IPCTransport::HIDL:
+ switch (major) {
+ case 3:
+ break;
+ default:
+ ALOGE("%s: Device %s: Unsupported HIDL device HAL major version %d:",
+ __FUNCTION__, name.c_str(), major);
+ return BAD_VALUE;
+ }
+ break;
+ case IPCTransport::AIDL:
+ if (major != 1) {
+ ALOGE("%s: Device %s: Unsupported AIDL device HAL major version %d:", __FUNCTION__,
+ name.c_str(), major);
+ return BAD_VALUE;
+ }
break;
default:
- ALOGE("%s: Device %s: Unsupported IDL device HAL major version %d:", __FUNCTION__,
- name.c_str(), major);
+ ALOGE("%s Invalid transport %d", __FUNCTION__, transport);
return BAD_VALUE;
}
+
+ deviceInfo = initializeDeviceInfo(name, mProviderTagid, id, minor);
if (deviceInfo == nullptr) return BAD_VALUE;
deviceInfo->notifyDeviceStateChange(getDeviceState());
deviceInfo->mStatus = initialStatus;
@@ -2675,7 +2710,7 @@
status_t CameraProviderManager::getCameraCharacteristicsLocked(const std::string &id,
bool overrideForPerfClass, CameraMetadata* characteristics) const {
- auto deviceInfo = findDeviceInfoLocked(id, /*minVersion*/ {3, 0}, /*maxVersion*/ {5, 0});
+ auto deviceInfo = findDeviceInfoLocked(id);
if (deviceInfo != nullptr) {
return deviceInfo->getCameraCharacteristics(overrideForPerfClass, characteristics);
}
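
With the HIDL-only version offsets gone, findDeviceInfoLocked derives its acceptance window from each provider's transport: HIDL devices must fall in 3.2 through 3.7, AIDL devices start at 1.1 with no practical upper bound. A simplified standalone sketch of that selection, using stand-in types rather than the real hidl_version:

// Standalone sketch of transport-dependent device version bounds,
// mirroring the logic in findDeviceInfoLocked. Types are simplified stand-ins.
#include <cstdio>
#include <utility>

enum class IPCTransport { HIDL, AIDL, INVALID };

struct Version { int major; int minor; };

static bool operator<=(Version a, Version b) {
    return a.major < b.major || (a.major == b.major && a.minor <= b.minor);
}

// Returns the inclusive [min, max] version window accepted for a transport.
static std::pair<Version, Version> versionBoundsFor(IPCTransport transport) {
    if (transport == IPCTransport::HIDL) {
        return {{3, 2}, {3, 7}};    // HIDL camera device HAL 3.2 .. 3.7
    }
    return {{1, 1}, {1000, 0}};     // AIDL starts at 1.1, no upper bound in practice
}

static bool deviceVersionAccepted(IPCTransport transport, Version v) {
    auto [lo, hi] = versionBoundsFor(transport);
    return lo <= v && v <= hi;
}

int main() {
    std::printf("HIDL 3.8 accepted: %d\n", deviceVersionAccepted(IPCTransport::HIDL, {3, 8})); // 0
    std::printf("AIDL 1.1 accepted: %d\n", deviceVersionAccepted(IPCTransport::AIDL, {1, 1})); // 1
    return 0;
}
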
diff --git a/services/camera/libcameraservice/common/CameraProviderManager.h b/services/camera/libcameraservice/common/CameraProviderManager.h
index 3a366e5..d049aff 100644
--- a/services/camera/libcameraservice/common/CameraProviderManager.h
+++ b/services/camera/libcameraservice/common/CameraProviderManager.h
@@ -41,7 +41,6 @@
#include <android/hardware/camera/provider/2.6/ICameraProvider.h>
#include <android/hardware/camera/provider/2.7/ICameraProvider.h>
#include <android/hardware/camera/device/3.7/types.h>
-#include <android/hardware/camera/device/3.8/types.h>
#include <android/hidl/manager/1.0/IServiceNotification.h>
#include <binder/IServiceManager.h>
#include <camera/VendorTagDescriptor.h>
@@ -96,7 +95,6 @@
#define CAMERA_DEVICE_API_VERSION_3_5 HARDWARE_DEVICE_API_VERSION(3, 5)
#define CAMERA_DEVICE_API_VERSION_3_6 HARDWARE_DEVICE_API_VERSION(3, 6)
#define CAMERA_DEVICE_API_VERSION_3_7 HARDWARE_DEVICE_API_VERSION(3, 7)
-#define CAMERA_DEVICE_API_VERSION_3_8 HARDWARE_DEVICE_API_VERSION(3, 8)
/**
* The vendor tag descriptor class that takes HIDL/AIDL vendor tag information as
@@ -233,11 +231,6 @@
std::vector<std::string> getAPI1CompatibleCameraDeviceIds() const;
/**
- * Return true if a device with a given ID and major version exists
- */
- bool isValidDevice(const std::string &id, uint16_t majorVersion) const;
-
- /**
* Return true if a device with a given ID has a flash unit. Returns false
* for devices that are unknown.
*/
@@ -286,7 +279,7 @@
* Return the highest supported device interface version for this ID
*/
status_t getHighestSupportedVersion(const std::string &id,
- hardware::hidl_version *v);
+ hardware::hidl_version *v, IPCTransport *transport);
/**
* Check if a given camera device support setTorchMode API.
@@ -392,9 +385,7 @@
/*
* Return provider type for a specific device.
*/
- metadata_vendor_id_t getProviderTagIdLocked(const std::string& id,
- hardware::hidl_version minVersion = hardware::hidl_version{0,0},
- hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+ metadata_vendor_id_t getProviderTagIdLocked(const std::string& id) const;
/*
* Check if a camera is a logical camera. And if yes, return
@@ -672,6 +663,7 @@
status_t deriveHeicTags(bool maxResolution = false);
status_t addRotateCropTags();
status_t addPreCorrectionActiveArraySize();
+ status_t addReadoutTimestampTag(bool readoutTimestampSupported = true);
static void getSupportedSizes(const CameraMetadata& ch, uint32_t tag,
android_pixel_format_t format,
@@ -796,12 +788,8 @@
// Utility to find a DeviceInfo by ID; pointer is only valid while mInterfaceMutex is held
// and the calling code doesn't mutate the list of providers or their lists of devices.
- // Finds the first device of the given ID that falls within the requested version range
- // minVersion <= deviceVersion < maxVersion
// No guarantees on the order of traversal
- ProviderInfo::DeviceInfo* findDeviceInfoLocked(const std::string& id,
- hardware::hidl_version minVersion = hardware::hidl_version{0,0},
- hardware::hidl_version maxVersion = hardware::hidl_version{1000,0}) const;
+ ProviderInfo::DeviceInfo* findDeviceInfoLocked(const std::string& id) const;
// Map external providers to USB devices in order to handle USB hotplug
// events for lazy HALs
@@ -828,7 +816,8 @@
status_t removeProvider(const std::string& provider);
sp<StatusListener> getStatusListener() const;
- bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion) const;
+ bool isValidDeviceLocked(const std::string &id, uint16_t majorVersion,
+ IPCTransport transport) const;
size_t mProviderInstanceId = 0;
std::vector<sp<ProviderInfo>> mProviders;
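
getHighestSupportedVersion now reports the transport alongside the version, and isValidDeviceLocked keys uniqueness on id, major version, and transport, so HIDL and AIDL version numbers are never compared against each other. A small standalone sketch of the new out-parameter contract, with stand-in types and a made-up device table:

// Standalone sketch of the new getHighestSupportedVersion contract:
// both the version and the IPC transport come back through out-parameters,
// and null out-params / missing IDs are rejected. Stand-in types only.
#include <cstdio>
#include <map>
#include <string>

using status_t = int;
constexpr status_t OK = 0;
constexpr status_t BAD_VALUE = -22;
constexpr status_t NAME_NOT_FOUND = -2;

enum class IPCTransport { HIDL, AIDL, INVALID };
struct Version { int major; int minor; };

struct Entry { Version version; IPCTransport transport; };
// Made-up device table standing in for the provider/device lists.
static const std::map<std::string, Entry> kDevices = {
    {"0", {{3, 7}, IPCTransport::HIDL}},
    {"1", {{1, 1}, IPCTransport::AIDL}},
};

static status_t getHighestSupportedVersion(const std::string& id,
                                           Version* v, IPCTransport* transport) {
    if (v == nullptr || transport == nullptr) return BAD_VALUE;
    auto it = kDevices.find(id);
    if (it == kDevices.end()) return NAME_NOT_FOUND;
    *v = it->second.version;
    *transport = it->second.transport;
    return OK;
}

int main() {
    Version v{};
    IPCTransport t = IPCTransport::INVALID;
    if (getHighestSupportedVersion("1", &v, &t) == OK) {
        std::printf("camera 1: version %d.%d over %s\n", v.major, v.minor,
                    t == IPCTransport::AIDL ? "AIDL" : "HIDL");
    }
    return 0;
}
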
diff --git a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
index 81b4779..ef68f28 100644
--- a/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/aidl/AidlProviderInfo.cpp
@@ -532,6 +532,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag();
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
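
AIDL providers call addReadoutTimestampTag() with its default of true, while the HIDL path later in this change passes false, so the static readout-timestamp tag advertises hardware readout support only for AIDL HALs. A standalone sketch of that choice; the map and the numeric values stand in for CameraMetadata and the real ANDROID_SENSOR_READOUT_TIMESTAMP_* constants:

// Standalone sketch of the readout-timestamp capability tag,
// mirroring addReadoutTimestampTag(). The map stands in for CameraMetadata,
// and the constants are illustrative, not the real enum values.
#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

constexpr uint8_t kReadoutTimestampNotSupported = 0;  // stand-in for ..._NOT_SUPPORTED
constexpr uint8_t kReadoutTimestampHardware = 1;      // stand-in for ..._HARDWARE

static void addReadoutTimestampTag(std::map<std::string, uint8_t>& characteristics,
                                   bool readoutTimestampSupported) {
    uint8_t value = readoutTimestampSupported ? kReadoutTimestampHardware
                                              : kReadoutTimestampNotSupported;
    characteristics["sensor.readoutTimestamp"] = value;  // stand-in key
}

int main() {
    std::map<std::string, uint8_t> aidlChars, hidlChars;
    addReadoutTimestampTag(aidlChars, /*readoutTimestampSupported*/ true);   // AIDL default
    addReadoutTimestampTag(hidlChars, /*readoutTimestampSupported*/ false);  // HIDL path
    std::printf("AIDL: %d, HIDL: %d\n",
                (int) aidlChars["sensor.readoutTimestamp"],
                (int) hidlChars["sensor.readoutTimestamp"]);
    return 0;
}
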
diff --git a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
index b3cce1c..d60565f 100644
--- a/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
+++ b/services/camera/libcameraservice/common/hidl/HidlProviderInfo.cpp
@@ -27,7 +27,6 @@
#include <utils/Trace.h>
#include <android/hardware/camera/device/3.7/ICameraDevice.h>
-#include <android/hardware/camera/device/3.8/ICameraDevice.h>
namespace {
const bool kEnableLazyHal(property_get_bool("ro.camera.enableLazyHal", false));
@@ -656,6 +655,11 @@
ALOGE("%s: Unable to override zoomRatio related tags: %s (%d)",
__FUNCTION__, strerror(-res), res);
}
+ res = addReadoutTimestampTag(/*readoutTimestampSupported*/false);
+ if (OK != res) {
+ ALOGE("%s: Unable to add sensorReadoutTimestamp tag: %s (%d)",
+ __FUNCTION__, strerror(-res), res);
+ }
camera_metadata_entry flashAvailable =
mCameraCharacteristics.find(ANDROID_FLASH_INFO_AVAILABLE);
@@ -766,74 +770,14 @@
}
status_t HidlProviderInfo::HidlDeviceInfo3::turnOnTorchWithStrengthLevel(
- int32_t torchStrength) {
- const sp<hardware::camera::device::V3_2::ICameraDevice> interface = startDeviceInterface();
- if (interface == nullptr) {
- return DEAD_OBJECT;
- }
- sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
- auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
- if (castResult_3_8.isOk()) {
- interface_3_8 = castResult_3_8;
- }
-
- if (interface_3_8 == nullptr) {
- return INVALID_OPERATION;
- }
-
- Status s = interface_3_8->turnOnTorchWithStrengthLevel(torchStrength);
- if (s == Status::OK) {
- mTorchStrengthLevel = torchStrength;
- }
- return mapToStatusT(s);
+ int32_t /*torchStrengthLevel*/) {
+ ALOGE("%s HIDL does not support turning on torch with variable strength", __FUNCTION__);
+ return INVALID_OPERATION;
}
-status_t HidlProviderInfo::HidlDeviceInfo3::getTorchStrengthLevel(int32_t *torchStrength) {
- if (torchStrength == nullptr) {
- return BAD_VALUE;
- }
- const sp<hardware::camera::device::V3_2::ICameraDevice> interface = startDeviceInterface();
- if (interface == nullptr) {
- return DEAD_OBJECT;
- }
- auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
- sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = nullptr;
- if (castResult_3_8.isOk()) {
- interface_3_8 = castResult_3_8;
- }
-
- if (interface_3_8 == nullptr) {
- return INVALID_OPERATION;
- }
-
- Status callStatus;
- status_t res;
- hardware::Return<void> ret = interface_3_8->getTorchStrengthLevel([&callStatus, &torchStrength]
- (Status status, const int32_t& torchStrengthLevel) {
- callStatus = status;
- if (status == Status::OK) {
- *torchStrength = torchStrengthLevel;
- } });
-
- if (ret.isOk()) {
- switch (callStatus) {
- case Status::OK:
- // Expected case, do nothing.
- res = OK;
- break;
- case Status::METHOD_NOT_SUPPORTED:
- res = INVALID_OPERATION;
- break;
- default:
- ALOGE("%s: Get torch strength level failed: %d", __FUNCTION__, callStatus);
- res = UNKNOWN_ERROR;
- }
- } else {
- ALOGE("%s: Unexpected binder error: %s", __FUNCTION__, ret.description().c_str());
- res = UNKNOWN_ERROR;
- }
-
- return res;
+status_t HidlProviderInfo::HidlDeviceInfo3::getTorchStrengthLevel(int32_t * /*torchStrength*/) {
+ ALOGE("%s HIDL does not support variable torch strength level", __FUNCTION__);
+ return INVALID_OPERATION;
}
sp<hardware::camera::device::V3_2::ICameraDevice>
@@ -888,11 +832,11 @@
const SessionConfiguration &configuration, bool overrideForPerfClass,
metadataGetter getMetadata, bool *status) {
- hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
+ hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
bool earlyExit = false;
auto bRes = SessionConfigurationUtils::convertToHALStreamCombination(configuration,
String8(mId.c_str()), mCameraCharacteristics, getMetadata, mPhysicalIds,
- streamConfiguration, overrideForPerfClass, &earlyExit);
+ configuration_3_7, overrideForPerfClass, &earlyExit);
if (!bRes.isOk()) {
return UNKNOWN_ERROR;
@@ -914,8 +858,6 @@
sp<hardware::camera::device::V3_5::ICameraDevice> interface_3_5 = castResult_3_5;
auto castResult_3_7 = device::V3_7::ICameraDevice::castFrom(interface);
sp<hardware::camera::device::V3_7::ICameraDevice> interface_3_7 = castResult_3_7;
- auto castResult_3_8 = device::V3_8::ICameraDevice::castFrom(interface);
- sp<hardware::camera::device::V3_8::ICameraDevice> interface_3_8 = castResult_3_8;
status_t res;
Status callStatus;
@@ -925,27 +867,11 @@
callStatus = s;
*status = combStatus;
};
- if (interface_3_8 != nullptr) {
- ret = interface_3_8->isStreamCombinationSupported_3_8(streamConfiguration, halCb);
- } else if (interface_3_7 != nullptr) {
- hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
- bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
- configuration_3_7, streamConfiguration);
- if (!success) {
- *status = false;
- return OK;
- }
+ if (interface_3_7 != nullptr) {
ret = interface_3_7->isStreamCombinationSupported_3_7(configuration_3_7, halCb);
} else if (interface_3_5 != nullptr) {
- hardware::camera::device::V3_7::StreamConfiguration configuration_3_7;
- bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
- configuration_3_7, streamConfiguration);
- if (!success) {
- *status = false;
- return OK;
- }
hardware::camera::device::V3_4::StreamConfiguration configuration_3_4;
- success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
+ bool success = SessionConfigurationUtils::convertHALStreamCombinationFromV37ToV34(
configuration_3_4, configuration_3_7);
if (!success) {
*status = false;
@@ -988,7 +914,7 @@
status_t res = OK;
for (auto &cameraIdAndSessionConfig : cameraIdsAndSessionConfigs) {
const std::string& cameraId = cameraIdAndSessionConfig.mCameraId;
- hardware::camera::device::V3_8::StreamConfiguration streamConfiguration;
+ hardware::camera::device::V3_7::StreamConfiguration streamConfiguration;
CameraMetadata deviceInfo;
bool overrideForPerfClass =
SessionConfigurationUtils::targetPerfClassPrimaryCamera(
@@ -1022,8 +948,7 @@
}
CameraIdAndStreamCombination halCameraIdAndStream;
halCameraIdAndStream.cameraId = cameraId;
- SessionConfigurationUtils::convertHALStreamCombinationFromV38ToV37(
- halCameraIdAndStream.streamConfiguration, streamConfiguration);
+ halCameraIdAndStream.streamConfiguration = streamConfiguration;
halCameraIdsAndStreamsV.push_back(halCameraIdAndStream);
}
*halCameraIdsAndStreamCombinations = halCameraIdsAndStreamsV;
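
With the 3.8 interface dropped, HIDL devices report INVALID_OPERATION for variable torch strength, so a caller that wants the torch on regardless needs a fallback to plain setTorchMode. A hedged, standalone sketch of that caller-side fallback; FakeHidlDevice and its methods are illustrative stand-ins, not the real HAL interface:

// Standalone sketch of falling back to plain torch mode when variable
// strength is unsupported (the HIDL path now returns INVALID_OPERATION).
#include <cstdint>
#include <cstdio>

using status_t = int;
constexpr status_t OK = 0;
constexpr status_t INVALID_OPERATION = -38;

struct FakeHidlDevice {
    status_t turnOnTorchWithStrengthLevel(int32_t /*level*/) {
        return INVALID_OPERATION;   // mirrors the new HIDL stub
    }
    status_t setTorchMode(bool enabled) {
        std::printf("torch %s\n", enabled ? "on" : "off");
        return OK;
    }
};

static status_t turnOnTorch(FakeHidlDevice& dev, int32_t requestedStrength) {
    status_t res = dev.turnOnTorchWithStrengthLevel(requestedStrength);
    if (res == INVALID_OPERATION) {
        // Strength control unavailable; fall back to a simple on.
        res = dev.setTorchMode(true);
    }
    return res;
}

int main() {
    FakeHidlDevice dev;
    return turnOnTorch(dev, 3) == OK ? 0 : 1;
}
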
diff --git a/services/camera/libcameraservice/device3/Camera3Device.cpp b/services/camera/libcameraservice/device3/Camera3Device.cpp
index dec439f..7f83849 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.cpp
+++ b/services/camera/libcameraservice/device3/Camera3Device.cpp
@@ -113,10 +113,6 @@
status_t Camera3Device::initializeCommonLocked() {
- /** Start watchdog thread */
- mCameraServiceWatchdog = new CameraServiceWatchdog();
- mCameraServiceWatchdog->run("CameraServiceWatchdog");
-
/** Start up status tracker thread */
mStatusTracker = new StatusTracker(this);
status_t res = mStatusTracker->run(String8::format("C3Dev-%s-Status", mId.string()).string());
@@ -230,6 +226,10 @@
// Hidl/AidlCamera3DeviceInjectionMethods
mInjectionMethods = createCamera3DeviceInjectionMethods(this);
+ /** Start watchdog thread */
+ mCameraServiceWatchdog = new CameraServiceWatchdog();
+ mCameraServiceWatchdog->run("CameraServiceWatchdog");
+
return OK;
}
@@ -4091,6 +4091,17 @@
}
}
+status_t Camera3Device::setCameraServiceWatchdog(bool enabled) {
+ Mutex::Autolock il(mInterfaceLock);
+ Mutex::Autolock l(mLock);
+
+ if (mCameraServiceWatchdog != NULL) {
+ mCameraServiceWatchdog->setEnabled(enabled);
+ }
+
+ return OK;
+}
+
void Camera3Device::RequestThread::cleanUpFailedRequests(bool sendRequestError) {
if (mNextRequests.empty()) {
return;
diff --git a/services/camera/libcameraservice/device3/Camera3Device.h b/services/camera/libcameraservice/device3/Camera3Device.h
index f927b4d..3c5cb78 100644
--- a/services/camera/libcameraservice/device3/Camera3Device.h
+++ b/services/camera/libcameraservice/device3/Camera3Device.h
@@ -281,6 +281,11 @@
*/
status_t setCameraMute(bool enabled);
+ /**
+ * Enables/disables camera service watchdog
+ */
+ status_t setCameraServiceWatchdog(bool enabled);
+
// Get the status tracker for the camera device
wp<camera3::StatusTracker> getStatusTracker() { return mStatusTracker; }
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
index d2167e3..fd00284 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.cpp
@@ -66,6 +66,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -99,6 +100,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(0),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -139,6 +141,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -187,6 +190,7 @@
mTraceFirstBuffer(true),
mUseBufferManager(false),
mTimestampOffset(timestampOffset),
+ mUseReadoutTime(false),
mConsumerUsage(consumerUsage),
mDropBuffers(false),
mMirrorMode(mirrorMode),
@@ -459,17 +463,19 @@
}
}
+ nsecs_t captureTime = (mUseReadoutTime && readoutTimestamp != 0 ?
+ readoutTimestamp : timestamp) - mTimestampOffset;
if (mPreviewFrameSpacer != nullptr) {
- res = mPreviewFrameSpacer->queuePreviewBuffer(timestamp - mTimestampOffset,
- readoutTimestamp - mTimestampOffset, transform, anwBuffer, anwReleaseFence);
+ nsecs_t readoutTime = (readoutTimestamp != 0 ? readoutTimestamp : timestamp)
+ - mTimestampOffset;
+ res = mPreviewFrameSpacer->queuePreviewBuffer(captureTime, readoutTime,
+ transform, anwBuffer, anwReleaseFence);
if (res != OK) {
ALOGE("%s: Stream %d: Error queuing buffer to preview buffer spacer: %s (%d)",
__FUNCTION__, mId, strerror(-res), res);
return res;
}
} else {
- nsecs_t captureTime = (mSyncToDisplay ? readoutTimestamp : timestamp)
- - mTimestampOffset;
nsecs_t presentTime = mSyncToDisplay ?
syncTimestampToDisplayLocked(captureTime) : captureTime;
@@ -702,12 +708,16 @@
mFrameCount = 0;
mLastTimestamp = 0;
+ mUseReadoutTime =
+ (timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR || mSyncToDisplay);
+
if (isDeviceTimeBaseRealtime()) {
if (isDefaultTimeBase && !isConsumedByHWComposer() && !isVideoStream()) {
// Default time base, but not hardware composer or video encoder
mTimestampOffset = 0;
} else if (timestampBase == OutputConfiguration::TIMESTAMP_BASE_REALTIME ||
- timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR) {
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_SENSOR ||
+ timestampBase == OutputConfiguration::TIMESTAMP_BASE_READOUT_SENSOR) {
mTimestampOffset = 0;
}
// If timestampBase is CHOREOGRAPHER SYNCED or MONOTONIC, leave
@@ -717,7 +727,7 @@
// Reverse offset for monotonicTime -> bootTime
mTimestampOffset = -mTimestampOffset;
} else {
- // If timestampBase is DEFAULT, MONOTONIC, SENSOR, or
+ // If timestampBase is DEFAULT, MONOTONIC, SENSOR, READOUT_SENSOR or
// CHOREOGRAPHER_SYNCED, timestamp offset is 0.
mTimestampOffset = 0;
}
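
The stream now decides up front whether to use readout time: mUseReadoutTime is set for TIMESTAMP_BASE_READOUT_SENSOR outputs and for display-synced streams, and the readout timestamp is only used when the HAL actually supplied one. A simplified standalone sketch of the capture-time selection:

// Standalone sketch of the capture-time selection added to Camera3OutputStream:
// prefer the readout timestamp when requested and available, otherwise fall back
// to the start-of-exposure timestamp, then apply the stream's timestamp offset.
#include <cstdint>
#include <cstdio>

using nsecs_t = int64_t;

static nsecs_t pickCaptureTime(bool useReadoutTime, nsecs_t timestamp,
                               nsecs_t readoutTimestamp, nsecs_t timestampOffset) {
    nsecs_t base = (useReadoutTime && readoutTimestamp != 0) ? readoutTimestamp : timestamp;
    return base - timestampOffset;
}

int main() {
    // Readout requested and provided: use it.
    std::printf("%lld\n", (long long) pickCaptureTime(true, 1000, 1500, 100));  // 1400
    // Readout requested but HAL reported none (0): fall back to exposure start.
    std::printf("%lld\n", (long long) pickCaptureTime(true, 1000, 0, 100));     // 900
    // Readout not requested: always exposure start.
    std::printf("%lld\n", (long long) pickCaptureTime(false, 1000, 1500, 100)); // 900
    return 0;
}
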
diff --git a/services/camera/libcameraservice/device3/Camera3OutputStream.h b/services/camera/libcameraservice/device3/Camera3OutputStream.h
index 5f2831f..4ab052b 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputStream.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputStream.h
@@ -341,6 +341,11 @@
nsecs_t mTimestampOffset;
/**
+ * Whether the camera readout time is used rather than the start-of-exposure time.
+ */
+ bool mUseReadoutTime;
+
+ /**
* Consumer end point usage flag set by the constructor for the deferred
* consumer case.
*/
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
index ed66df0..f4e3fad 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.cpp
@@ -787,10 +787,12 @@
SessionStatsBuilder& sessionStatsBuilder) {
bool timestampIncreasing =
!((request.zslCapture && request.stillCapture) || request.hasInputBuffer);
+ nsecs_t readoutTimestamp = request.resultExtras.hasReadoutTimestamp ?
+ request.resultExtras.readoutTimestamp : 0;
returnOutputBuffers(useHalBufManager, listener,
request.pendingOutputBuffers.array(),
request.pendingOutputBuffers.size(),
- request.shutterTimestamp, request.shutterReadoutTimestamp,
+ request.shutterTimestamp, readoutTimestamp,
/*requested*/true, request.requestTimeNs, sessionStatsBuilder, timestampIncreasing,
request.outputSurfaces, request.resultExtras,
request.errorBufStrategy, request.transform);
@@ -852,7 +854,10 @@
}
r.shutterTimestamp = msg.timestamp;
- r.shutterReadoutTimestamp = msg.readout_timestamp;
+ if (msg.readout_timestamp_valid) {
+ r.resultExtras.hasReadoutTimestamp = true;
+ r.resultExtras.readoutTimestamp = msg.readout_timestamp;
+ }
if (r.minExpectedDuration != states.minFrameDuration) {
for (size_t i = 0; i < states.outputStreams.size(); i++) {
auto outputStream = states.outputStreams[i];
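
The readout timestamp is no longer an unconditional field on the in-flight request: the shutter handler stores it in resultExtras together with hasReadoutTimestamp, and buffer return falls back to 0 when it was never reported, which is now the case for HIDL HALs. A standalone sketch of that validity-flag pattern with simplified stand-in structs:

// Standalone sketch of the readout-timestamp validity flag carried in
// resultExtras: set only when the shutter message actually had a readout time,
// and folded back to 0 when buffers are returned.
#include <cstdint>
#include <cstdio>

using nsecs_t = int64_t;

struct ResultExtrasSketch {
    bool hasReadoutTimestamp = false;
    nsecs_t readoutTimestamp = 0;
};

struct ShutterMsgSketch {
    nsecs_t timestamp = 0;
    bool readoutTimestampValid = false;
    nsecs_t readoutTimestamp = 0;
};

static void onShutter(const ShutterMsgSketch& msg, ResultExtrasSketch& extras) {
    if (msg.readoutTimestampValid) {
        extras.hasReadoutTimestamp = true;
        extras.readoutTimestamp = msg.readoutTimestamp;
    }
}

static nsecs_t readoutForBufferReturn(const ResultExtrasSketch& extras) {
    return extras.hasReadoutTimestamp ? extras.readoutTimestamp : 0;
}

int main() {
    ResultExtrasSketch aidlExtras, hidlExtras;
    onShutter({/*timestamp*/ 1000, /*valid*/ true, /*readout*/ 1500}, aidlExtras);  // AIDL HALs
    onShutter({/*timestamp*/ 1000, /*valid*/ false, /*readout*/ 0}, hidlExtras);    // HIDL HALs
    std::printf("AIDL readout: %lld, HIDL readout: %lld\n",
                (long long) readoutForBufferReturn(aidlExtras),
                (long long) readoutForBufferReturn(hidlExtras));
    return 0;
}
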
diff --git a/services/camera/libcameraservice/device3/Camera3OutputUtils.h b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
index dd01408..d6107c2 100644
--- a/services/camera/libcameraservice/device3/Camera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/Camera3OutputUtils.h
@@ -26,8 +26,6 @@
#include <common/CameraDeviceBase.h>
-#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
-
#include "device3/BufferUtils.h"
#include "device3/DistortionMapper.h"
#include "device3/ZoomRatioMapper.h"
diff --git a/services/camera/libcameraservice/device3/InFlightRequest.h b/services/camera/libcameraservice/device3/InFlightRequest.h
index 493a9e2..fa00495 100644
--- a/services/camera/libcameraservice/device3/InFlightRequest.h
+++ b/services/camera/libcameraservice/device3/InFlightRequest.h
@@ -65,6 +65,7 @@
typedef struct camera_shutter_msg {
uint32_t frame_number;
uint64_t timestamp;
+ bool readout_timestamp_valid;
uint64_t readout_timestamp;
} camera_shutter_msg_t;
@@ -104,8 +105,6 @@
struct InFlightRequest {
// Set by notify() SHUTTER call.
nsecs_t shutterTimestamp;
- // Set by notify() SHUTTER call with readout time.
- nsecs_t shutterReadoutTimestamp;
// Set by process_capture_result().
nsecs_t sensorTimestamp;
int requestStatus;
diff --git a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
index 02eebd2..b2accc1 100644
--- a/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/aidl/AidlCamera3OutputUtils.cpp
@@ -110,6 +110,7 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.get<Tag::shutter>().frameNumber;
m.message.shutter.timestamp = msg.get<Tag::shutter>().timestamp;
+ m.message.shutter.readout_timestamp_valid = true;
m.message.shutter.readout_timestamp = msg.get<Tag::shutter>().readoutTimestamp;
break;
}
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
index 6d76687..4bb426c 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.cpp
@@ -65,16 +65,9 @@
using namespace android::hardware::camera;
using namespace android::hardware::camera::device::V3_2;
using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
-using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidScalerAvailableStreamUseCases;
namespace android {
-CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap
-HidlCamera3Device::mapToHidlDynamicProfile(int64_t dynamicRangeProfile) {
- return static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap>(
- dynamicRangeProfile);
-}
-
hardware::graphics::common::V1_0::PixelFormat HidlCamera3Device::mapToPixelFormat(
int frameworkFormat) {
return (hardware::graphics::common::V1_0::PixelFormat) frameworkFormat;
@@ -285,7 +278,8 @@
// Metadata tags needs fixup for monochrome camera device version less
// than 3.5.
hardware::hidl_version maxVersion{0,0};
- res = manager->getHighestSupportedVersion(mId.string(), &maxVersion);
+ IPCTransport transport = IPCTransport::HIDL;
+ res = manager->getHighestSupportedVersion(mId.string(), &maxVersion, &transport);
if (res != OK) {
ALOGE("%s: Error in getting camera device version id: %s (%d)",
__FUNCTION__, strerror(-res), res);
@@ -446,11 +440,6 @@
return notifyHelper<hardware::camera::device::V3_2::NotifyMsg>(msgs);
}
-hardware::Return<void> HidlCamera3Device::notify_3_8(
- const hardware::hidl_vec<hardware::camera::device::V3_8::NotifyMsg>& msgs) {
- return notifyHelper<hardware::camera::device::V3_8::NotifyMsg>(msgs);
-}
-
template<typename NotifyMsgType>
hardware::Return<void> HidlCamera3Device::notifyHelper(
const hardware::hidl_vec<NotifyMsgType>& msgs) {
@@ -735,10 +724,6 @@
mRequestMetadataQueue(queue) {
// Check with hardware service manager if we can downcast these interfaces
// Somewhat expensive, so cache the results at startup
- auto castResult_3_8 = device::V3_8::ICameraDeviceSession::castFrom(mHidlSession);
- if (castResult_3_8.isOk()) {
- mHidlSession_3_8 = castResult_3_8;
- }
auto castResult_3_7 = device::V3_7::ICameraDeviceSession::castFrom(mHidlSession);
if (castResult_3_7.isOk()) {
mHidlSession_3_7 = castResult_3_7;
@@ -766,7 +751,6 @@
}
void HidlCamera3Device::HidlHalInterface::clear() {
- mHidlSession_3_8.clear();
mHidlSession_3_7.clear();
mHidlSession_3_6.clear();
mHidlSession_3_5.clear();
@@ -905,16 +889,13 @@
device::V3_2::StreamConfiguration requestedConfiguration3_2;
device::V3_4::StreamConfiguration requestedConfiguration3_4;
device::V3_7::StreamConfiguration requestedConfiguration3_7;
- device::V3_8::StreamConfiguration requestedConfiguration3_8;
requestedConfiguration3_2.streams.resize(config->num_streams);
requestedConfiguration3_4.streams.resize(config->num_streams);
requestedConfiguration3_7.streams.resize(config->num_streams);
- requestedConfiguration3_8.streams.resize(config->num_streams);
for (size_t i = 0; i < config->num_streams; i++) {
device::V3_2::Stream &dst3_2 = requestedConfiguration3_2.streams[i];
device::V3_4::Stream &dst3_4 = requestedConfiguration3_4.streams[i];
device::V3_7::Stream &dst3_7 = requestedConfiguration3_7.streams[i];
- device::V3_8::Stream &dst3_8 = requestedConfiguration3_8.streams[i];
camera3::camera_stream_t *src = config->streams[i];
Camera3Stream* cam3stream = Camera3Stream::cast(src);
@@ -963,23 +944,17 @@
dst3_7.sensorPixelModesUsed[j++] =
static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
}
- if ((src->dynamic_range_profile !=
- ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) &&
- (mHidlSession_3_8 == nullptr)) {
+ if (src->dynamic_range_profile !=
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
ALOGE("%s: Camera device doesn't support non-standard dynamic range profiles: %" PRIx64,
__FUNCTION__, src->dynamic_range_profile);
return BAD_VALUE;
}
- if (src->use_case != ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT &&
- mHidlSession_3_8 == nullptr) {
+ if (src->use_case != ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {
ALOGE("%s: Camera device doesn't support non-default stream use case %" PRId64 "!",
__FUNCTION__, src->use_case);
return BAD_VALUE;
}
- dst3_8.v3_7 = dst3_7;
- dst3_8.dynamicRangeProfile = mapToHidlDynamicProfile(src->dynamic_range_profile);
- dst3_8.useCase =
- static_cast<CameraMetadataEnumAndroidScalerAvailableStreamUseCases>(src->use_case);
activeStreams.insert(streamId);
// Create Buffer ID map if necessary
mBufferRecords.tryCreateBufferCache(streamId);
@@ -1004,10 +979,6 @@
requestedConfiguration3_7.sessionParams.setToExternal(
reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
sessionParamSize);
- requestedConfiguration3_8.operationMode = operationMode;
- requestedConfiguration3_8.sessionParams.setToExternal(
- reinterpret_cast<uint8_t*>(const_cast<camera_metadata_t*>(sessionParams)),
- sessionParamSize);
// Invoke configureStreams
device::V3_3::HalStreamConfiguration finalConfiguration;
@@ -1053,18 +1024,7 @@
return OK;
};
- // See which version of HAL we have
- if (mHidlSession_3_8 != nullptr) {
- ALOGV("%s: v3.8 device found", __FUNCTION__);
- requestedConfiguration3_8.streamConfigCounter = mNextStreamConfigCounter++;
- requestedConfiguration3_8.multiResolutionInputImage = config->input_is_multi_resolution;
- auto err = mHidlSession_3_8->configureStreams_3_8(requestedConfiguration3_8,
- configStream36Cb);
- res = postprocConfigStream36(err);
- if (res != OK) {
- return res;
- }
- } else if (mHidlSession_3_7 != nullptr) {
+ if (mHidlSession_3_7 != nullptr) {
ALOGV("%s: v3.7 device found", __FUNCTION__);
requestedConfiguration3_7.streamConfigCounter = mNextStreamConfigCounter++;
requestedConfiguration3_7.multiResolutionInputImage = config->input_is_multi_resolution;
@@ -1474,15 +1434,10 @@
return OK;
}
-status_t HidlCamera3Device::HidlHalInterface::repeatingRequestEnd(uint32_t frameNumber,
- const std::vector<int32_t> &streamIds) {
+status_t HidlCamera3Device::HidlHalInterface::repeatingRequestEnd(uint32_t /*frameNumber*/,
+ const std::vector<int32_t> &/*streamIds*/) {
ATRACE_NAME("CameraHal::repeatingRequestEnd");
- if (!valid()) return INVALID_OPERATION;
-
- if (mHidlSession_3_8.get() != nullptr) {
- mHidlSession_3_8->repeatingRequestEnd(frameNumber, streamIds);
- }
- return OK;
+ return INVALID_OPERATION;
}
status_t HidlCamera3Device::HidlHalInterface::close() {
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h
index 56c999a..2e98fe0 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3Device.h
@@ -22,13 +22,12 @@
namespace android {
-using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
/**
* CameraDevice for HIDL HAL devices with version CAMERA_DEVICE_API_VERSION_3_0 or higher.
*/
class HidlCamera3Device :
- virtual public hardware::camera::device::V3_8::ICameraDeviceCallback,
+ virtual public hardware::camera::device::V3_5::ICameraDeviceCallback,
public Camera3Device {
public:
@@ -44,8 +43,6 @@
static hardware::camera::device::V3_2::DataspaceFlags mapToHidlDataspace(
android_dataspace dataSpace);
static hardware::camera::device::V3_2::BufferUsageFlags mapToConsumerUsage(uint64_t usage);
- static CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap mapToHidlDynamicProfile(
- int64_t dynamicRangeProfile);
static hardware::camera::device::V3_2::StreamRotation mapToStreamRotation(
camera_stream_rotation_t rotation);
// Returns a negative error code if the passed-in operation mode is not valid.
@@ -84,10 +81,6 @@
const hardware::hidl_vec<
hardware::camera::device::V3_2::StreamBuffer>& buffers) override;
- hardware::Return<void> notify_3_8(
- const hardware::hidl_vec<
- hardware::camera::device::V3_8::NotifyMsg>& msgs) override;
-
// Handle one notify message
void notify(const hardware::camera::device::V3_2::NotifyMsg& msg);
@@ -164,8 +157,6 @@
sp<hardware::camera::device::V3_6::ICameraDeviceSession> mHidlSession_3_6;
// Valid if ICameraDeviceSession is @3.7 or newer
sp<hardware::camera::device::V3_7::ICameraDeviceSession> mHidlSession_3_7;
- // Valid if ICameraDeviceSession is @3.7 or newer
- sp<hardware::camera::device::V3_8::ICameraDeviceSession> mHidlSession_3_8;
std::shared_ptr<RequestMetadataQueue> mRequestMetadataQueue;
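
Without a 3.8 session, the HIDL path in the two preceding files rejects any stream that requests a non-standard dynamic range profile or a non-default stream use case at configure time. A simplified standalone sketch of that gating; the constants are illustrative stand-ins for the real metadata enums:

// Standalone sketch of the HIDL capability gating: streams requesting
// non-standard dynamic range profiles or non-default use cases are rejected.
#include <cstdint>
#include <cstdio>

using status_t = int;
constexpr status_t OK = 0;
constexpr status_t BAD_VALUE = -22;

// Stand-ins for ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD
// and ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT; values are illustrative.
constexpr int64_t kDynamicRangeStandard = 1;
constexpr int64_t kStreamUseCaseDefault = 0;

struct StreamRequestSketch {
    int64_t dynamicRangeProfile = kDynamicRangeStandard;
    int64_t useCase = kStreamUseCaseDefault;
};

static status_t checkHidlStream(const StreamRequestSketch& s) {
    if (s.dynamicRangeProfile != kDynamicRangeStandard) {
        std::printf("rejecting non-standard dynamic range profile %lld\n",
                    (long long) s.dynamicRangeProfile);
        return BAD_VALUE;
    }
    if (s.useCase != kStreamUseCaseDefault) {
        std::printf("rejecting non-default stream use case %lld\n", (long long) s.useCase);
        return BAD_VALUE;
    }
    return OK;
}

int main() {
    StreamRequestSketch sdr;                  // standard profile, default use case: accepted
    StreamRequestSketch hdr;
    hdr.dynamicRangeProfile = 2;              // any non-standard profile: rejected on HIDL
    std::printf("sdr: %d, hdr: %d\n", checkHidlStream(sdr), checkHidlStream(hdr));
    return 0;
}
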
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
index 60e4e42..ff6fc17 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.cpp
@@ -63,29 +63,7 @@
}
void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_8::NotifyMsg& msg) {
- using android::hardware::camera::device::V3_2::MsgType;
-
- hardware::camera::device::V3_2::NotifyMsg msg_3_2;
- msg_3_2.type = msg.type;
- bool hasReadoutTime = false;
- uint64_t readoutTime = 0;
- switch (msg.type) {
- case MsgType::ERROR:
- msg_3_2.msg.error = msg.msg.error;
- break;
- case MsgType::SHUTTER:
- msg_3_2.msg.shutter = msg.msg.shutter.v3_2;
- hasReadoutTime = true;
- readoutTime = msg.msg.shutter.readoutTimestamp;
- break;
- }
- notify(states, msg_3_2, hasReadoutTime, readoutTime);
-}
-
-void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_2::NotifyMsg& msg,
- bool hasReadoutTime, uint64_t readoutTime) {
+ const hardware::camera::device::V3_2::NotifyMsg& msg) {
using android::hardware::camera::device::V3_2::MsgType;
using android::hardware::camera::device::V3_2::ErrorCode;
@@ -127,8 +105,8 @@
m.type = CAMERA_MSG_SHUTTER;
m.message.shutter.frame_number = msg.msg.shutter.frameNumber;
m.message.shutter.timestamp = msg.msg.shutter.timestamp;
- m.message.shutter.readout_timestamp = hasReadoutTime ?
- readoutTime : m.message.shutter.timestamp;
+ m.message.shutter.readout_timestamp_valid = false;
+ m.message.shutter.readout_timestamp = 0LL;
break;
}
notify(states, &m);
diff --git a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h
index 583d738..5e6cba6 100644
--- a/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h
+++ b/services/camera/libcameraservice/device3/hidl/HidlCamera3OutputUtils.h
@@ -26,18 +26,12 @@
#include <common/CameraDeviceBase.h>
-#include <android/hardware/camera/device/3.8/ICameraDeviceCallback.h>
+#include <android/hardware/camera/device/3.5/ICameraDeviceCallback.h>
#include "device3/BufferUtils.h"
-//#include "device3/DistortionMapper.h"
-//#include "device3/ZoomRatioMapper.h"
-//#include "device3/RotateAndCropMapper.h"
#include "device3/InFlightRequest.h"
#include "device3/Camera3Stream.h"
-//#include "device3/Camera3OutputStreamInterface.h"
#include "device3/Camera3OutputUtils.h"
-//#include "utils/SessionStatsBuilder.h"
-//#include "utils/TagMonitor.h"
namespace android {
@@ -65,11 +59,7 @@
// Handle one notify message
void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_2::NotifyMsg& msg,
- bool hasReadoutTime = false, uint64_t readoutTime = 0LL);
- void notify(CaptureOutputStates& states,
- const hardware::camera::device::V3_8::NotifyMsg& msg);
-
+ const hardware::camera::device::V3_2::NotifyMsg& msg);
void requestStreamBuffers(RequestBufferStates& states,
const hardware::hidl_vec<hardware::camera::device::V3_5::BufferRequest>& bufReqs,
hardware::camera::device::V3_5::ICameraDeviceCallback::requestStreamBuffers_cb
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
index 3c98a5e..d909624 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/Android.bp
@@ -60,11 +60,11 @@
"android.hardware.camera.device@3.5",
"android.hardware.camera.device@3.6",
"android.hardware.camera.device@3.7",
- "android.hardware.camera.device@3.8",
],
fuzz_config: {
cc: [
"android-media-fuzzing-reports@google.com",
+ "android-camera-fwk-eng@google.com",
],
componentid: 155276,
libfuzzer_options: [
diff --git a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
index 97d7bf4..7a61f38 100644
--- a/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
+++ b/services/camera/libcameraservice/libcameraservice_fuzzer/camera_service_fuzzer.cpp
@@ -111,12 +111,15 @@
size_t mPreviewBufferCount = 0;
bool mAutoFocusMessage = false;
bool mSnapshotNotification = false;
+ bool mRecordingNotification = false;
mutable Mutex mPreviewLock;
mutable Condition mPreviewCondition;
mutable Mutex mAutoFocusLock;
mutable Condition mAutoFocusCondition;
mutable Mutex mSnapshotLock;
mutable Condition mSnapshotCondition;
+ mutable Mutex mRecordingLock;
+ mutable Condition mRecordingCondition;
void getNumCameras();
void getCameraInformation(int32_t cameraId);
@@ -125,6 +128,7 @@
void invokeDump();
void invokeShellCommand();
void invokeNotifyCalls();
+ void invokeTorchAPIs(int32_t cameraId);
// CameraClient interface
void notifyCallback(int32_t msgType, int32_t, int32_t) override;
@@ -152,6 +156,8 @@
Mutex::Autolock l(mPreviewLock);
++mPreviewBufferCount;
mPreviewCondition.broadcast();
+ mRecordingNotification = true;
+ mRecordingCondition.broadcast();
break;
}
case CAMERA_MSG_COMPRESSED_IMAGE: {
@@ -311,114 +317,138 @@
mCameraService->notifySystemEvent(eventId, args);
}
+void CameraFuzzer::invokeTorchAPIs(int32_t cameraId) {
+ String16 cameraIdStr = String16(String8::format("%d", cameraId));
+ sp<IBinder> binder = new BBinder;
+
+ mCameraService->setTorchMode(cameraIdStr, true, binder);
+ ALOGV("Turned torch on.");
+ int32_t torchStrength = rand() % 5 + 1;
+ ALOGV("Changing torch strength level to %d", torchStrength);
+ mCameraService->turnOnTorchWithStrengthLevel(cameraIdStr, torchStrength, binder);
+ mCameraService->setTorchMode(cameraIdStr, false, binder);
+ ALOGV("Turned torch off.");
+}
+
void CameraFuzzer::invokeCameraAPIs() {
- for (int32_t cameraId = 0; cameraId < mNumCameras; ++cameraId) {
- getCameraInformation(cameraId);
+    /** To avoid timeouts caused by iterating over every camera and size in nested loops, the
+     * 'for' loops are removed and 'cameraId', 'pictureSize' and 'videoSize' are picked with the
+     * FuzzedDataProvider from the available cameras and the 'pictureSizes' and 'videoSizes'
+     * vectors.
+     */
+ int32_t cameraId = mFuzzedDataProvider->ConsumeIntegralInRange<int32_t>(0, mNumCameras - 1);
+ getCameraInformation(cameraId);
+ invokeTorchAPIs(cameraId);
- ::android::binder::Status rc;
- sp<ICamera> cameraDevice;
+ ::android::binder::Status rc;
+ sp<ICamera> cameraDevice;
- rc = mCameraService->connect(this, cameraId, String16(),
- android::CameraService::USE_CALLING_UID, android::CameraService::USE_CALLING_PID,
- /*targetSdkVersion*/__ANDROID_API_FUTURE__, &cameraDevice);
- if (!rc.isOk()) {
- // camera not connected
- return;
+ rc = mCameraService->connect(this, cameraId, String16(),
+ android::CameraService::USE_CALLING_UID,
+ android::CameraService::USE_CALLING_PID,
+ /*targetSdkVersion*/ __ANDROID_API_FUTURE__, &cameraDevice);
+ if (!rc.isOk()) {
+ // camera not connected
+ return;
+ }
+ if (cameraDevice) {
+ sp<Surface> previewSurface;
+ sp<SurfaceControl> surfaceControl;
+ CameraParameters params(cameraDevice->getParameters());
+ String8 focusModes(params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
+ bool isAFSupported = false;
+ const char* focusMode = nullptr;
+
+ if (focusModes.contains(CameraParameters::FOCUS_MODE_AUTO)) {
+ isAFSupported = true;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
+ } else if (focusModes.contains(CameraParameters::FOCUS_MODE_MACRO)) {
+ isAFSupported = true;
+ focusMode = CameraParameters::FOCUS_MODE_MACRO;
}
- if (cameraDevice) {
- sp<Surface> previewSurface;
- sp<SurfaceControl> surfaceControl;
- CameraParameters params(cameraDevice->getParameters());
- String8 focusModes(params.get(CameraParameters::KEY_SUPPORTED_FOCUS_MODES));
- bool isAFSupported = false;
- const char *focusMode = nullptr;
+ if (nullptr != focusMode) {
+ params.set(CameraParameters::KEY_FOCUS_MODE, focusMode);
+ cameraDevice->setParameters(params.flatten());
+ }
+ int previewWidth, previewHeight;
+ params.getPreviewSize(&previewWidth, &previewHeight);
- if (focusModes.contains(CameraParameters::FOCUS_MODE_AUTO)) {
- isAFSupported = true;
- } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE)) {
- isAFSupported = true;
- focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_PICTURE;
- } else if (focusModes.contains(CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO)) {
- isAFSupported = true;
- focusMode = CameraParameters::FOCUS_MODE_CONTINUOUS_VIDEO;
- } else if (focusModes.contains(CameraParameters::FOCUS_MODE_MACRO)) {
- isAFSupported = true;
- focusMode = CameraParameters::FOCUS_MODE_MACRO;
- }
- if (nullptr != focusMode) {
- params.set(CameraParameters::KEY_FOCUS_MODE, focusMode);
- cameraDevice->setParameters(params.flatten());
- }
- int previewWidth, previewHeight;
- params.getPreviewSize(&previewWidth, &previewHeight);
+ mComposerClient = new SurfaceComposerClient;
+ mComposerClient->initCheck();
- mComposerClient = new SurfaceComposerClient;
- mComposerClient->initCheck();
-
- bool shouldPassInvalidLayerMetaData = mFuzzedDataProvider->ConsumeBool();
- int layerMetaData;
- if (shouldPassInvalidLayerMetaData) {
- layerMetaData = mFuzzedDataProvider->ConsumeIntegral<int>();
- } else {
- layerMetaData = kLayerMetadata[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ bool shouldPassInvalidLayerMetaData = mFuzzedDataProvider->ConsumeBool();
+ int layerMetaData;
+ if (shouldPassInvalidLayerMetaData) {
+ layerMetaData = mFuzzedDataProvider->ConsumeIntegral<int>();
+ } else {
+ layerMetaData = kLayerMetadata[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
0, kNumLayerMetaData - 1)];
- }
- surfaceControl = mComposerClient->createSurface(
+ }
+ surfaceControl = mComposerClient->createSurface(
String8("Test Surface"), previewWidth, previewHeight,
CameraParameters::previewFormatToEnum(params.getPreviewFormat()), layerMetaData);
- if (surfaceControl.get() != nullptr) {
- SurfaceComposerClient::Transaction{}
+ if (surfaceControl.get() != nullptr) {
+ SurfaceComposerClient::Transaction{}
.setLayer(surfaceControl, 0x7fffffff)
.show(surfaceControl)
.apply();
- previewSurface = surfaceControl->getSurface();
- cameraDevice->setPreviewTarget(previewSurface->getIGraphicBufferProducer());
- }
- cameraDevice->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER);
-
- Vector<Size> pictureSizes;
- params.getSupportedPictureSizes(pictureSizes);
-
- for (size_t i = 0; i < pictureSizes.size(); ++i) {
- params.setPictureSize(pictureSizes[i].width, pictureSizes[i].height);
- cameraDevice->setParameters(params.flatten());
- cameraDevice->startPreview();
- waitForPreviewStart();
- cameraDevice->autoFocus();
- waitForEvent(mAutoFocusLock, mAutoFocusCondition, mAutoFocusMessage);
- bool shouldPassInvalidCameraMsg = mFuzzedDataProvider->ConsumeBool();
- int msgType;
- if (shouldPassInvalidCameraMsg) {
- msgType = mFuzzedDataProvider->ConsumeIntegral<int>();
- } else {
- msgType = kCameraMsg[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
- 0, kNumCameraMsg - 1)];
- }
- cameraDevice->takePicture(msgType);
-
- waitForEvent(mSnapshotLock, mSnapshotCondition, mSnapshotNotification);
- }
-
- Vector<Size> videoSizes;
- params.getSupportedVideoSizes(videoSizes);
-
- for (size_t i = 0; i < videoSizes.size(); ++i) {
- params.setVideoSize(videoSizes[i].width, videoSizes[i].height);
-
- cameraDevice->setParameters(params.flatten());
- cameraDevice->startPreview();
- waitForPreviewStart();
- cameraDevice->setVideoBufferMode(
- android::hardware::BnCamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
- cameraDevice->setVideoTarget(previewSurface->getIGraphicBufferProducer());
- cameraDevice->startRecording();
- cameraDevice->stopRecording();
- }
- cameraDevice->stopPreview();
- cameraDevice->disconnect();
+ previewSurface = surfaceControl->getSurface();
+ cameraDevice->setPreviewTarget(previewSurface->getIGraphicBufferProducer());
}
+ cameraDevice->setPreviewCallbackFlag(CAMERA_FRAME_CALLBACK_FLAG_CAMCORDER);
+
+ Vector<Size> pictureSizes;
+ params.getSupportedPictureSizes(pictureSizes);
+
+ if (pictureSizes.size()) {
+ Size pictureSize = pictureSizes[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, pictureSizes.size() - 1)];
+ params.setPictureSize(pictureSize.width, pictureSize.height);
+ cameraDevice->setParameters(params.flatten());
+ cameraDevice->startPreview();
+ waitForPreviewStart();
+ cameraDevice->autoFocus();
+ waitForEvent(mAutoFocusLock, mAutoFocusCondition, mAutoFocusMessage);
+ bool shouldPassInvalidCameraMsg = mFuzzedDataProvider->ConsumeBool();
+ int msgType;
+ if (shouldPassInvalidCameraMsg) {
+ msgType = mFuzzedDataProvider->ConsumeIntegral<int>();
+ } else {
+ msgType = kCameraMsg[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, kNumCameraMsg - 1)];
+ }
+ cameraDevice->takePicture(msgType);
+
+ waitForEvent(mSnapshotLock, mSnapshotCondition, mSnapshotNotification);
+ cameraDevice->stopPreview();
+ }
+
+ Vector<Size> videoSizes;
+ params.getSupportedVideoSizes(videoSizes);
+
+ if (videoSizes.size()) {
+ Size videoSize = videoSizes[mFuzzedDataProvider->ConsumeIntegralInRange<size_t>(
+ 0, videoSizes.size() - 1)];
+ params.setVideoSize(videoSize.width, videoSize.height);
+
+ cameraDevice->setParameters(params.flatten());
+ cameraDevice->startPreview();
+ waitForPreviewStart();
+ cameraDevice->setVideoBufferMode(
+ android::hardware::BnCamera::VIDEO_BUFFER_MODE_BUFFER_QUEUE);
+ cameraDevice->setVideoTarget(previewSurface->getIGraphicBufferProducer());
+ cameraDevice->stopPreview();
+ cameraDevice->startRecording();
+ waitForEvent(mRecordingLock, mRecordingCondition, mRecordingNotification);
+ cameraDevice->stopRecording();
+ }
+ cameraDevice->disconnect();
}
}
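
The reworked fuzzer draws one camera, one picture size, and one video size per run from FuzzedDataProvider instead of looping over every combination, which keeps each iteration short. A minimal, self-contained example of that selection pattern; FuzzedDataProvider ships with LLVM's libFuzzer headers, and the size list here is made up for illustration:

// Minimal example of the FuzzedDataProvider selection pattern used above:
// pick one index per run instead of looping over every candidate.
#include <fuzzer/FuzzedDataProvider.h>
#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    FuzzedDataProvider fdp(data, size);

    const std::vector<std::pair<int, int>> pictureSizes = {
        {640, 480}, {1280, 720}, {1920, 1080}};  // made-up sizes for illustration

    if (!pictureSizes.empty()) {
        // One bounded pick per run keeps the fuzz iteration fast.
        size_t idx = fdp.ConsumeIntegralInRange<size_t>(0, pictureSizes.size() - 1);
        bool useInvalid = fdp.ConsumeBool();  // occasionally feed an arbitrary value instead
        int width  = useInvalid ? fdp.ConsumeIntegral<int>() : pictureSizes[idx].first;
        int height = useInvalid ? fdp.ConsumeIntegral<int>() : pictureSizes[idx].second;
        std::printf("fuzzing with picture size %dx%d\n", width, height);
    }
    return 0;
}
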
diff --git a/services/camera/libcameraservice/tests/Android.bp b/services/camera/libcameraservice/tests/Android.bp
index 4928faf..3616572 100644
--- a/services/camera/libcameraservice/tests/Android.bp
+++ b/services/camera/libcameraservice/tests/Android.bp
@@ -49,7 +49,6 @@
"android.hardware.camera.device@3.2",
"android.hardware.camera.device@3.4",
"android.hardware.camera.device@3.7",
- "android.hardware.camera.device@3.8",
"android.hidl.token@1.0-utils",
],
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
index d5ddf9f..7b61290 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.cpp
@@ -19,7 +19,6 @@
#include "SessionConfigurationUtils.h"
#include "../api2/DepthCompositeStream.h"
#include "../api2/HeicCompositeStream.h"
-#include "android/hardware/camera/metadata/3.8/types.h"
#include "common/CameraDeviceBase.h"
#include "common/HalConversionsTemplated.h"
#include "../CameraService.h"
@@ -459,7 +458,7 @@
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT, msg.string());
}
if (timestampBase < OutputConfiguration::TIMESTAMP_BASE_DEFAULT ||
- timestampBase > OutputConfiguration::TIMESTAMP_BASE_CHOREOGRAPHER_SYNCED) {
+ timestampBase > OutputConfiguration::TIMESTAMP_BASE_MAX) {
String8 msg = String8::format("Camera %s: invalid timestamp base %d",
logicalCameraId.string(), timestampBase);
ALOGE("%s: %s", __FUNCTION__, msg.string());
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
index 97ca6b7..a127c7b 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtils.h
@@ -21,11 +21,9 @@
#include <camera/camera2/OutputConfiguration.h>
#include <camera/camera2/SessionConfiguration.h>
#include <camera/camera2/SubmitInfo.h>
-#include <android/hardware/camera/device/3.8/types.h>
#include <aidl/android/hardware/camera/device/ICameraDevice.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.8/ICameraDeviceSession.h>
#include <device3/Camera3StreamInterface.h>
#include <utils/IPCTransport.h>
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
index 07b55e1..5444f2a 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.cpp
@@ -27,8 +27,6 @@
using android::camera3::OutputStreamInfo;
using android::hardware::camera2::ICameraDeviceUser;
using android::hardware::camera::metadata::V3_6::CameraMetadataEnumAndroidSensorPixelMode;
-using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap;
-using android::hardware::camera::metadata::V3_8::CameraMetadataEnumAndroidScalerAvailableStreamUseCases;
namespace android {
namespace camera3 {
@@ -36,9 +34,9 @@
namespace SessionConfigurationUtils {
status_t
-convertAidlToHidl38StreamCombination(
+convertAidlToHidl37StreamCombination(
const aidl::android::hardware::camera::device::StreamConfiguration &aidl,
- hardware::camera::device::V3_8::StreamConfiguration &hidl) {
+ hardware::camera::device::V3_7::StreamConfiguration &hidl) {
hidl.operationMode =
static_cast<hardware::camera::device::V3_2::StreamConfigurationMode>(aidl.operationMode);
if (aidl.streamConfigCounter < 0) {
@@ -50,97 +48,66 @@
hidl.streams.resize(aidl.streams.size());
size_t i = 0;
for (const auto &stream : aidl.streams) {
- //hidlv3_8
- hidl.streams[i].dynamicRangeProfile =
- static_cast<
- CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap>
- (stream.dynamicRangeProfile);
- hidl.streams[i].useCase =
- static_cast<
- CameraMetadataEnumAndroidScalerAvailableStreamUseCases>
- (stream.useCase);
+ if (static_cast<int>(stream.dynamicRangeProfile) !=
+ ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
+ ALOGE("%s Dynamic range profile %" PRId64 " not supported by HIDL", __FUNCTION__,
+ stream.dynamicRangeProfile);
+ return BAD_VALUE;
+ }
+
+ if (static_cast<int>(stream.useCase) != ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {
+            ALOGE("%s Stream use case %" PRId64 " not supported by HIDL", __FUNCTION__,
+ stream.useCase);
+ return BAD_VALUE;
+ }
// hidl v3_7
- hidl.streams[i].v3_7.groupId = stream.groupId;
- hidl.streams[i].v3_7.sensorPixelModesUsed.resize(stream.sensorPixelModesUsed.size());
+ hidl.streams[i].groupId = stream.groupId;
+ hidl.streams[i].sensorPixelModesUsed.resize(stream.sensorPixelModesUsed.size());
size_t j = 0;
for (const auto &mode : stream.sensorPixelModesUsed) {
- hidl.streams[i].v3_7.sensorPixelModesUsed[j] =
+ hidl.streams[i].sensorPixelModesUsed[j] =
static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
j++;
}
//hidl v3_4
- hidl.streams[i].v3_7.v3_4.physicalCameraId = stream.physicalCameraId;
+ hidl.streams[i].v3_4.physicalCameraId = stream.physicalCameraId;
if (stream.bufferSize < 0) {
return BAD_VALUE;
}
- hidl.streams[i].v3_7.v3_4.bufferSize = static_cast<uint32_t>(stream.bufferSize);
+ hidl.streams[i].v3_4.bufferSize = static_cast<uint32_t>(stream.bufferSize);
// hidl v3_2
- hidl.streams[i].v3_7.v3_4.v3_2.id = stream.id;
- hidl.streams[i].v3_7.v3_4.v3_2.format =
+ hidl.streams[i].v3_4.v3_2.id = stream.id;
+ hidl.streams[i].v3_4.v3_2.format =
static_cast<hardware::graphics::common::V1_0::PixelFormat>(stream.format);
if (stream.width < 0 || stream.height < 0) {
return BAD_VALUE;
}
- hidl.streams[i].v3_7.v3_4.v3_2.width = static_cast<uint32_t>(stream.width);
- hidl.streams[i].v3_7.v3_4.v3_2.height = static_cast<uint32_t>(stream.height);
- hidl.streams[i].v3_7.v3_4.v3_2.usage =
+ hidl.streams[i].v3_4.v3_2.width = static_cast<uint32_t>(stream.width);
+ hidl.streams[i].v3_4.v3_2.height = static_cast<uint32_t>(stream.height);
+ hidl.streams[i].v3_4.v3_2.usage =
static_cast<hardware::camera::device::V3_2::BufferUsageFlags>(stream.usage);
- hidl.streams[i].v3_7.v3_4.v3_2.streamType =
+ hidl.streams[i].v3_4.v3_2.streamType =
static_cast<hardware::camera::device::V3_2::StreamType>(stream.streamType);
- hidl.streams[i].v3_7.v3_4.v3_2.dataSpace =
+ hidl.streams[i].v3_4.v3_2.dataSpace =
static_cast<hardware::camera::device::V3_2::DataspaceFlags>(stream.dataSpace);
- hidl.streams[i].v3_7.v3_4.v3_2.rotation =
+ hidl.streams[i].v3_4.v3_2.rotation =
static_cast<hardware::camera::device::V3_2::StreamRotation>(stream.rotation);
i++;
}
return OK;
}
-void mapStreamInfo(const OutputStreamInfo &streamInfo,
- camera3::camera_stream_rotation_t rotation, String8 physicalId,
- int32_t groupId, hardware::camera::device::V3_8::Stream *stream /*out*/) {
- if (stream == nullptr) {
- return;
- }
-
- stream->v3_7.v3_4.v3_2.streamType = hardware::camera::device::V3_2::StreamType::OUTPUT;
- stream->v3_7.v3_4.v3_2.width = streamInfo.width;
- stream->v3_7.v3_4.v3_2.height = streamInfo.height;
- stream->v3_7.v3_4.v3_2.format = HidlCamera3Device::mapToPixelFormat(streamInfo.format);
- auto u = streamInfo.consumerUsage;
- camera3::Camera3OutputStream::applyZSLUsageQuirk(streamInfo.format, &u);
- stream->v3_7.v3_4.v3_2.usage = HidlCamera3Device::mapToConsumerUsage(u);
- stream->v3_7.v3_4.v3_2.dataSpace = HidlCamera3Device::mapToHidlDataspace(streamInfo.dataSpace);
- stream->v3_7.v3_4.v3_2.rotation = HidlCamera3Device::mapToStreamRotation(rotation);
- stream->v3_7.v3_4.v3_2.id = -1; // Invalid stream id
- stream->v3_7.v3_4.physicalCameraId = std::string(physicalId.string());
- stream->v3_7.v3_4.bufferSize = 0;
- stream->v3_7.groupId = groupId;
- stream->v3_7.sensorPixelModesUsed.resize(streamInfo.sensorPixelModesUsed.size());
-
- size_t idx = 0;
- for (auto mode : streamInfo.sensorPixelModesUsed) {
- stream->v3_7.sensorPixelModesUsed[idx++] =
- static_cast<CameraMetadataEnumAndroidSensorPixelMode>(mode);
- }
- stream->dynamicRangeProfile =
- static_cast<CameraMetadataEnumAndroidRequestAvailableDynamicRangeProfilesMap> (
- streamInfo.dynamicRangeProfile);
- stream->useCase = static_cast<CameraMetadataEnumAndroidScalerAvailableStreamUseCases>(
- streamInfo.streamUseCase);
-}
-
binder::Status
convertToHALStreamCombination(
const SessionConfiguration& sessionConfiguration,
const String8 &logicalCameraId, const CameraMetadata &deviceInfo,
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
+ hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
bool overrideForPerfClass, bool *earlyExit) {
aidl::android::hardware::camera::device::StreamConfiguration aidlStreamConfiguration;
auto ret = convertToHALStreamCombination(sessionConfiguration, logicalCameraId, deviceInfo,
@@ -153,38 +120,14 @@
return binder::Status::ok();
}
- if (convertAidlToHidl38StreamCombination(aidlStreamConfiguration, streamConfiguration) != OK) {
+ if (convertAidlToHidl37StreamCombination(aidlStreamConfiguration, streamConfiguration) != OK) {
return STATUS_ERROR(CameraService::ERROR_ILLEGAL_ARGUMENT,
- "Invalid AIDL->HIDL3.8 conversion");
+ "Invalid AIDL->HIDL3.7 conversion");
}
return binder::Status::ok();
}
-bool convertHALStreamCombinationFromV38ToV37(
- hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
- const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38) {
- streamConfigV37.streams.resize(streamConfigV38.streams.size());
- for (size_t i = 0; i < streamConfigV38.streams.size(); i++) {
- if (static_cast<int64_t>(streamConfigV38.streams[i].dynamicRangeProfile) !=
- ANDROID_REQUEST_AVAILABLE_DYNAMIC_RANGE_PROFILES_MAP_STANDARD) {
- // ICameraDevice older than 3.8 doesn't support 10-bit dynamic range profiles
- // image
- return false;
- }
- if (static_cast<int64_t>(streamConfigV38.streams[i].useCase) !=
- ANDROID_SCALER_AVAILABLE_STREAM_USE_CASES_DEFAULT) {
- // ICameraDevice older than 3.8 doesn't support stream use case
- return false;
- }
- streamConfigV37.streams[i] = streamConfigV38.streams[i].v3_7;
- }
- streamConfigV37.operationMode = streamConfigV38.operationMode;
- streamConfigV37.sessionParams = streamConfigV38.sessionParams;
-
- return true;
-}
-
bool convertHALStreamCombinationFromV37ToV34(
hardware::camera::device::V3_4::StreamConfiguration &streamConfigV34,
const hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37) {
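For reference, a minimal standalone sketch of the check this hunk adds (stand-in types and constants; kDynamicRangeStandard, kStreamUseCaseDefault and the HLG10 value used in main are assumptions, not the real camera_metadata enums): any stream requesting a non-standard dynamic range profile or a non-default stream use case now fails the AIDL->HIDL V3_7 conversion instead of being carried in a V3_8 structure.

    // Standalone sketch -- stand-in values, not the real AIDL/HIDL definitions.
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    namespace sketch {

    constexpr int64_t kDynamicRangeStandard = 1;  // stand-in for ..._PROFILES_MAP_STANDARD
    constexpr int64_t kStreamUseCaseDefault = 0;  // stand-in for ..._STREAM_USE_CASES_DEFAULT

    struct AidlStream {
        int64_t dynamicRangeProfile;
        int64_t useCase;
    };

    // Mirrors the early-return checks added to convertAidlToHidl37StreamCombination.
    bool convertibleToV3_7(const std::vector<AidlStream>& streams) {
        for (const auto& stream : streams) {
            if (stream.dynamicRangeProfile != kDynamicRangeStandard) {
                std::printf("Dynamic range profile %lld not supported by HIDL\n",
                            static_cast<long long>(stream.dynamicRangeProfile));
                return false;
            }
            if (stream.useCase != kStreamUseCaseDefault) {
                std::printf("Stream use case %lld not supported by HIDL\n",
                            static_cast<long long>(stream.useCase));
                return false;
            }
        }
        return true;
    }

    }  // namespace sketch

    int main() {
        // A hypothetical 10-bit (HLG10-style) stream request is rejected on the HIDL path.
        std::vector<sketch::AidlStream> streams = {{2, sketch::kStreamUseCaseDefault}};
        std::printf("convertible: %s\n", sketch::convertibleToV3_7(streams) ? "yes" : "no");
        return 0;
    }
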
diff --git a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.h b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.h
index 66956c5..c47abe0 100644
--- a/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.h
+++ b/services/camera/libcameraservice/utils/SessionConfigurationUtilsHidl.h
@@ -16,10 +16,8 @@
#ifndef ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HIDL_H
#define ANDROID_SERVERS_CAMERA_SESSION_CONFIGURATION_UTILS_HIDL_H
-#include <android/hardware/camera/device/3.8/types.h>
#include <android/hardware/camera/device/3.4/ICameraDeviceSession.h>
#include <android/hardware/camera/device/3.7/ICameraDeviceSession.h>
-#include <android/hardware/camera/device/3.8/ICameraDeviceSession.h>
#include <utils/SessionConfigurationUtils.h>
@@ -30,10 +28,6 @@
namespace SessionConfigurationUtils {
-void mapStreamInfo(const camera3::OutputStreamInfo &streamInfo,
- camera3::camera_stream_rotation_t rotation, String8 physicalId, int32_t groupId,
- hardware::camera::device::V3_8::Stream *stream /*out*/);
-
// utility function to convert AIDL SessionConfiguration to HIDL
// streamConfiguration. Also checks for validity of SessionConfiguration and
// returns a non-ok binder::Status if the passed in session configuration
@@ -42,16 +36,9 @@
convertToHALStreamCombination(const SessionConfiguration& sessionConfiguration,
const String8 &cameraId, const CameraMetadata &deviceInfo,
metadataGetter getMetadata, const std::vector<std::string> &physicalCameraIds,
- hardware::camera::device::V3_8::StreamConfiguration &streamConfiguration,
+ hardware::camera::device::V3_7::StreamConfiguration &streamConfiguration,
bool overrideForPerfClass, bool *earlyExit);
-// Utility function to convert a V3_8::StreamConfiguration to
-// V3_7::StreamConfiguration. Return false if the original V3_8 configuration cannot
-// be used by older version HAL.
-bool convertHALStreamCombinationFromV38ToV37(
- hardware::camera::device::V3_7::StreamConfiguration &streamConfigV37,
- const hardware::camera::device::V3_8::StreamConfiguration &streamConfigV38);
-
// Utility function to convert a V3_7::StreamConfiguration to
// V3_4::StreamConfiguration. Return false if the original V3_7 configuration cannot
// be used by older version HAL.
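For reference, a standalone sketch of the downgrade helper that remains in this header (stand-in structs and an illustrative check only; the real convertHALStreamCombinationFromV37ToV34 operates on the HIDL V3_7/V3_4 types and may reject additional V3_7-only features): the V3_7 configuration is copied stream by stream into a V3_4 one, and the conversion is refused when a stream uses something an older HAL cannot express, such as stream groups.

    // Standalone sketch -- stand-in structs, not the real HIDL types.
    #include <cstdint>
    #include <string>
    #include <vector>

    struct StreamV3_4 { std::string physicalCameraId; uint32_t bufferSize = 0; };
    struct StreamV3_7 { StreamV3_4 v3_4; int32_t groupId = -1; };

    struct ConfigV3_4 { std::vector<StreamV3_4> streams; };
    struct ConfigV3_7 { std::vector<StreamV3_7> streams; };

    // Loosely mirrors convertHALStreamCombinationFromV37ToV34.
    bool downgradeToV3_4(ConfigV3_4 &out, const ConfigV3_7 &in) {
        out.streams.clear();
        out.streams.reserve(in.streams.size());
        for (const auto &s : in.streams) {
            if (s.groupId != -1) {
                // Stream groups (multi-resolution output) are not representable in V3_4.
                return false;
            }
            out.streams.push_back(s.v3_4);
        }
        return true;
    }

    int main() {
        ConfigV3_7 in;
        in.streams.push_back({{"0", 0}, /*groupId=*/2});  // grouped stream -> refused
        ConfigV3_4 out;
        return downgradeToV3_4(out, in) ? 0 : 1;
    }
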
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.cpp b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
index 3f18b95..bbfa0b7 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.cpp
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.cpp
@@ -228,9 +228,6 @@
if (mMmapStream != nullptr) {
// Needs to be explicitly cleared or CTS will fail but it is not clear why.
mMmapStream.clear();
- // Apparently the above close is asynchronous. An attempt to open a new device
- // right after a close can fail. Also some callbacks may still be in flight!
- // FIXME Make closing synchronous.
AudioClock::sleepForNanos(100 * AAUDIO_NANOS_PER_MILLISECOND);
}
}
@@ -362,12 +359,8 @@
asyncTask.detach();
}
-void AAudioServiceEndpointMMAP::onVolumeChanged(audio_channel_mask_t channels,
- android::Vector<float> values) {
- // TODO Do we really need a different volume for each channel?
- // We get called with an array filled with a single value!
- float volume = values[0];
- ALOGD("%s() volume[0] = %f", __func__, volume);
+void AAudioServiceEndpointMMAP::onVolumeChanged(float volume) {
+ ALOGD("%s() volume = %f", __func__, volume);
std::lock_guard<std::mutex> lock(mLockStreams);
for(const auto& stream : mRegisteredStreams) {
stream->onVolumeChanged(volume);
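For reference, a minimal standalone sketch of the simplified callback (FakeStream and FakeEndpoint are stand-ins, not the real oboeservice classes): a single volume value is now fanned out to every registered stream under the stream-list lock, with no per-channel vector involved.

    // Standalone sketch -- stand-in classes, not AAudioServiceEndpointMMAP itself.
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <vector>

    class FakeStream {
    public:
        void onVolumeChanged(float volume) { mVolume = volume; }
        float volume() const { return mVolume; }
    private:
        float mVolume = 1.0f;
    };

    class FakeEndpoint {
    public:
        void registerStream(std::shared_ptr<FakeStream> s) {
            std::lock_guard<std::mutex> lock(mLockStreams);
            mRegisteredStreams.push_back(std::move(s));
        }
        // Mirrors the new single-float signature: one value for all streams.
        void onVolumeChanged(float volume) {
            std::lock_guard<std::mutex> lock(mLockStreams);
            for (const auto& stream : mRegisteredStreams) {
                stream->onVolumeChanged(volume);
            }
        }
    private:
        std::mutex mLockStreams;
        std::vector<std::shared_ptr<FakeStream>> mRegisteredStreams;
    };

    int main() {
        FakeEndpoint endpoint;
        auto stream = std::make_shared<FakeStream>();
        endpoint.registerStream(stream);
        endpoint.onVolumeChanged(0.25f);
        std::printf("stream volume = %f\n", stream->volume());
        return 0;
    }
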
diff --git a/services/oboeservice/AAudioServiceEndpointMMAP.h b/services/oboeservice/AAudioServiceEndpointMMAP.h
index 3e7f2c7..3658c58 100644
--- a/services/oboeservice/AAudioServiceEndpointMMAP.h
+++ b/services/oboeservice/AAudioServiceEndpointMMAP.h
@@ -77,8 +77,7 @@
// -------------- Callback functions for MmapStreamCallback ---------------------
void onTearDown(audio_port_handle_t portHandle) override;
- void onVolumeChanged(audio_channel_mask_t channels,
- android::Vector<float> values) override;
+ void onVolumeChanged(float volume) override;
void onRoutingChanged(audio_port_handle_t portHandle) override;
// ------------------------------------------------------------------------------
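For reference, a short sketch of what the new override signature implies for callback implementers (SketchMmapStreamCallback is an assumption standing in for the real MmapStreamCallback interface, which declares more methods than shown here): subclasses now override a single-float onVolumeChanged rather than the channel-mask/vector form.

    // Standalone sketch -- a stand-in interface, not the real MmapStreamCallback.
    #include <cstdio>

    struct SketchMmapStreamCallback {
        virtual ~SketchMmapStreamCallback() = default;
        virtual void onVolumeChanged(float volume) = 0;
    };

    struct LoggingCallback : public SketchMmapStreamCallback {
        void onVolumeChanged(float volume) override {
            std::printf("volume changed to %f\n", volume);
        }
    };

    int main() {
        LoggingCallback cb;
        cb.onVolumeChanged(0.5f);
        return 0;
    }
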